sidekiq 4.2.4 → 5.2.0

This version of sidekiq has been flagged as potentially problematic.

Files changed (106)
  1. checksums.yaml +4 -4
  2. data/.github/issue_template.md +8 -1
  3. data/.gitignore +1 -0
  4. data/.travis.yml +5 -3
  5. data/5.0-Upgrade.md +56 -0
  6. data/COMM-LICENSE +1 -1
  7. data/Changes.md +151 -0
  8. data/Ent-Changes.md +77 -2
  9. data/Gemfile +10 -25
  10. data/LICENSE +1 -1
  11. data/Pro-4.0-Upgrade.md +35 -0
  12. data/Pro-Changes.md +156 -2
  13. data/README.md +9 -6
  14. data/Rakefile +1 -2
  15. data/bin/sidekiqctl +1 -1
  16. data/bin/sidekiqload +15 -33
  17. data/lib/generators/sidekiq/templates/worker_spec.rb.erb +1 -1
  18. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  19. data/lib/sidekiq/api.rb +157 -67
  20. data/lib/sidekiq/cli.rb +71 -26
  21. data/lib/sidekiq/client.rb +25 -18
  22. data/lib/sidekiq/core_ext.rb +1 -106
  23. data/lib/sidekiq/delay.rb +42 -0
  24. data/lib/sidekiq/exception_handler.rb +2 -4
  25. data/lib/sidekiq/extensions/generic_proxy.rb +7 -1
  26. data/lib/sidekiq/fetch.rb +1 -1
  27. data/lib/sidekiq/job_logger.rb +25 -0
  28. data/lib/sidekiq/job_retry.rb +241 -0
  29. data/lib/sidekiq/launcher.rb +45 -37
  30. data/lib/sidekiq/logging.rb +18 -2
  31. data/lib/sidekiq/manager.rb +3 -4
  32. data/lib/sidekiq/middleware/server/active_record.rb +10 -0
  33. data/lib/sidekiq/processor.rb +91 -34
  34. data/lib/sidekiq/rails.rb +15 -51
  35. data/lib/sidekiq/redis_connection.rb +31 -5
  36. data/lib/sidekiq/scheduled.rb +35 -8
  37. data/lib/sidekiq/testing.rb +24 -7
  38. data/lib/sidekiq/util.rb +6 -2
  39. data/lib/sidekiq/version.rb +1 -1
  40. data/lib/sidekiq/web/action.rb +2 -6
  41. data/lib/sidekiq/web/application.rb +28 -21
  42. data/lib/sidekiq/web/helpers.rb +67 -23
  43. data/lib/sidekiq/web/router.rb +14 -10
  44. data/lib/sidekiq/web.rb +4 -4
  45. data/lib/sidekiq/worker.rb +97 -14
  46. data/lib/sidekiq.rb +23 -24
  47. data/sidekiq.gemspec +7 -10
  48. data/web/assets/javascripts/application.js +0 -0
  49. data/web/assets/javascripts/dashboard.js +18 -13
  50. data/web/assets/stylesheets/application-rtl.css +246 -0
  51. data/web/assets/stylesheets/application.css +336 -4
  52. data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
  53. data/web/assets/stylesheets/bootstrap.css +2 -2
  54. data/web/locales/ar.yml +80 -0
  55. data/web/locales/en.yml +1 -0
  56. data/web/locales/es.yml +4 -3
  57. data/web/locales/fa.yml +80 -0
  58. data/web/locales/he.yml +79 -0
  59. data/web/locales/ja.yml +5 -3
  60. data/web/locales/ur.yml +80 -0
  61. data/web/views/_footer.erb +5 -2
  62. data/web/views/_job_info.erb +1 -1
  63. data/web/views/_nav.erb +1 -1
  64. data/web/views/_paging.erb +1 -1
  65. data/web/views/busy.erb +9 -5
  66. data/web/views/dashboard.erb +3 -3
  67. data/web/views/layout.erb +11 -2
  68. data/web/views/morgue.erb +14 -10
  69. data/web/views/queue.erb +10 -10
  70. data/web/views/queues.erb +4 -2
  71. data/web/views/retries.erb +13 -11
  72. data/web/views/retry.erb +1 -1
  73. data/web/views/scheduled.erb +2 -2
  74. metadata +26 -160
  75. data/lib/sidekiq/middleware/server/logging.rb +0 -40
  76. data/lib/sidekiq/middleware/server/retry_jobs.rb +0 -205
  77. data/test/config.yml +0 -9
  78. data/test/env_based_config.yml +0 -11
  79. data/test/fake_env.rb +0 -1
  80. data/test/fixtures/en.yml +0 -2
  81. data/test/helper.rb +0 -75
  82. data/test/test_actors.rb +0 -138
  83. data/test/test_api.rb +0 -528
  84. data/test/test_cli.rb +0 -418
  85. data/test/test_client.rb +0 -266
  86. data/test/test_exception_handler.rb +0 -56
  87. data/test/test_extensions.rb +0 -127
  88. data/test/test_fetch.rb +0 -50
  89. data/test/test_launcher.rb +0 -95
  90. data/test/test_logging.rb +0 -35
  91. data/test/test_manager.rb +0 -50
  92. data/test/test_middleware.rb +0 -158
  93. data/test/test_processor.rb +0 -235
  94. data/test/test_rails.rb +0 -22
  95. data/test/test_redis_connection.rb +0 -132
  96. data/test/test_retry.rb +0 -326
  97. data/test/test_retry_exhausted.rb +0 -149
  98. data/test/test_scheduled.rb +0 -115
  99. data/test/test_scheduling.rb +0 -58
  100. data/test/test_sidekiq.rb +0 -107
  101. data/test/test_testing.rb +0 -143
  102. data/test/test_testing_fake.rb +0 -357
  103. data/test/test_testing_inline.rb +0 -94
  104. data/test/test_util.rb +0 -13
  105. data/test/test_web.rb +0 -726
  106. data/test/test_web_helpers.rb +0 -54
data/lib/sidekiq/job_retry.rb
@@ -0,0 +1,241 @@
+ # frozen_string_literal: true
+ require 'sidekiq/scheduled'
+ require 'sidekiq/api'
+
+ module Sidekiq
+   ##
+   # Automatically retry jobs that fail in Sidekiq.
+   # Sidekiq's retry support assumes a typical development lifecycle:
+   #
+   #   0. Push some code changes with a bug in it.
+   #   1. Bug causes job processing to fail, Sidekiq's middleware captures
+   #      the job and pushes it onto a retry queue.
+   #   2. Sidekiq retries jobs in the retry queue multiple times with
+   #      an exponential delay, the job continues to fail.
+   #   3. After a few days, a developer deploys a fix. The job is
+   #      reprocessed successfully.
+   #   4. Once retries are exhausted, Sidekiq will give up and move the
+   #      job to the Dead Job Queue (aka morgue) where it must be dealt with
+   #      manually in the Web UI.
+   #   5. After 6 months on the DJQ, Sidekiq will discard the job.
+   #
+   # A job looks like:
+   #
+   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+   #
+   # The 'retry' option also accepts a number (in place of 'true'):
+   #
+   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+   #
+   # The job will be retried this number of times before giving up. (If simply
+   # 'true', Sidekiq retries 25 times)
+   #
+   # We'll add a bit more data to the job to support retries:
+   #
+   # * 'queue' - the queue to use
+   # * 'retry_count' - number of times we've retried so far.
+   # * 'error_message' - the message from the exception
+   # * 'error_class' - the exception class
+   # * 'failed_at' - the first time it failed
+   # * 'retried_at' - the last time it was retried
+   # * 'backtrace' - the number of lines of error backtrace to store
+   #
+   # We don't store the backtrace by default as that can add a lot of overhead
+   # to the job and everyone is using an error service, right?
+   #
+   # The default number of retries is 25 which works out to about 3 weeks
+   # You can change the default maximum number of retries in your initializer:
+   #
+   #   Sidekiq.options[:max_retries] = 7
+   #
+   # or limit the number of retries for a particular worker with:
+   #
+   #   class MyWorker
+   #     include Sidekiq::Worker
+   #     sidekiq_options :retry => 10
+   #   end
+   #
+   class JobRetry
+     class Skip < ::RuntimeError; end
+
+     include Sidekiq::Util
+
+     DEFAULT_MAX_RETRY_ATTEMPTS = 25
+
+     def initialize(options = {})
+       @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+     end
+
+     # The global retry handler requires only the barest of data.
+     # We want to be able to retry as much as possible so we don't
+     # require the worker to be instantiated.
+     def global(msg, queue)
+       yield
+     rescue Skip => ex
+       raise ex
+     rescue Sidekiq::Shutdown => ey
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise ey
+     rescue Exception => e
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+       raise e unless msg['retry']
+       attempt_retry(nil, msg, queue, e)
+       raise e
+     end
+
+
+     # The local retry support means that any errors that occur within
+     # this block can be associated with the given worker instance.
+     # This is required to support the `sidekiq_retries_exhausted` block.
+     #
+     # Note that any exception from the block is wrapped in the Skip
+     # exception so the global block does not reprocess the error. The
+     # Skip exception is unwrapped within Sidekiq::Processor#process before
+     # calling the handle_exception handlers.
+     def local(worker, msg, queue)
+       yield
+     rescue Skip => ex
+       raise ex
+     rescue Sidekiq::Shutdown => ey
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise ey
+     rescue Exception => e
+       # ignore, will be pushed back onto queue during hard_shutdown
+       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
+
+       if msg['retry'] == nil
+         msg['retry'] = worker.class.get_sidekiq_options['retry']
+       end
+
+       raise e unless msg['retry']
+       attempt_retry(worker, msg, queue, e)
+       # We've handled this error associated with this job, don't
+       # need to handle it at the global level
+       raise Skip
+     end
+
+     private
+
+     # Note that +worker+ can be nil here if an error is raised before we can
+     # instantiate the worker instance. All access must be guarded and
+     # best effort.
+     def attempt_retry(worker, msg, queue, exception)
+       max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+
+       msg['queue'] = if msg['retry_queue']
+         msg['retry_queue']
+       else
+         queue
+       end
+
+       # App code can stuff all sorts of crazy binary data into the error message
+       # that won't convert to JSON.
+       m = exception.message.to_s[0, 10_000]
+       if m.respond_to?(:scrub!)
+         m.force_encoding("utf-8")
+         m.scrub!
+       end
+
+       msg['error_message'] = m
+       msg['error_class'] = exception.class.name
+       count = if msg['retry_count']
+         msg['retried_at'] = Time.now.to_f
+         msg['retry_count'] += 1
+       else
+         msg['failed_at'] = Time.now.to_f
+         msg['retry_count'] = 0
+       end
+
+       if msg['backtrace'] == true
+         msg['error_backtrace'] = exception.backtrace
+       elsif !msg['backtrace']
+         # do nothing
+       elsif msg['backtrace'].to_i != 0
+         msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+       end
+
+       if count < max_retry_attempts
+         delay = delay_for(worker, count, exception)
+         logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+         retry_at = Time.now.to_f + delay
+         payload = Sidekiq.dump_json(msg)
+         Sidekiq.redis do |conn|
+           conn.zadd('retry', retry_at.to_s, payload)
+         end
+       else
+         # Goodbye dear message, you (re)tried your best I'm sure.
+         retries_exhausted(worker, msg, exception)
+       end
+     end
+
+     def retries_exhausted(worker, msg, exception)
+       logger.debug { "Retries exhausted for job" }
+       begin
+         block = worker && worker.sidekiq_retries_exhausted_block
+         block.call(msg, exception) if block
+       rescue => e
+         handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+       end
+
+       Sidekiq.death_handlers.each do |handler|
+         begin
+           handler.call(msg, exception)
+         rescue => e
+           handle_exception(e, { context: "Error calling death handler", job: msg })
+         end
+       end
+
+       send_to_morgue(msg) unless msg['dead'] == false
+     end
+
+     def send_to_morgue(msg)
+       Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+       payload = Sidekiq.dump_json(msg)
+       DeadSet.new.kill(payload, notify_failure: false)
+     end
+
+     def retry_attempts_from(msg_retry, default)
+       if msg_retry.is_a?(Integer)
+         msg_retry
+       else
+         default
+       end
+     end
+
+     def delay_for(worker, count, exception)
+       if worker && worker.sidekiq_retry_in_block
+         custom_retry_in = retry_in(worker, count, exception).to_i
+         return custom_retry_in if custom_retry_in > 0
+       end
+       seconds_to_delay(count)
+     end
+
+     # delayed_job uses the same basic formula
+     def seconds_to_delay(count)
+       (count ** 4) + 15 + (rand(30)*(count+1))
+     end
+
+     def retry_in(worker, count, exception)
+       begin
+         worker.sidekiq_retry_in_block.call(count, exception)
+       rescue Exception => e
+         handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
+         nil
+       end
+     end
+
+     def exception_caused_by_shutdown?(e, checked_causes = [])
+       return false unless e.cause
+
+       # Handle circular causes
+       checked_causes << e.object_id
+       return false if checked_causes.include?(e.cause.object_id)
+
+       e.cause.instance_of?(Sidekiq::Shutdown) ||
+         exception_caused_by_shutdown?(e.cause, checked_causes)
+     end
+
+   end
+ end
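
The comment block above lists the retry knobs this new class reads: retry, retry_queue, backtrace, the sidekiq_retry_in and sidekiq_retries_exhausted blocks, and the global death handlers. A minimal sketch of a worker wiring them together, using the standard Sidekiq::Worker DSL; the class name, queue, and timings are illustrative:

    class HardWorker
      include Sidekiq::Worker
      # Retry 10 times instead of the default 25, push retries onto the
      # 'low' queue, and keep 20 lines of backtrace in the job payload.
      sidekiq_options retry: 10, retry_queue: 'low', backtrace: 20

      # Overrides the default backoff of (count**4 + 15 + rand(30)*(count+1))
      # seconds; returning 0 or nil falls back to that formula.
      sidekiq_retry_in do |count, exception|
        60 * (count + 1)
      end

      # Runs once retries are exhausted, just before the job is sent to the morgue.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn("Giving up on #{msg['class']} #{msg['jid']}: #{exception.message}")
      end

      def perform(*args)
        # ...
      end
    end

    # Death handlers (invoked in retries_exhausted above) are registered globally:
    Sidekiq.configure_server do |config|
      config.death_handlers << ->(job, ex) { Sidekiq.logger.warn("Dead: #{job['jid']}") }
    end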
data/lib/sidekiq/launcher.rb
@@ -1,5 +1,4 @@
  # frozen_string_literal: true
- # encoding: utf-8
  require 'sidekiq/manager'
  require 'sidekiq/fetch'
  require 'sidekiq/scheduled'
@@ -13,6 +12,8 @@ module Sidekiq
    include Util

    attr_accessor :manager, :poller, :fetcher
+
+   STATS_TTL = 5*365*24*60*60

    def initialize(options)
      @manager = Sidekiq::Manager.new(options)
@@ -61,30 +62,33 @@ module Sidekiq

    private unless $TESTING

-   JVM_RESERVED_SIGNALS = ['USR1', 'USR2'] # Don't Process#kill if we get these signals via the API
-
-   def heartbeat(k, data, json)
-     results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, data) }
+   def heartbeat
+     results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, to_data) }
      results.compact!
      $0 = results.join(' ')

-     ❤(k, json)
+     ❤
    end

-   def ❤(key, json)
+   def ❤
+     key = identity
      fails = procd = 0
      begin
-       Processor::FAILURE.update {|curr| fails = curr; 0 }
-       Processor::PROCESSED.update {|curr| procd = curr; 0 }
+       fails = Processor::FAILURE.reset
+       procd = Processor::PROCESSED.reset

-       workers_key = "#{key}:workers".freeze
-       nowdate = Time.now.utc.strftime("%Y-%m-%d".freeze)
+       workers_key = "#{key}:workers"
+       nowdate = Time.now.utc.strftime("%Y-%m-%d")
        Sidekiq.redis do |conn|
          conn.multi do
-           conn.incrby("stat:processed".freeze, procd)
+           conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
-           conn.incrby("stat:failed".freeze, fails)
+           conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+           conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
+           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
            conn.del(workers_key)
            Processor::WORKER_STATE.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
@@ -98,7 +102,7 @@ module Sidekiq
          conn.multi do
            conn.sadd('processes', key)
            conn.exists(key)
-           conn.hmset(key, 'info', json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+           conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          end
@@ -109,43 +113,47 @@ module Sidekiq

        return unless msg

-       if JVM_RESERVED_SIGNALS.include?(msg)
-         Sidekiq::CLI.instance.handle_signal(msg)
-       else
-         ::Process.kill(msg, $$)
-       end
+       ::Process.kill(msg, $$)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
        # don't lose the counts if there was a network issue
-       Processor::PROCESSED.increment(procd)
-       Processor::FAILURE.increment(fails)
+       Processor::PROCESSED.incr(procd)
+       Processor::FAILURE.incr(fails)
      end
    end

    def start_heartbeat
-     k = identity
-     data = {
-       'hostname' => hostname,
-       'started_at' => Time.now.to_f,
-       'pid' => $$,
-       'tag' => @options[:tag] || '',
-       'concurrency' => @options[:concurrency],
-       'queues' => @options[:queues].uniq,
-       'labels' => @options[:labels],
-       'identity' => k,
-     }
-     # this data doesn't change so dump it to a string
-     # now so we don't need to dump it every heartbeat.
-     json = Sidekiq.dump_json(data)
-
      while true
-       heartbeat(k, data, json)
+       heartbeat
        sleep 5
      end
      Sidekiq.logger.info("Heartbeat stopping...")
    end

+   def to_data
+     @data ||= begin
+       {
+         'hostname' => hostname,
+         'started_at' => Time.now.to_f,
+         'pid' => $$,
+         'tag' => @options[:tag] || '',
+         'concurrency' => @options[:concurrency],
+         'queues' => @options[:queues].uniq,
+         'labels' => @options[:labels],
+         'identity' => identity,
+       }
+     end
+   end
+
+   def to_json
+     @json ||= begin
+       # this data changes infrequently so dump it to a string
+       # now so we don't need to dump it every heartbeat.
+       Sidekiq.dump_json(to_data)
+     end
+   end
+
    def clear_heartbeat
      # Remove record from Redis since we are shutting down.
      # Note we don't stop the heartbeat thread; if the process
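
The heartbeat now also puts a roughly five-year TTL (STATS_TTL) on the per-day counters so stale stat:processed:YYYY-MM-DD keys eventually expire. A rough sketch of inspecting the keys this method writes, using the key names from the file above; the output formatting is illustrative:

    Sidekiq.redis do |conn|
      today = Time.now.utc.strftime("%Y-%m-%d")
      puts conn.get("stat:processed:#{today}")   # daily counter, expires after STATS_TTL

      conn.smembers('processes').each do |key|   # one member per live Sidekiq process
        info, busy, beat = conn.hmget(key, 'info', 'busy', 'beat')
        puts "#{key} busy=#{busy} beat=#{Time.at(beat.to_f).utc}"
      end
    end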
data/lib/sidekiq/logging.rb
@@ -11,7 +11,7 @@ module Sidekiq

      # Provide a call() method that returns the formatted message.
      def call(severity, time, program_name, message)
-       "#{time.utc.iso8601(3)} #{::Process.pid} TID-#{Thread.current.object_id.to_s(36)}#{context} #{severity}: #{message}\n"
+       "#{time.utc.iso8601(3)} #{::Process.pid} TID-#{Sidekiq::Logging.tid}#{context} #{severity}: #{message}\n"
      end

      def context
@@ -22,10 +22,26 @@ module Sidekiq

    class WithoutTimestamp < Pretty
      def call(severity, time, program_name, message)
-       "#{::Process.pid} TID-#{Thread.current.object_id.to_s(36)}#{context} #{severity}: #{message}\n"
+       "#{::Process.pid} TID-#{Sidekiq::Logging.tid}#{context} #{severity}: #{message}\n"
      end
    end

+   def self.tid
+     Thread.current['sidekiq_tid'] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+   end
+
+   def self.job_hash_context(job_hash)
+     # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+     # attribute to expose the underlying thing.
+     klass = job_hash['wrapped'] || job_hash["class"]
+     bid = job_hash['bid']
+     "#{klass} JID-#{job_hash['jid']}#{" BID-#{bid}" if bid}"
+   end
+
+   def self.with_job_hash_context(job_hash, &block)
+     with_context(job_hash_context(job_hash), &block)
+   end
+
    def self.with_context(msg)
      Thread.current[:sidekiq_context] ||= []
      Thread.current[:sidekiq_context] << msg
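
Sidekiq::Logging.tid is now a stable per-thread id (object_id XOR pid, base 36), and with_job_hash_context pushes a "Class JID-... [BID-...]" string onto the thread-local context read by the formatters above. A sketch of the resulting line; the timestamp, pid, TID, and JID values are made up:

    Sidekiq::Logging.with_job_hash_context('class' => 'HardWorker', 'jid' => 'b4a577edbccf1d805744') do
      Sidekiq.logger.info('start')
    end
    # 2018-05-01T12:00:00.000Z 4321 TID-ouxtd9i9s HardWorker JID-b4a577edbccf1d805744 INFO: start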
data/lib/sidekiq/manager.rb
@@ -1,5 +1,4 @@
  # frozen_string_literal: true
- # encoding: utf-8
  require 'sidekiq/util'
  require 'sidekiq/processor'
  require 'sidekiq/fetch'
@@ -10,7 +9,7 @@ module Sidekiq

    ##
    # The Manager is the central coordination point in Sidekiq, controlling
-   # the lifecycle of the Processors and feeding them jobs as necessary.
+   # the lifecycle of the Processors.
    #
    # Tasks:
    #
@@ -54,7 +53,7 @@

      logger.info { "Terminating quiet workers" }
      @workers.each { |x| x.terminate }
-     fire_event(:quiet, true)
+     fire_event(:quiet, reverse: true)
    end

    # hack for quicker development / testing environment #2774
@@ -62,7 +61,7 @@

    def stop(deadline)
      quiet
-     fire_event(:shutdown, true)
+     fire_event(:shutdown, reverse: true)

      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
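
fire_event(:quiet, reverse: true) and fire_event(:shutdown, reverse: true) still invoke the lifecycle blocks registered at boot, just in reverse registration order during shutdown. Registration itself is unchanged; a sketch with illustrative handler bodies:

    Sidekiq.configure_server do |config|
      config.on(:startup)  { Sidekiq.logger.info('booted') }
      config.on(:quiet)    { Sidekiq.logger.info('TSTP received, no longer fetching jobs') }
      config.on(:shutdown) { Sidekiq.logger.info('terminating') }
    end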
data/lib/sidekiq/middleware/server/active_record.rb
@@ -1,7 +1,17 @@
+ # frozen_string_literal: true
  module Sidekiq
    module Middleware
      module Server
        class ActiveRecord
+
+         def initialize
+           # With Rails 5+ we must use the Reloader **always**.
+           # The reloader handles code loading and db connection management.
+           if defined?(::Rails) && ::Rails::VERSION::MAJOR >= 5
+             raise ArgumentError, "Rails 5 no longer needs or uses the ActiveRecord middleware."
+           end
+         end
+
          def call(*args)
            yield
          ensure
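
Because this constructor now raises on Rails 5+, an app initializer that still adds the middleware to the server chain has to drop it; the Rails Reloader (wired up in processor.rb below) takes over connection management. A rough sketch:

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        # No longer needed (and now rejected) on Rails 5+.
        chain.remove Sidekiq::Middleware::Server::ActiveRecord
      end
    end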
data/lib/sidekiq/processor.rb
@@ -1,9 +1,9 @@
  # frozen_string_literal: true
  require 'sidekiq/util'
  require 'sidekiq/fetch'
+ require 'sidekiq/job_logger'
+ require 'sidekiq/job_retry'
  require 'thread'
- require 'concurrent/map'
- require 'concurrent/atomic/atomic_fixnum'

  module Sidekiq
    ##
@@ -37,6 +37,8 @@ module Sidekiq
      @thread = nil
      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
      @reloader = Sidekiq.options[:reloader]
+     @logging = (mgr.options[:job_logger] || Sidekiq::JobLogger).new
+     @retrier = Sidekiq::JobRetry.new
    end

    def terminate(wait=false)
@@ -107,36 +109,63 @@ module Sidekiq
      if !@down
        @down = Time.now
        logger.error("Error fetching job: #{ex}")
-       ex.backtrace.each do |bt|
-         logger.error(bt)
-       end
+       handle_exception(ex)
      end
      sleep(1)
      nil
    end

+   def dispatch(job_hash, queue)
+     # since middleware can mutate the job hash
+     # we clone here so we report the original
+     # job structure to the Web UI
+     pristine = cloned(job_hash)
+
+     Sidekiq::Logging.with_job_hash_context(job_hash) do
+       @retrier.global(pristine, queue) do
+         @logging.call(job_hash, queue) do
+           stats(pristine, queue) do
+             # Rails 5 requires a Reloader to wrap code execution. In order to
+             # constantize the worker and instantiate an instance, we have to call
+             # the Reloader. It handles code loading, db connection management, etc.
+             # Effectively this block denotes a "unit of work" to Rails.
+             @reloader.call do
+               klass = constantize(job_hash['class'])
+               worker = klass.new
+               worker.jid = job_hash['jid']
+               @retrier.local(worker, pristine, queue) do
+                 yield worker
+               end
+             end
+           end
+         end
+       end
+     end
+   end
+
    def process(work)
      jobstr = work.job
      queue = work.queue_name

      ack = false
      begin
-       @reloader.call do
-         job = Sidekiq.load_json(jobstr)
-         klass = job['class'.freeze].constantize
-         worker = klass.new
-         worker.jid = job['jid'.freeze]
-
-         stats(worker, job, queue) do
-           Sidekiq.server_middleware.invoke(worker, job, queue) do
-             # Only ack if we either attempted to start this job or
-             # successfully completed it. This prevents us from
-             # losing jobs if a middleware raises an exception before yielding
-             ack = true
-             execute_job(worker, cloned(job['args'.freeze]))
-           end
-         end
+       # Treat malformed JSON as a special case: job goes straight to the morgue.
+       job_hash = nil
+       begin
+         job_hash = Sidekiq.load_json(jobstr)
+       rescue => ex
+         handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
+         # we can't notify because the job isn't a valid hash payload.
+         DeadSet.new.kill(jobstr, notify_failure: false)
          ack = true
+         raise
+       end
+
+       ack = true
+       dispatch(job_hash, queue) do |worker|
+         Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
+           execute_job(worker, cloned(job_hash['args']))
+         end
        end
      rescue Sidekiq::Shutdown
        # Had to force kill this job because it didn't finish
@@ -144,8 +173,9 @@
        # we didn't properly finish it.
        ack = false
      rescue Exception => ex
-       handle_exception(ex, { :context => "Job raised exception", :job => job, :jobstr => jobstr })
-       raise
+       e = ex.is_a?(::Sidekiq::JobRetry::Skip) && ex.cause ? ex.cause : ex
+       handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
+       raise e
      ensure
        work.acknowledge if ack
      end
@@ -155,34 +185,61 @@
      worker.perform(*cloned_args)
    end

-   def thread_identity
-     @str ||= Thread.current.object_id.to_s(36)
+   # Ruby doesn't provide atomic counters out of the box so we'll
+   # implement something simple ourselves.
+   # https://bugs.ruby-lang.org/issues/14706
+   class Counter
+     def initialize
+       @value = 0
+       @lock = Mutex.new
+     end
+
+     def incr(amount=1)
+       @lock.synchronize { @value = @value + amount }
+     end
+
+     def reset
+       @lock.synchronize { val = @value; @value = 0; val }
+     end
    end

-   WORKER_STATE = Concurrent::Map.new
-   PROCESSED = Concurrent::AtomicFixnum.new
-   FAILURE = Concurrent::AtomicFixnum.new
+   PROCESSED = Counter.new
+   FAILURE = Counter.new
+   # This is mutable global state but because each thread is storing
+   # its own unique key/value, there's no thread-safety issue AFAIK.
+   WORKER_STATE = {}

-   def stats(worker, job, queue)
-     tid = thread_identity
-     WORKER_STATE[tid] = {:queue => queue, :payload => cloned(job), :run_at => Time.now.to_i }
+   def stats(job_hash, queue)
+     tid = Sidekiq::Logging.tid
+     WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }

      begin
        yield
      rescue Exception
-       FAILURE.increment
+       FAILURE.incr
        raise
      ensure
        WORKER_STATE.delete(tid)
-       PROCESSED.increment
+       PROCESSED.incr
      end
    end

    # Deep clone the arguments passed to the worker so that if
    # the job fails, what is pushed back onto Redis hasn't
    # been mutated by the worker.
-   def cloned(ary)
-     Marshal.load(Marshal.dump(ary))
+   def cloned(thing)
+     Marshal.load(Marshal.dump(thing))
+   end
+
+   def constantize(str)
+     names = str.split('::')
+     names.shift if names.empty? || names.first.empty?
+
+     names.inject(Object) do |constant, name|
+       # the false flag limits search for name to under the constant namespace
+       # which mimics Rails' behaviour
+       constant.const_defined?(name, false) ? constant.const_get(name, false) : constant.const_missing(name)
+     end
    end

  end
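
The new dispatch method threads each job through a pluggable job logger (mgr.options[:job_logger], defaulting to Sidekiq::JobLogger) whose call(job_hash, queue) must yield to actually run the job. A minimal sketch of a quieter replacement, assuming the option is set from an initializer; the class name is illustrative:

    class QuietJobLogger
      def call(job_hash, queue)
        started = Time.now
        yield   # run the job; re-raise so the retry machinery still sees failures
      rescue Exception
        Sidekiq.logger.warn("#{job_hash['class']} #{job_hash['jid']} failed after #{Time.now - started}s")
        raise
      end
    end

    Sidekiq.options[:job_logger] = QuietJobLogger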