sidekiq 5.1.3 → 5.2.10

Potentially problematic release: this version of sidekiq might be problematic.

Files changed (47)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.gitignore +2 -0
  4. data/.travis.yml +2 -5
  5. data/COMM-LICENSE +11 -9
  6. data/Changes.md +80 -0
  7. data/Ent-Changes.md +22 -0
  8. data/Gemfile +16 -5
  9. data/Pro-Changes.md +30 -0
  10. data/README.md +1 -1
  11. data/Rakefile +2 -1
  12. data/bin/sidekiqctl +13 -92
  13. data/bin/sidekiqload +1 -1
  14. data/lib/sidekiq/api.rb +50 -15
  15. data/lib/sidekiq/cli.rb +64 -58
  16. data/lib/sidekiq/client.rb +4 -3
  17. data/lib/sidekiq/ctl.rb +221 -0
  18. data/lib/sidekiq/job_logger.rb +2 -2
  19. data/lib/sidekiq/job_retry.rb +33 -12
  20. data/lib/sidekiq/launcher.rb +15 -8
  21. data/lib/sidekiq/manager.rb +3 -3
  22. data/lib/sidekiq/middleware/server/active_record.rb +1 -1
  23. data/lib/sidekiq/processor.rb +76 -25
  24. data/lib/sidekiq/rails.rb +2 -1
  25. data/lib/sidekiq/redis_connection.rb +20 -1
  26. data/lib/sidekiq/scheduled.rb +32 -3
  27. data/lib/sidekiq/testing.rb +4 -4
  28. data/lib/sidekiq/version.rb +1 -1
  29. data/lib/sidekiq/web/action.rb +1 -1
  30. data/lib/sidekiq/web/application.rb +26 -1
  31. data/lib/sidekiq/web/helpers.rb +13 -6
  32. data/lib/sidekiq/worker.rb +24 -8
  33. data/lib/sidekiq.rb +5 -3
  34. data/sidekiq.gemspec +5 -12
  35. data/web/assets/javascripts/application.js +0 -0
  36. data/web/assets/javascripts/dashboard.js +15 -5
  37. data/web/assets/stylesheets/application.css +35 -2
  38. data/web/assets/stylesheets/bootstrap.css +2 -2
  39. data/web/locales/ar.yml +1 -0
  40. data/web/locales/en.yml +1 -0
  41. data/web/locales/es.yml +3 -3
  42. data/web/views/_nav.erb +3 -17
  43. data/web/views/layout.erb +1 -1
  44. data/web/views/queue.erb +1 -0
  45. data/web/views/queues.erb +1 -1
  46. data/web/views/retries.erb +4 -0
  47. metadata +19 -86

data/lib/sidekiq/job_retry.rb CHANGED
@@ -56,7 +56,8 @@ module Sidekiq
  # end
  #
  class JobRetry
- class Skip < ::RuntimeError; end
+ class Handled < ::RuntimeError; end
+ class Skip < Handled; end

  include Sidekiq::Util

@@ -71,7 +72,7 @@ module Sidekiq
  # require the worker to be instantiated.
  def global(msg, queue)
  yield
- rescue Skip => ex
+ rescue Handled => ex
  raise ex
  rescue Sidekiq::Shutdown => ey
  # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,9 +81,19 @@ module Sidekiq
  # ignore, will be pushed back onto queue during hard_shutdown
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

- raise e unless msg['retry']
- attempt_retry(nil, msg, queue, e)
- raise e
+ if msg['retry']
+ attempt_retry(nil, msg, queue, e)
+ else
+ Sidekiq.death_handlers.each do |handler|
+ begin
+ handler.call(msg, e)
+ rescue => handler_ex
+ handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
+ end
+ end
+ end
+
+ raise Handled
  end


@@ -96,7 +107,7 @@ module Sidekiq
  # calling the handle_exception handlers.
  def local(worker, msg, queue)
  yield
- rescue Skip => ex
+ rescue Handled => ex
  raise ex
  rescue Sidekiq::Shutdown => ey
  # ignore, will be pushed back onto queue during hard_shutdown
@@ -130,9 +141,7 @@ module Sidekiq
  queue
  end

- # App code can stuff all sorts of crazy binary data into the error message
- # that won't convert to JSON.
- m = exception.message.to_s[0, 10_000]
+ m = exception_message(exception)
  if m.respond_to?(:scrub!)
  m.force_encoding("utf-8")
  m.scrub!
@@ -158,7 +167,8 @@ module Sidekiq

  if count < max_retry_attempts
  delay = delay_for(worker, count, exception)
- logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+ # Logging here can break retries if the logging device raises ENOSPC #3979
+ #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
  retry_at = Time.now.to_f + delay
  payload = Sidekiq.dump_json(msg)
  Sidekiq.redis do |conn|
@@ -171,7 +181,6 @@ module Sidekiq
  end

  def retries_exhausted(worker, msg, exception)
- logger.debug { "Retries exhausted for job" }
  begin
  block = worker && worker.sidekiq_retries_exhausted_block
  block.call(msg, exception) if block
@@ -191,7 +200,7 @@ module Sidekiq
  end

  def send_to_morgue(msg)
- Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+ logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
  payload = Sidekiq.dump_json(msg)
  DeadSet.new.kill(payload, notify_failure: false)
  end
@@ -237,5 +246,17 @@ module Sidekiq
  exception_caused_by_shutdown?(e.cause, checked_causes)
  end

+ # Extract message from exception.
+ # Set a default if the message raises an error
+ def exception_message(exception)
+ begin
+ # App code can stuff all sorts of crazy binary data into the error message
+ # that won't convert to JSON.
+ exception.message.to_s[0, 10_000]
+ rescue
+ "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
+ end
+ end
+
  end
  end
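
The `Skip` exception is re-parented under a new `Handled` class, and jobs that run out of options (here, `retry: false` jobs in the worker-less `global` path) now invoke the global death handlers. Registering one is a one-liner in the server configuration; a minimal sketch, where `ErrorTracker.notify` stands in for whatever reporting you use:

```ruby
# config/initializers/sidekiq.rb
# Minimal sketch: a death handler runs when a job dies, i.e. it will never be
# retried again. `ErrorTracker` is a hypothetical reporting service, not part
# of Sidekiq.
Sidekiq.configure_server do |config|
  config.death_handlers << ->(job, exception) do
    ErrorTracker.notify("#{job['class']} #{job['jid']} died", exception)
  end
end
```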

data/lib/sidekiq/launcher.rb CHANGED
@@ -13,6 +13,8 @@ module Sidekiq

  attr_accessor :manager, :poller, :fetcher

+ STATS_TTL = 5*365*24*60*60
+
  def initialize(options)
  @manager = Sidekiq::Manager.new(options)
  @poller = Sidekiq::Scheduled::Poller.new
@@ -38,7 +40,7 @@ module Sidekiq
  # return until all work is complete and cleaned up.
  # It can take up to the timeout to complete.
  def stop
- deadline = Time.now + @options[:timeout]
+ deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]

  @done = true
  @manager.quiet
@@ -72,8 +74,9 @@ module Sidekiq
  key = identity
  fails = procd = 0
  begin
- Processor::FAILURE.update {|curr| fails = curr; 0 }
- Processor::PROCESSED.update {|curr| procd = curr; 0 }
+ fails = Processor::FAILURE.reset
+ procd = Processor::PROCESSED.reset
+ curstate = Processor::WORKER_STATE.dup

  workers_key = "#{key}:workers"
  nowdate = Time.now.utc.strftime("%Y-%m-%d")
@@ -81,10 +84,14 @@ module Sidekiq
  conn.multi do
  conn.incrby("stat:processed", procd)
  conn.incrby("stat:processed:#{nowdate}", procd)
+ conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
  conn.incrby("stat:failed", fails)
  conn.incrby("stat:failed:#{nowdate}", fails)
+ conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
  conn.del(workers_key)
- Processor::WORKER_STATE.each_pair do |tid, hash|
+ curstate.each_pair do |tid, hash|
  conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
  end
  conn.expire(workers_key, 60)
@@ -95,8 +102,8 @@ module Sidekiq
  _, exists, _, _, msg = Sidekiq.redis do |conn|
  conn.multi do
  conn.sadd('processes', key)
- conn.exists(key)
- conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+ conn.exists?(key)
+ conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
  conn.expire(key, 60)
  conn.rpop("#{key}-signals")
  end
@@ -112,8 +119,8 @@ module Sidekiq
  # ignore all redis/network issues
  logger.error("heartbeat: #{e.message}")
  # don't lose the counts if there was a network issue
- Processor::PROCESSED.increment(procd)
- Processor::FAILURE.increment(fails)
+ Processor::PROCESSED.incr(procd)
+ Processor::FAILURE.incr(fails)
  end
  end

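Two themes in the launcher changes: shutdown deadlines are now measured with the monotonic clock, which is immune to NTP or DST adjustments of wall-clock time, and the per-day `stat:processed`/`stat:failed` keys get a roughly five-year TTL so they no longer accumulate forever. A small sketch of the deadline pattern, with an illustrative timeout value:

```ruby
# Sketch of the monotonic-deadline pattern now used in Launcher#stop and the
# manager's shutdown wait loop: elapsed time is measured against a clock that
# never jumps when the system clock changes.
timeout  = 25 # seconds, e.g. Sidekiq's -t / :timeout option
deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + timeout

# ...ask workers to quiet down, then check how much time is left...
remaining = deadline - Process.clock_gettime(Process::CLOCK_MONOTONIC)
puts format("hard shutdown in %.1fs", remaining)
```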

data/lib/sidekiq/manager.rb CHANGED
@@ -30,7 +30,7 @@ module Sidekiq
  def initialize(options={})
  logger.debug { options.inspect }
  @options = options
- @count = options[:concurrency] || 25
+ @count = options[:concurrency] || 10
  raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

  @done = false
@@ -70,11 +70,11 @@ module Sidekiq
  return if @workers.empty?

  logger.info { "Pausing to allow workers to finish..." }
- remaining = deadline - Time.now
+ remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
  while remaining > PAUSE_TIME
  return if @workers.empty?
  sleep PAUSE_TIME
- remaining = deadline - Time.now
+ remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
  end
  return if @workers.empty?

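The default concurrency drops from 25 threads to 10. Deployments that sized their database pools around the old default and want it back must now say so explicitly; a hedged sketch of doing that from an initializer (the `concurrency:` key in config/sidekiq.yml or the `-c` CLI flag are the more common routes):

```ruby
# Sketch: opt back into 25 worker threads instead of the new default of 10.
# The equivalent YAML is `concurrency: 25` in config/sidekiq.yml, or `-c 25`
# on the sidekiq command line.
Sidekiq.configure_server do |config|
  config.options[:concurrency] = 25
end
```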

data/lib/sidekiq/middleware/server/active_record.rb CHANGED
@@ -7,7 +7,7 @@ module Sidekiq
  def initialize
  # With Rails 5+ we must use the Reloader **always**.
  # The reloader handles code loading and db connection management.
- if defined?(::Rails) && ::Rails::VERSION::MAJOR >= 5
+ if defined?(::Rails) && defined?(::Rails::VERSION) && ::Rails::VERSION::MAJOR >= 5
  raise ArgumentError, "Rails 5 no longer needs or uses the ActiveRecord middleware."
  end
  end

data/lib/sidekiq/processor.rb CHANGED
@@ -4,8 +4,6 @@ require 'sidekiq/fetch'
  require 'sidekiq/job_logger'
  require 'sidekiq/job_retry'
  require 'thread'
- require 'concurrent/map'
- require 'concurrent/atomic/atomic_fixnum'

  module Sidekiq
  ##
@@ -89,7 +87,7 @@ module Sidekiq
  def get_one
  begin
  work = @strategy.retrieve_work
- (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
+ (logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }; @down = nil) if @down
  work
  rescue Sidekiq::Shutdown
  rescue => ex
@@ -109,7 +107,7 @@ module Sidekiq

  def handle_fetch_exception(ex)
  if !@down
- @down = Time.now
+ @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
  logger.error("Error fetching job: #{ex}")
  handle_exception(ex)
  end
@@ -149,21 +147,19 @@ module Sidekiq
  jobstr = work.job
  queue = work.queue_name

- ack = false
+ # Treat malformed JSON as a special case: job goes straight to the morgue.
+ job_hash = nil
  begin
- # Treat malformed JSON as a special case: job goes straight to the morgue.
- job_hash = nil
- begin
- job_hash = Sidekiq.load_json(jobstr)
- rescue => ex
- handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
- # we can't notify because the job isn't a valid hash payload.
- DeadSet.new.kill(jobstr, notify_failure: false)
- ack = true
- raise
- end
+ job_hash = Sidekiq.load_json(jobstr)
+ rescue => ex
+ handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
+ # we can't notify because the job isn't a valid hash payload.
+ DeadSet.new.kill(jobstr, notify_failure: false)
+ return work.acknowledge
+ end

- ack = true
+ ack = true
+ begin
  dispatch(job_hash, queue) do |worker|
  Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
  execute_job(worker, cloned(job_hash['args']))
@@ -174,10 +170,19 @@ module Sidekiq
  # within the timeout. Don't acknowledge the work since
  # we didn't properly finish it.
  ack = false
- rescue Exception => ex
- e = ex.is_a?(::Sidekiq::JobRetry::Skip) && ex.cause ? ex.cause : ex
+ rescue Sidekiq::JobRetry::Handled => h
+ # this is the common case: job raised error and Sidekiq::JobRetry::Handled
+ # signals that we created a retry successfully. We can acknowlege the job.
+ e = h.cause ? h.cause : h
  handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
  raise e
+ rescue Exception => ex
+ # Unexpected error! This is very bad and indicates an exception that got past
+ # the retry subsystem (e.g. network partition). We won't acknowledge the job
+ # so it can be rescued when using Sidekiq Pro.
+ ack = false
+ handle_exception(ex, { :context => "Internal exception!", :job => job_hash, :jobstr => jobstr })
+ raise e
  ensure
  work.acknowledge if ack
  end
@@ -187,22 +192,68 @@ module Sidekiq
  worker.perform(*cloned_args)
  end

- WORKER_STATE = Concurrent::Map.new
- PROCESSED = Concurrent::AtomicFixnum.new
- FAILURE = Concurrent::AtomicFixnum.new
+ # Ruby doesn't provide atomic counters out of the box so we'll
+ # implement something simple ourselves.
+ # https://bugs.ruby-lang.org/issues/14706
+ class Counter
+ def initialize
+ @value = 0
+ @lock = Mutex.new
+ end
+
+ def incr(amount=1)
+ @lock.synchronize { @value = @value + amount }
+ end
+
+ def reset
+ @lock.synchronize { val = @value; @value = 0; val }
+ end
+ end
+
+ # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
+ class SharedWorkerState
+ def initialize
+ @worker_state = {}
+ @lock = Mutex.new
+ end
+
+ def set(tid, hash)
+ @lock.synchronize { @worker_state[tid] = hash }
+ end
+
+ def delete(tid)
+ @lock.synchronize { @worker_state.delete(tid) }
+ end
+
+ def dup
+ @lock.synchronize { @worker_state.dup }
+ end
+
+ def size
+ @lock.synchronize { @worker_state.size }
+ end
+
+ def clear
+ @lock.synchronize { @worker_state.clear }
+ end
+ end
+
+ PROCESSED = Counter.new
+ FAILURE = Counter.new
+ WORKER_STATE = SharedWorkerState.new

  def stats(job_hash, queue)
  tid = Sidekiq::Logging.tid
- WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }
+ WORKER_STATE.set(tid, {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i })

  begin
  yield
  rescue Exception
- FAILURE.increment
+ FAILURE.incr
  raise
  ensure
  WORKER_STATE.delete(tid)
- PROCESSED.increment
+ PROCESSED.incr
  end
  end

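With the `concurrent-ruby` requires gone, the processed/failed counters and the per-thread worker state are backed by the two small mutex-guarded classes defined in the hunk above. A quick sketch of the new `Counter` semantics, which is what lets the heartbeat flush and zero the counts in one step:

```ruby
require 'sidekiq/processor'

# incr adds under a mutex; reset returns the accumulated value and zeroes it
# in the same critical section, so the heartbeat can move counts into Redis
# without losing increments from concurrently running worker threads.
counter = Sidekiq::Processor::Counter.new
counter.incr        # => 1
counter.incr(4)     # => 5
counter.reset       # => 5 (and the counter is back to 0)
counter.reset       # => 0
```
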
data/lib/sidekiq/rails.rb CHANGED
@@ -1,4 +1,5 @@
  # frozen_string_literal: true
+
  module Sidekiq
  class Rails < ::Rails::Engine
  # We need to setup this up before any application configuration which might
@@ -54,4 +55,4 @@ if defined?(::Rails) && ::Rails::VERSION::MAJOR < 4
  $stderr.puts("**************************************************")
  $stderr.puts("⛔️ WARNING: Sidekiq server is no longer supported by Rails 3.2 - please ensure your server/workers are updated")
  $stderr.puts("**************************************************")
- end
+ end

data/lib/sidekiq/redis_connection.rb CHANGED
@@ -78,7 +78,7 @@ module Sidekiq
  opts.delete(:network_timeout)
  end

- opts[:driver] ||= 'ruby'
+ opts[:driver] ||= Redis::Connection.drivers.last || 'ruby'

  # Issue #3303, redis-rb will silently retry an operation.
  # This can lead to duplicate jobs if Sidekiq::Client's LPUSH
@@ -115,6 +115,25 @@ module Sidekiq
  # REDIS_PROVIDER=MY_REDIS_URL
  # and Sidekiq will find your custom URL variable with no custom
  # initialization code at all.
+ p = ENV['REDIS_PROVIDER']
+ if p && p =~ /\:/
+ Sidekiq.logger.error <<-EOM
+
+ #################################################################################
+
+ REDIS_PROVIDER should be set to the **name** of the variable which contains the Redis URL, not a URL itself.
+ Platforms like Heroku sell addons that publish a *_URL variable. You tell Sidekiq with REDIS_PROVIDER, e.g.:
+
+ REDIS_PROVIDER=REDISTOGO_URL
+ REDISTOGO_URL=redis://somehost.example.com:6379/4
+
+ Use REDIS_URL if you wish to point Sidekiq to a URL directly.
+
+ This configuration error will crash starting in Sidekiq 5.3.
+
+ #################################################################################
+ EOM
+ end
  ENV[
  ENV['REDIS_PROVIDER'] || 'REDIS_URL'
  ]
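
The new check flags a common misconfiguration: `REDIS_PROVIDER` must name the environment variable that holds the Redis URL, not be a URL itself, and the warning says this becomes a hard error in Sidekiq 5.3. For reference, the same connection can be configured explicitly from Ruby, sidestepping the indirection; a sketch using the `REDISTOGO_URL` variable from the warning text:

```ruby
# Sketch: point Sidekiq at a Redis URL directly instead of (or in addition to)
# the REDIS_PROVIDER / *_URL environment-variable indirection.
redis_url = ENV.fetch('REDISTOGO_URL', 'redis://localhost:6379/0')

Sidekiq.configure_server do |config|
  config.redis = { url: redis_url }
end

Sidekiq.configure_client do |config|
  config.redis = { url: redis_url }
end
```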

data/lib/sidekiq/scheduled.rb CHANGED
@@ -97,9 +97,34 @@ module Sidekiq
  sleep 5
  end

- # Calculates a random interval that is ±50% the desired average.
  def random_poll_interval
- poll_interval_average * rand + poll_interval_average.to_f / 2
+ # We want one Sidekiq process to schedule jobs every N seconds. We have M processes
+ # and **don't** want to coordinate.
+ #
+ # So in N*M second timespan, we want each process to schedule once. The basic loop is:
+ #
+ # * sleep a random amount within that N*M timespan
+ # * wake up and schedule
+ #
+ # We want to avoid one edge case: imagine a set of 2 processes, scheduling every 5 seconds,
+ # so N*M = 10. Each process decides to randomly sleep 8 seconds, now we've failed to meet
+ # that 5 second average. Thankfully each schedule cycle will sleep randomly so the next
+ # iteration could see each process sleep for 1 second, undercutting our average.
+ #
+ # So below 10 processes, we special case and ensure the processes sleep closer to the average.
+ # In the example above, each process should schedule every 10 seconds on average. We special
+ # case smaller clusters to add 50% so they would sleep somewhere between 5 and 15 seconds.
+ # As we run more processes, the scheduling interval average will approach an even spread
+ # between 0 and poll interval so we don't need this artifical boost.
+ #
+ if process_count < 10
+ # For small clusters, calculate a random interval that is ±50% the desired average.
+ poll_interval_average * rand + poll_interval_average.to_f / 2
+ else
+ # With 10+ processes, we should have enough randomness to get decent polling
+ # across the entire timespan
+ poll_interval_average * rand
+ end
  end

  # We do our best to tune the poll interval to the size of the active Sidekiq
@@ -123,9 +148,13 @@ module Sidekiq
  # This minimizes a single point of failure by dispersing check-ins but without taxing
  # Redis if you run many Sidekiq processes.
  def scaled_poll_interval
+ process_count * Sidekiq.options[:average_scheduled_poll_interval]
+ end
+
+ def process_count
  pcount = Sidekiq::ProcessSet.new.size
  pcount = 1 if pcount == 0
- pcount * Sidekiq.options[:average_scheduled_poll_interval]
+ pcount
  end

  def initial_wait
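
The comment block in the `random_poll_interval` hunk explains the polling strategy; a worked example with illustrative numbers makes the two branches concrete (4 processes, `average_scheduled_poll_interval` of 5 seconds, so the scaled average is 20 seconds):

```ruby
# Worked example of the new random_poll_interval logic, illustrative numbers only.
process_count         = 4
poll_interval_average = process_count * 5   # scaled_poll_interval: 4 procs * 5s = 20s

interval =
  if process_count < 10
    # small cluster: a sample from [10, 30) seconds, i.e. the 20s average ±50%
    poll_interval_average * rand + poll_interval_average.to_f / 2
  else
    # 10+ processes: a plain uniform sample over [0, 20) is spread out enough
    poll_interval_average * rand
  end

puts format("next scheduled-job poll in %.1fs", interval)
```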

data/lib/sidekiq/testing.rb CHANGED
@@ -72,9 +72,7 @@ module Sidekiq

  class EmptyQueueError < RuntimeError; end

- class Client
- alias_method :raw_push_real, :raw_push
-
+ module TestingClient
  def raw_push(payloads)
  if Sidekiq::Testing.fake?
  payloads.each do |job|
@@ -92,11 +90,13 @@ module Sidekiq
  end
  true
  else
- raw_push_real(payloads)
+ super
  end
  end
  end

+ Sidekiq::Client.prepend TestingClient
+
  module Queues
  ##
  # The Queues class is only for testing the fake queue implementation.
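
Swapping the `alias_method` monkey-patch for a prepended `TestingClient` module means `super` reaches the real `Client#raw_push` when fake mode is off, instead of relying on an aliased copy of the method. The testing API itself is unchanged; for reference, a standard fake-mode sketch:

```ruby
require 'sidekiq/testing'

Sidekiq::Testing.fake!        # raw_push now stores jobs in an in-memory queue

class HardWorker
  include Sidekiq::Worker
  def perform(*args); end
end

HardWorker.perform_async(1, 2)
HardWorker.jobs.size          # => 1
HardWorker.drain              # runs the queued jobs inline and empties the queue

Sidekiq::Testing.disable!     # raw_push falls through to super (real Redis)
```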

data/lib/sidekiq/version.rb CHANGED
@@ -1,4 +1,4 @@
  # frozen_string_literal: true
  module Sidekiq
- VERSION = "5.1.3"
+ VERSION = "5.2.10"
  end
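
The version bump is the whole of this file; to pick it up, a Gemfile constraint along these lines (illustrative) is enough:

```ruby
# Gemfile
gem 'sidekiq', '~> 5.2', '>= 5.2.10'
```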

data/lib/sidekiq/web/action.rb CHANGED
@@ -15,7 +15,7 @@ module Sidekiq
  end

  def halt(res)
- throw :halt, res
+ throw :halt, [res, {"Content-Type" => "text/plain"}, [res.to_s]]
  end

  def redirect(location)

data/lib/sidekiq/web/application.rb CHANGED
@@ -7,6 +7,21 @@ module Sidekiq
  CONTENT_LENGTH = "Content-Length"
  CONTENT_TYPE = "Content-Type"
  REDIS_KEYS = %w(redis_version uptime_in_days connected_clients used_memory_human used_memory_peak_human)
+ CSP_HEADER = [
+ "default-src 'self' https: http:",
+ "child-src 'self'",
+ "connect-src 'self' https: http: wss: ws:",
+ "font-src 'self' https: http:",
+ "frame-src 'self'",
+ "img-src 'self' https: http: data:",
+ "manifest-src 'self'",
+ "media-src 'self'",
+ "object-src 'none'",
+ "script-src 'self' https: http: 'unsafe-inline'",
+ "style-src 'self' https: http: 'unsafe-inline'",
+ "worker-src 'self'",
+ "base-uri 'self'"
+ ].join('; ').freeze

  def initialize(klass)
  @klass = klass
@@ -30,7 +45,10 @@ module Sidekiq

  get "/" do
  @redis_info = redis_info.select{ |k, v| REDIS_KEYS.include? k }
- stats_history = Sidekiq::Stats::History.new((params['days'] || 30).to_i)
+ days = (params["days"] || 30).to_i
+ return halt(401) if days < 1 || days > 180
+
+ stats_history = Sidekiq::Stats::History.new(days)
  @processed_history = stats_history.processed
  @failed_history = stats_history.failed

@@ -181,6 +199,12 @@ module Sidekiq
  redirect "#{root_path}retries"
  end

+ post "/retries/all/kill" do
+ Sidekiq::RetrySet.new.kill_all
+
+ redirect "#{root_path}retries"
+ end
+
  post "/retries/:key" do
  job = Sidekiq::RetrySet.new.fetch(*parse_params(route_params[:key])).first

@@ -279,6 +303,7 @@ module Sidekiq
  "Content-Type" => "text/html",
  "Cache-Control" => "no-cache",
  "Content-Language" => action.locale,
+ "Content-Security-Policy" => CSP_HEADER
  }

  [200, headers, [resp]]
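
Three web-facing changes: every HTML response now carries the `Content-Security-Policy` header built above, the dashboard's `days` parameter is clamped to 1-180 (out-of-range values halt with a 401), and a new `POST /retries/all/kill` route sends the entire retry set to the dead set. That route is a thin wrapper over the public API; a sketch of the same operation from a console:

```ruby
# Sketch: what the new "Kill All" button / POST /retries/all/kill does,
# driven from the public API instead of the web UI.
require 'sidekiq/api'

retries = Sidekiq::RetrySet.new
retries.size       # jobs currently waiting to be retried
retries.kill_all   # move every one of them straight to the Dead set
```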

data/lib/sidekiq/web/helpers.rb CHANGED
@@ -121,7 +121,7 @@ module Sidekiq
  end

  def t(msg, options={})
- string = get_locale[msg] || msg
+ string = get_locale[msg] || strings('en')[msg] || msg
  if options.empty?
  string
  else
@@ -155,7 +155,7 @@ module Sidekiq
  end

  def namespace
- @@ns ||= Sidekiq.redis { |conn| conn.respond_to?(:namespace) ? conn.namespace : nil }
+ @ns ||= Sidekiq.redis { |conn| conn.respond_to?(:namespace) ? conn.namespace : nil }
  end

  def redis_info
@@ -184,7 +184,7 @@ module Sidekiq
  end

  def parse_params(params)
- score, jid = params.split("-")
+ score, jid = params.split("-", 2)
  [score.to_f, jid]
  end

@@ -207,9 +207,16 @@ module Sidekiq
  end

  def display_args(args, truncate_after_chars = 2000)
- args.map do |arg|
- h(truncate(to_display(arg), truncate_after_chars))
- end.join(", ")
+ return "Invalid job payload, args is nil" if args == nil
+ return "Invalid job payload, args must be an Array, not #{args.class.name}" if !args.is_a?(Array)
+
+ begin
+ args.map do |arg|
+ h(truncate(to_display(arg), truncate_after_chars))
+ end.join(", ")
+ rescue
+ "Illegal job arguments: #{h args.inspect}"
+ end
  end

  def csrf_tag

data/lib/sidekiq/worker.rb CHANGED
@@ -7,13 +7,13 @@ module Sidekiq
  # Include this module in your worker class and you can easily create
  # asynchronous jobs:
  #
- # class HardWorker
- # include Sidekiq::Worker
+ # class HardWorker
+ # include Sidekiq::Worker
  #
- # def perform(*args)
- # # do some work
+ # def perform(*args)
+ # # do some work
+ # end
  # end
- # end
  #
  # Then in your Rails app, you can do this:
  #
@@ -46,6 +46,11 @@ module Sidekiq
  @opts = opts
  end

+ def set(options)
+ @opts.merge!(options)
+ self
+ end
+
  def perform_async(*args)
  @klass.client_push(@opts.merge('args' => args, 'class' => @klass))
  end
@@ -66,6 +71,7 @@ module Sidekiq
  end

  module ClassMethods
+ ACCESSOR_MUTEX = Mutex.new

  def delay(*args)
  raise ArgumentError, "Do not call .delay on a Sidekiq::Worker class, call .perform_async"
@@ -148,10 +154,18 @@ module Sidekiq
  instance_writer = true

  attrs.each do |name|
+ synchronized_getter = "__synchronized_#{name}"
+
  singleton_class.instance_eval do
  undef_method(name) if method_defined?(name) || private_method_defined?(name)
  end
- define_singleton_method(name) { nil }
+
+ define_singleton_method(synchronized_getter) { nil }
+ singleton_class.class_eval do
+ private(synchronized_getter)
+ end
+
+ define_singleton_method(name) { ACCESSOR_MUTEX.synchronize { send synchronized_getter } }

  ivar = "@#{name}"

@@ -161,8 +175,10 @@ module Sidekiq
  end
  define_singleton_method("#{name}=") do |val|
  singleton_class.class_eval do
- undef_method(name) if method_defined?(name) || private_method_defined?(name)
- define_method(name) { val }
+ ACCESSOR_MUTEX.synchronize do
+ undef_method(synchronized_getter) if method_defined?(synchronized_getter) || private_method_defined?(synchronized_getter)
+ define_method(synchronized_getter) { val }
+ end
  end

  if singleton_class?
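
`Setter#set` now merges additional overrides and returns `self`, so per-call options can be chained before `perform_async`, and the class-level option accessors generated by `sidekiq_class_attribute` are wrapped in `ACCESSOR_MUTEX` so concurrent reads and writes (for example during code reloading) stay consistent. A usage sketch with illustrative queue names and options:

```ruby
class HardWorker
  include Sidekiq::Worker
  sidekiq_options queue: 'default', retry: 5

  def perform(name)
    # do some work
  end
end

HardWorker.set(queue: 'critical').perform_async('bob')

# set returns self, so overrides can be stacked before pushing the job:
HardWorker.set(queue: 'critical').set(retry: false).perform_async('ann')
```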