inst-jobs 2.2.1 → 2.4.0

Files changed (98)
  1. checksums.yaml +4 -4
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
  3. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
  4. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
  5. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
  6. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
  7. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
  8. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
  9. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
  10. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
  11. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
  12. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
  13. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
  14. data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
  15. data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
  16. data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
  17. data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
  18. data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
  19. data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
  20. data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
  21. data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
  22. data/exe/inst_jobs +3 -2
  23. data/lib/delayed/backend/active_record.rb +187 -159
  24. data/lib/delayed/backend/base.rb +80 -69
  25. data/lib/delayed/batch.rb +11 -9
  26. data/lib/delayed/cli.rb +98 -84
  27. data/lib/delayed/core_ext/kernel.rb +4 -2
  28. data/lib/delayed/daemon.rb +70 -74
  29. data/lib/delayed/job_tracking.rb +26 -25
  30. data/lib/delayed/lifecycle.rb +27 -24
  31. data/lib/delayed/log_tailer.rb +17 -17
  32. data/lib/delayed/logging.rb +13 -16
  33. data/lib/delayed/message_sending.rb +42 -51
  34. data/lib/delayed/performable_method.rb +5 -7
  35. data/lib/delayed/periodic.rb +66 -65
  36. data/lib/delayed/plugin.rb +2 -4
  37. data/lib/delayed/pool.rb +198 -192
  38. data/lib/delayed/server/helpers.rb +6 -6
  39. data/lib/delayed/server.rb +51 -54
  40. data/lib/delayed/settings.rb +93 -81
  41. data/lib/delayed/testing.rb +21 -22
  42. data/lib/delayed/version.rb +1 -1
  43. data/lib/delayed/work_queue/in_process.rb +21 -17
  44. data/lib/delayed/work_queue/parent_process/client.rb +55 -53
  45. data/lib/delayed/work_queue/parent_process/server.rb +215 -209
  46. data/lib/delayed/work_queue/parent_process.rb +52 -53
  47. data/lib/delayed/worker/consul_health_check.rb +21 -19
  48. data/lib/delayed/worker/health_check.rb +21 -12
  49. data/lib/delayed/worker/null_health_check.rb +3 -1
  50. data/lib/delayed/worker/process_helper.rb +8 -9
  51. data/lib/delayed/worker.rb +271 -261
  52. data/lib/delayed/yaml_extensions.rb +12 -10
  53. data/lib/delayed_job.rb +37 -37
  54. data/lib/inst-jobs.rb +1 -1
  55. data/spec/active_record_job_spec.rb +142 -138
  56. data/spec/delayed/cli_spec.rb +7 -7
  57. data/spec/delayed/daemon_spec.rb +8 -8
  58. data/spec/delayed/message_sending_spec.rb +8 -9
  59. data/spec/delayed/periodic_spec.rb +13 -12
  60. data/spec/delayed/server_spec.rb +38 -38
  61. data/spec/delayed/settings_spec.rb +26 -25
  62. data/spec/delayed/work_queue/in_process_spec.rb +7 -7
  63. data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
  64. data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
  65. data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
  66. data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
  67. data/spec/delayed/worker/health_check_spec.rb +51 -49
  68. data/spec/delayed/worker_spec.rb +28 -25
  69. data/spec/gemfiles/52.gemfile +5 -3
  70. data/spec/gemfiles/52.gemfile.lock +240 -0
  71. data/spec/gemfiles/60.gemfile +5 -3
  72. data/spec/gemfiles/60.gemfile.lock +246 -0
  73. data/spec/gemfiles/61.gemfile +9 -0
  74. data/spec/sample_jobs.rb +45 -15
  75. data/spec/shared/delayed_batch.rb +74 -67
  76. data/spec/shared/delayed_method.rb +143 -102
  77. data/spec/shared/performable_method.rb +39 -38
  78. data/spec/shared/shared_backend.rb +520 -437
  79. data/spec/shared/testing.rb +14 -14
  80. data/spec/shared/worker.rb +156 -148
  81. data/spec/shared_jobs_specs.rb +13 -13
  82. data/spec/spec_helper.rb +43 -51
  83. metadata +101 -70
  84. data/lib/delayed/backend/redis/bulk_update.lua +0 -50
  85. data/lib/delayed/backend/redis/destroy_job.lua +0 -2
  86. data/lib/delayed/backend/redis/enqueue.lua +0 -29
  87. data/lib/delayed/backend/redis/fail_job.lua +0 -5
  88. data/lib/delayed/backend/redis/find_available.lua +0 -3
  89. data/lib/delayed/backend/redis/functions.rb +0 -59
  90. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
  91. data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
  92. data/lib/delayed/backend/redis/job.rb +0 -535
  93. data/lib/delayed/backend/redis/set_running.lua +0 -5
  94. data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
  95. data/spec/gemfiles/42.gemfile +0 -7
  96. data/spec/gemfiles/50.gemfile +0 -7
  97. data/spec/gemfiles/51.gemfile +0 -7
  98. data/spec/redis_job_spec.rb +0 -148

data/lib/delayed/server/helpers.rb (+6 -6)
@@ -8,21 +8,21 @@ module Delayed
       end
 
       def url_path(*path_parts)
-        [path_prefix, path_parts].join('/').squeeze('/')
+        [path_prefix, path_parts].join("/").squeeze("/")
      end
 
       def path_prefix
-        request.env['SCRIPT_NAME']
+        request.env["SCRIPT_NAME"]
       end
 
       def render_javascript_env
         {
           Routes: {
             root: path_prefix,
-            running: url_path('running'),
-            tags: url_path('tags'),
-            jobs: url_path('jobs'),
-            bulkUpdate: url_path('bulk_update'),
+            running: url_path("running"),
+            tags: url_path("tags"),
+            jobs: url_path("jobs"),
+            bulkUpdate: url_path("bulk_update")
           }
         }.to_json
       end
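
For reference, url_path just joins the dashboard's mount prefix with the given segments. A standalone sketch of that behaviour (the "/jobs_dashboard" prefix is an illustrative mount point, not something the gem defines):

    path_prefix = "/jobs_dashboard"                    # normally request.env["SCRIPT_NAME"]
    [path_prefix, ["running"]].join("/").squeeze("/")  # => "/jobs_dashboard/running"
    [path_prefix, ["tags"]].join("/").squeeze("/")     # => "/jobs_dashboard/tags"

The squeeze("/") call collapses doubled slashes, so a trailing slash on the prefix is harmless.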

data/lib/delayed/server.rb (+51 -54)
@@ -1,34 +1,32 @@
 # frozen_string_literal: true
 
-require 'sinatra/base'
-require 'sinatra/json'
-require 'json'
-require 'delayed_job'
+require "sinatra/base"
+require "sinatra/json"
+require "json"
+require "delayed_job"
 
 module Delayed
   class Server < Sinatra::Base
     APP_DIR = File.dirname(File.expand_path(__FILE__))
-    set :views, File.join(APP_DIR, 'server', 'views')
-    set :public_folder, File.join(APP_DIR, 'server', 'public')
+    set :views, File.join(APP_DIR, "server", "views")
+    set :public_folder, File.join(APP_DIR, "server", "public")
 
-    def initialize(*args, &block)
+    def initialize(*args)
       super()
       # Rails will take care of establishing the DB connection for us if there is
       # an application present
       if using_active_record? && !ActiveRecord::Base.connected?
-        ActiveRecord::Base.establish_connection(ENV['DATABASE_URL'])
+        ActiveRecord::Base.establish_connection(ENV["DATABASE_URL"])
       end
 
-      @allow_update = args.length > 0 && args[0][:update]
+      @allow_update = args.length.positive? && args[0][:update]
     end
 
     def using_active_record?
       Delayed::Job == Delayed::Backend::ActiveRecord::Job
     end
 
-    def allow_update
-      @allow_update
-    end
+    attr_reader :allow_update
 
     # Ensure we're connected to the DB before processing the request
     before do
@@ -43,72 +41,72 @@ module Delayed
     end
 
     configure :development do
-      require 'sinatra/reloader'
+      require "sinatra/reloader"
       register Sinatra::Reloader
     end
 
     helpers do
       # this can't get required until the class has been opened for the first time
-      require 'delayed/server/helpers'
+      require "delayed/server/helpers"
       include Delayed::Server::Helpers
     end
 
-    get '/' do
+    get "/" do
       erb :index
     end
 
-    get '/running' do
+    get "/running" do
       content_type :json
       json({
-        draw: params['draw'].to_i,
-        recordsTotal: Delayed::Job.running.count,
-        recordsFiltered: Delayed::Job.running.count,
-        data: Delayed::Job.running_jobs.map{ |j|
-          j.as_json(include_root: false, except: [:handler, :last_error])
-        },
-      })
+             draw: params["draw"].to_i,
+             recordsTotal: Delayed::Job.running.count,
+             recordsFiltered: Delayed::Job.running.count,
+             data: Delayed::Job.running_jobs.map do |j|
+               j.as_json(include_root: false, except: %i[handler last_error])
+             end
+           })
     end
 
-    get '/tags' do
+    get "/tags" do
      content_type :json
       json({
-        draw: params['draw'].to_i,
-        data: Delayed::Job.tag_counts('current', 10)
-      })
+             draw: params["draw"].to_i,
+             data: Delayed::Job.tag_counts("current", 10)
+           })
     end
 
     DEFAULT_PAGE_SIZE = 10
     MAX_PAGE_SIZE = 100
-    get '/jobs' do
+    get "/jobs" do
       content_type :json
-      flavor = params['flavor'] || 'current'
+      flavor = params["flavor"] || "current"
      page_size = extract_page_size
-      offset = Integer(params['start'] || 0)
+      offset = Integer(params["start"] || 0)
       case flavor
-      when 'id'
-        jobs = Delayed::Job.where(id: params['search_term'])
+      when "id"
+        jobs = Delayed::Job.where(id: params["search_term"])
        total_records = 1
-      when 'future', 'current', 'failed'
+      when "future", "current", "failed"
        jobs = Delayed::Job.list_jobs(flavor, page_size, offset)
-        total_records = Delayed::Job.jobs_count(flavor)
+        total_records = Delayed::Job.jobs_count(flavor)
       else
-        query = params['search_term']
-        if query.present?
-          jobs = Delayed::Job.list_jobs(flavor, page_size, offset, query)
-        else
-          jobs = []
-        end
-        total_records = Delayed::Job.jobs_count(flavor, query)
+        query = params["search_term"]
+        jobs = if query.present?
+                 Delayed::Job.list_jobs(flavor, page_size, offset, query)
+               else
+                 []
+               end
+        total_records = Delayed::Job.jobs_count(flavor, query)
      end
      json({
-        draw: params['draw'].to_i,
-        recordsTotal: total_records,
-        recordsFiltered: jobs.size,
-        data: build_jobs_json(jobs),
-      })
+             draw: params["draw"].to_i,
+             recordsTotal: total_records,
+             recordsFiltered: jobs.size,
+             data: build_jobs_json(jobs)
+           })
    end
 
-    post '/bulk_update' do
+    post "/bulk_update" do
      content_type :json
 
      halt 403 unless @allow_update
@@ -117,25 +115,24 @@ module Delayed
       Delayed::Job.bulk_update(payload[:action], { ids: payload[:ids] })
 
       json({
-        success: true
-      })
+             success: true
+           })
     end
 
     private
 
     def extract_page_size
-      page_size = Integer(params['length'] || DEFAULT_PAGE_SIZE)
+      page_size = Integer(params["length"] || DEFAULT_PAGE_SIZE)
       # if dataTables wants all of the records it will send us -1 but we don't
       # want the potential to kill our servers with this request so we'll limit it
       page_size = DEFAULT_PAGE_SIZE if page_size == -1
       [page_size, MAX_PAGE_SIZE].min
     end
 
-
     def build_jobs_json(jobs)
-      json = jobs.map{ |j|
-        j.as_json(root: false, except: [:handler, :last_error])
-      }
+      jobs.map do |j|
+        j.as_json(root: false, except: %i[handler last_error])
+      end
     end
   end
 end
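
For reference, Delayed::Server is an ordinary Rack app, so the dashboard can also be run on its own. A minimal sketch of a rackup file, assuming the gem is on the load path; only the update: option and the ENV["DATABASE_URL"] fallback come from the diff above, the rest is illustrative:

    # config.ru -- minimal sketch; start with `rackup`
    require "delayed/server"

    # With no Rails app present, initialize falls back to
    # ActiveRecord::Base.establish_connection(ENV["DATABASE_URL"]).
    # Passing update: true sets @allow_update, which gates POST /bulk_update
    # (that route halts with 403 otherwise).
    run Delayed::Server.new(update: true)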

data/lib/delayed/settings.rb (+93 -81)
@@ -1,8 +1,8 @@
 # frozen_string_literal: true
 
-require 'yaml'
-require 'erb'
-require 'active_support/core_ext/hash/indifferent_access'
+require "yaml"
+require "erb"
+require "active_support/core_ext/hash/indifferent_access"
 
 module Delayed
   module Settings
@@ -12,6 +12,10 @@ module Delayed
       :disable_periodic_jobs,
       :disable_automatic_orphan_unlocking,
       :fetch_batch_size,
+      # this is a transitional setting, so that you don't have a time where a
+      # singleton switches to using the singleton column, but there are old
+      # jobs that only used strand
+      :infer_strand_from_singleton,
       :kill_workers_on_exit,
       :last_ditch_logfile,
       :max_attempts,
@@ -24,23 +28,13 @@ module Delayed
       :slow_exit_timeout,
       :worker_health_check_type,
       :worker_health_check_config,
-      :worker_procname_prefix,
-    ]
-    SETTINGS_WITH_ARGS = [
-      :job_detailed_log_format,
-      :num_strands
-    ]
-
-    SETTINGS.each do |setting|
-      mattr_writer(setting)
-      self.send("#{setting}=", nil)
-      define_singleton_method(setting) do
-        val = class_variable_get(:"@@#{setting}")
-        val.respond_to?(:call) ? val.call() : val
-      end
-    end
+      :worker_procname_prefix
+    ].freeze
 
-    mattr_accessor(*SETTINGS_WITH_ARGS)
+    SETTINGS_WITH_ARGS = %i[
+      job_detailed_log_format
+      num_strands
+    ].freeze
 
     PARENT_PROCESS_DEFAULTS = {
       server_socket_timeout: 10.0,
@@ -50,17 +44,86 @@ module Delayed
 
       # We'll accept a partial, relative path and assume we want it inside
       # Rails.root with inst-jobs.sock appended if provided a directory.
-      server_address: 'tmp',
+      server_address: "tmp"
     }.with_indifferent_access.freeze
 
-    mattr_reader(:parent_process)
-    @@parent_process = PARENT_PROCESS_DEFAULTS.dup
+    class << self
+      attr_accessor(*SETTINGS_WITH_ARGS)
+      attr_reader :parent_process
+
+      SETTINGS.each do |setting|
+        attr_writer setting
+
+        define_method(setting) do
+          val = instance_variable_get(:"@#{setting}")
+          val.respond_to?(:call) ? val.call : val
+        end
+      end
+
+      def queue=(queue_name)
+        raise ArgumentError, "queue_name must not be blank" if queue_name.blank?
+
+        @queue = queue_name
+      end
+
+      def worker_config(config_filename = nil)
+        config_filename ||= default_worker_config_name
+        config = YAML.load(ERB.new(File.read(config_filename)).result)
+        env = Rails.env || "development"
+        config = config[env] || config["default"]
+        # Backwards compatibility from when the config was just an array of queues
+        config = { workers: config } if config.is_a?(Array)
+        unless config.is_a?(Hash)
+          raise ArgumentError,
+                "Invalid config file #{config_filename}"
+        end
+        config = config.with_indifferent_access
+        config[:workers].map! do |worker_config|
+          config.except(:workers).merge(worker_config.with_indifferent_access)
+        end
+        config
+      end
+
+      def apply_worker_config!(config)
+        SETTINGS.each do |setting|
+          send("#{setting}=", config[setting.to_s]) if config.key?(setting.to_s)
+        end
+        if config.key?("parent_process_client_timeout")
+          parent_process.client_timeout = config["parent_process_client_timeout"]
+        end
+        self.parent_process = config["parent_process"] if config.key?("parent_process")
+      end
+
+      def default_worker_config_name
+        expand_rails_path("config/delayed_jobs.yml")
+      end
+
+      # Expands rails-relative paths, without depending on rails being loaded.
+      def expand_rails_path(path)
+        root = if defined?(Rails) && Rails.root
+                 "#{Rails.root}Gemfile".to_s
+               else
+                 ENV.fetch("BUNDLE_GEMFILE", "#{Dir.pwd}/Gemfile")
+               end
+        File.expand_path("../#{path}", root)
+      end
+
+      def parent_process_client_timeout=(val)
+        parent_process["server_socket_timeout"] = Integer(val)
+      end
+
+      def parent_process=(new_config)
+        raise "Parent process configurations must be a hash!" unless new_config.is_a?(Hash)
+
+        @parent_process = PARENT_PROCESS_DEFAULTS.merge(new_config)
+      end
 
-    def self.queue=(queue_name)
-      raise(ArgumentError, "queue_name must not be blank") if queue_name.blank?
-      @@queue = queue_name
+      def worker_health_check_config=(new_config)
+        @worker_health_check_config = (new_config || {}).with_indifferent_access
+      end
     end
 
+    self.parent_process = PARENT_PROCESS_DEFAULTS.dup
     self.queue = "queue"
     self.max_attempts = 1
     self.sleep_delay = 2.0
@@ -69,9 +132,11 @@ module Delayed
     self.select_random_from_batch = false
     self.silence_periodic_log = false
 
-    self.num_strands = ->(strand_name){ nil }
-    self.default_job_options = ->{ Hash.new }
-    self.job_detailed_log_format = ->(job){ job.to_json(include_root: false, only: %w(tag strand priority attempts created_at max_attempts source)) }
+    self.num_strands = ->(_strand_name) {}
+    self.default_job_options = -> { {} }
+    self.job_detailed_log_format = lambda { |job|
+      job.to_json(include_root: false, only: %w[tag strand priority attempts created_at max_attempts source])
+    }
 
     # Send workers KILL after QUIT if they haven't exited within the
     # slow_exit_timeout
@@ -80,58 +145,5 @@ module Delayed
 
     self.worker_health_check_type = :none
     self.worker_health_check_config = {}
-
-    def self.worker_config(config_filename = nil)
-      config_filename ||= default_worker_config_name
-      config = YAML.load(ERB.new(File.read(config_filename)).result)
-      env = defined?(RAILS_ENV) ? RAILS_ENV : ENV['RAILS_ENV'] || 'development'
-      config = config[env] || config['default']
-      # Backwards compatibility from when the config was just an array of queues
-      config = { :workers => config } if config.is_a?(Array)
-      unless config && config.is_a?(Hash)
-        raise ArgumentError,
-          "Invalid config file #{config_filename}"
-      end
-      config = config.with_indifferent_access
-      config[:workers].map! do |worker_config|
-        config.except(:workers).merge(worker_config.with_indifferent_access)
-      end
-      config
-    end
-
-    def self.apply_worker_config!(config)
-      SETTINGS.each do |setting|
-        self.send("#{setting}=", config[setting.to_s]) if config.key?(setting.to_s)
-      end
-      parent_process.client_timeout = config['parent_process_client_timeout'] if config.key?('parent_process_client_timeout')
-      self.parent_process = config['parent_process'] if config.key?('parent_process')
-    end
-
-    def self.default_worker_config_name
-      expand_rails_path("config/delayed_jobs.yml")
-    end
-
-    # Expands rails-relative paths, without depending on rails being loaded.
-    def self.expand_rails_path(path)
-      root = if defined?(Rails) && Rails.root
-        (Rails.root+"Gemfile").to_s
-      else
-        ENV.fetch('BUNDLE_GEMFILE', Dir.pwd+"/Gemfile")
-      end
-      File.expand_path("../#{path}", root)
-    end
-
-    def self.parent_process_client_timeout=(val)
-      parent_process['server_socket_timeout'] = Integer(val)
-    end
-
-    def self.parent_process=(new_config)
-      raise 'Parent process configurations must be a hash!' unless Hash === new_config
-      @@parent_process = PARENT_PROCESS_DEFAULTS.merge(new_config)
-    end
-
-    def self.worker_health_check_config=(new_config)
-      @@worker_health_check_config = (new_config || {}).with_indifferent_access
-    end
   end
 end
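
As a usage note, the class << self rewrite above keeps the same public surface (readers resolve callables, worker_config/apply_worker_config! load YAML config), now backed by instance variables instead of @@ class variables. A minimal sketch of configuring it from a Rails initializer; the file path, env var, and values are illustrative, not gem defaults:

    # config/initializers/delayed_jobs.rb -- illustrative values only
    Delayed::Settings.sleep_delay = 5.0

    # SETTINGS entries accept a callable; the generated reader resolves it
    # with val.call on every read (see the define_method block above).
    # JOB_MAX_ATTEMPTS is a hypothetical env var.
    Delayed::Settings.max_attempts = -> { ENV.fetch("JOB_MAX_ATTEMPTS", "3").to_i }

    # The transitional flag added in this release:
    Delayed::Settings.infer_strand_from_singleton = true

    # num_strands is one of the SETTINGS_WITH_ARGS: stored as-is and called
    # by the gem with a strand name.
    Delayed::Settings.num_strands = ->(strand_name) { strand_name == "slow" ? 2 : nil }

    # Load config/delayed_jobs.yml (ERB + YAML, keyed by Rails.env with a
    # "default" fallback) and apply it:
    Delayed::Settings.apply_worker_config!(Delayed::Settings.worker_config)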

data/lib/delayed/testing.rb (+21 -22)
@@ -1,34 +1,33 @@
 # frozen_string_literal: true
 
 module Delayed
-  module Testing
-    def self.run_job(job)
-      Delayed::Worker.new.perform(job)
-    end
+  module Testing
+    def self.run_job(job)
+      Delayed::Worker.new.perform(job)
+    end
 
-    def self.drain
-      while job = Delayed::Job.get_and_lock_next_available(
-        'spec run_jobs',
+    def self.drain
+      while (job = Delayed::Job.get_and_lock_next_available(
+        "spec run_jobs",
         Delayed::Settings.queue,
         0,
-        Delayed::MAX_PRIORITY)
-        run_job(job)
+        Delayed::MAX_PRIORITY
+      ))
+        run_job(job)
+      end
     end
-    end
 
-    def self.track_created
-      job_tracking = JobTracking.track { yield }
-      job_tracking.created
-    end
+    def self.track_created(&block)
+      job_tracking = JobTracking.track(&block)
+      job_tracking.created
+    end
 
-    def self.clear_all!
-      case Delayed::Job.name
-      when /Redis/
-        Delayed::Job.redis.flushdb
-      when /ActiveRecord/
-        Delayed::Job.delete_all
-        Delayed::Job::Failed.delete_all
+    def self.clear_all!
+      case Delayed::Job.name
+      when /ActiveRecord/
+        Delayed::Job.delete_all
+        Delayed::Job::Failed.delete_all
+      end
     end
   end
 end
-end
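
Delayed::Testing remains the synchronous test harness, now ActiveRecord-only with the Redis branch removed. A minimal sketch of using it in an RSpec example; WelcomeMailer and its send_welcome method are hypothetical, while drain, track_created and clear_all! come from the file above:

    require "delayed/testing"

    RSpec.describe "welcome jobs" do
      # clear_all! wipes delayed_jobs and failed_jobs between examples
      before { Delayed::Testing.clear_all! }

      it "runs everything that was enqueued" do
        jobs = Delayed::Testing.track_created do
          WelcomeMailer.delay.send_welcome(user_id: 42) # hypothetical job
        end
        expect(jobs.size).to eq(1)

        # drain keeps locking and performing jobs until the queue is empty
        Delayed::Testing.drain
      end
    end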