inst-jobs 2.3.1 → 2.4.1
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +182 -148
- data/lib/delayed/backend/base.rb +80 -69
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +42 -51
- data/lib/delayed/performable_method.rb +5 -7
- data/lib/delayed/periodic.rb +66 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -192
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +215 -209
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +21 -12
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -261
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -38
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +128 -135
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +8 -8
- data/spec/delayed/message_sending_spec.rb +8 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -7
- data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +51 -49
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/gemfiles/52.gemfile +5 -3
- data/spec/gemfiles/52.gemfile.lock +240 -0
- data/spec/gemfiles/60.gemfile +5 -3
- data/spec/gemfiles/60.gemfile.lock +246 -0
- data/spec/gemfiles/61.gemfile +5 -3
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +537 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +155 -147
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +43 -40
- metadata +73 -52
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -528
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/redis_job_spec.rb +0 -148
data/lib/delayed/server/helpers.rb
CHANGED
@@ -8,21 +8,21 @@ module Delayed
     end

     def url_path(*path_parts)
-      [path_prefix, path_parts].join(
+      [path_prefix, path_parts].join("/").squeeze("/")
     end

     def path_prefix
-      request.env[
+      request.env["SCRIPT_NAME"]
     end

     def render_javascript_env
       {
         Routes: {
           root: path_prefix,
-          running: url_path(
-          tags: url_path(
-          jobs: url_path(
-          bulkUpdate: url_path(
+          running: url_path("running"),
+          tags: url_path("tags"),
+          jobs: url_path("jobs"),
+          bulkUpdate: url_path("bulk_update")
         }
       }.to_json
     end
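The hunk touches the small view helpers: url_path joins the requested segments onto the mount prefix (taken from request.env["SCRIPT_NAME"]) and squeezes repeated slashes, so the routes map cleanly wherever the app is mounted. A minimal standalone sketch of that behaviour, with a hard-coded "/jobs/" prefix standing in for the real SCRIPT_NAME value:

```ruby
# Standalone sketch of the url_path behaviour shown above; the hard-coded
# prefix is an assumption for illustration only.
def url_path(*path_parts)
  path_prefix = "/jobs/" # stand-in for request.env["SCRIPT_NAME"]
  [path_prefix, path_parts].join("/").squeeze("/")
end

puts url_path("running")        # => "/jobs/running"
puts url_path("jobs", "failed") # => "/jobs/jobs/failed"
```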
data/lib/delayed/server.rb
CHANGED
@@ -1,34 +1,32 @@
 # frozen_string_literal: true

-require
-require
-require
-require
+require "sinatra/base"
+require "sinatra/json"
+require "json"
+require "delayed_job"

 module Delayed
   class Server < Sinatra::Base
     APP_DIR = File.dirname(File.expand_path(__FILE__))
-    set :views, File.join(APP_DIR,
-    set :public_folder, File.join(APP_DIR,
+    set :views, File.join(APP_DIR, "server", "views")
+    set :public_folder, File.join(APP_DIR, "server", "public")

-    def initialize(*args
+    def initialize(*args)
       super()
       # Rails will take care of establishing the DB connection for us if there is
       # an application present
       if using_active_record? && !ActiveRecord::Base.connected?
-        ActiveRecord::Base.establish_connection(ENV[
+        ActiveRecord::Base.establish_connection(ENV["DATABASE_URL"])
       end

-      @allow_update = args.length
+      @allow_update = args.length.positive? && args[0][:update]
     end

     def using_active_record?
       Delayed::Job == Delayed::Backend::ActiveRecord::Job
     end

-
-      @allow_update
-    end
+    attr_reader :allow_update

     # Ensure we're connected to the DB before processing the request
     before do
@@ -43,72 +41,72 @@ module Delayed
     end

     configure :development do
-      require
+      require "sinatra/reloader"
       register Sinatra::Reloader
     end

     helpers do
       # this can't get required until the class has been opened for the first time
-      require
+      require "delayed/server/helpers"
       include Delayed::Server::Helpers
     end

-    get
+    get "/" do
       erb :index
     end

-    get
+    get "/running" do
       content_type :json
       json({
-
-
-
-
-
-
-
+        draw: params["draw"].to_i,
+        recordsTotal: Delayed::Job.running.count,
+        recordsFiltered: Delayed::Job.running.count,
+        data: Delayed::Job.running_jobs.map do |j|
+          j.as_json(include_root: false, except: %i[handler last_error])
+        end
+      })
     end

-    get
+    get "/tags" do
       content_type :json
       json({
-
-
-
+        draw: params["draw"].to_i,
+        data: Delayed::Job.tag_counts("current", 10)
+      })
     end

     DEFAULT_PAGE_SIZE = 10
     MAX_PAGE_SIZE = 100
-    get
+    get "/jobs" do
       content_type :json
-      flavor = params[
+      flavor = params["flavor"] || "current"
       page_size = extract_page_size
-      offset = Integer(params[
+      offset = Integer(params["start"] || 0)
       case flavor
-      when
-        jobs = Delayed::Job.where(id: params[
+      when "id"
+        jobs = Delayed::Job.where(id: params["search_term"])
         total_records = 1
-      when
+      when "future", "current", "failed"
         jobs = Delayed::Job.list_jobs(flavor, page_size, offset)
-        total_records =
+        total_records = Delayed::Job.jobs_count(flavor)
       else
-        query = params[
-        if query.present?
-
-
-
-
-        total_records =
+        query = params["search_term"]
+        jobs = if query.present?
+                 Delayed::Job.list_jobs(flavor, page_size, offset, query)
+               else
+                 []
+               end
+        total_records = Delayed::Job.jobs_count(flavor, query)
       end
       json({
-
-
-
-
-
+        draw: params["draw"].to_i,
+        recordsTotal: total_records,
+        recordsFiltered: jobs.size,
+        data: build_jobs_json(jobs)
+      })
     end

-    post
+    post "/bulk_update" do
       content_type :json

       halt 403 unless @allow_update
@@ -117,25 +115,24 @@ module Delayed
       Delayed::Job.bulk_update(payload[:action], { ids: payload[:ids] })

       json({
-
-
+        success: true
+      })
     end

     private

     def extract_page_size
-      page_size = Integer(params[
+      page_size = Integer(params["length"] || DEFAULT_PAGE_SIZE)
       # if dataTables wants all of the records it will send us -1 but we don't
       # want the potential to kill our servers with this request so we'll limit it
       page_size = DEFAULT_PAGE_SIZE if page_size == -1
       [page_size, MAX_PAGE_SIZE].min
     end

-
     def build_jobs_json(jobs)
-
-      j.as_json(root: false, except: [
-
+      jobs.map do |j|
+        j.as_json(root: false, except: %i[handler last_error])
+      end
     end
   end
 end
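Within the /jobs changes, extract_page_size is the guard worth calling out: the DataTables "length" parameter is parsed, a -1 request for "everything" is downgraded to the default, and the result is capped at MAX_PAGE_SIZE. A small sketch of that logic outside the Sinatra app, using the same constants as the diff:

```ruby
# Sketch of the page-size clamping used by GET /jobs (re-implemented here
# outside the Sinatra app for illustration).
DEFAULT_PAGE_SIZE = 10
MAX_PAGE_SIZE = 100

def extract_page_size(length_param)
  page_size = Integer(length_param || DEFAULT_PAGE_SIZE)
  # DataTables sends -1 when it wants every record; fall back to the default
  # instead of letting one request pull the whole table.
  page_size = DEFAULT_PAGE_SIZE if page_size == -1
  [page_size, MAX_PAGE_SIZE].min
end

p extract_page_size(nil)   # => 10
p extract_page_size("-1")  # => 10
p extract_page_size("500") # => 100
```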
data/lib/delayed/settings.rb
CHANGED
@@ -1,8 +1,8 @@
 # frozen_string_literal: true

-require
-require
-require
+require "yaml"
+require "erb"
+require "active_support/core_ext/hash/indifferent_access"

 module Delayed
   module Settings
@@ -12,6 +12,10 @@ module Delayed
       :disable_periodic_jobs,
       :disable_automatic_orphan_unlocking,
       :fetch_batch_size,
+      # this is a transitional setting, so that you don't have a time where a
+      # singleton switches to using the singleton column, but there are old
+      # jobs that only used strand
+      :infer_strand_from_singleton,
       :kill_workers_on_exit,
       :last_ditch_logfile,
       :max_attempts,
@@ -24,23 +28,13 @@ module Delayed
       :slow_exit_timeout,
       :worker_health_check_type,
       :worker_health_check_config,
-      :worker_procname_prefix
-    ]
-    SETTINGS_WITH_ARGS = [
-      :job_detailed_log_format,
-      :num_strands
-    ]
-
-    SETTINGS.each do |setting|
-      mattr_writer(setting)
-      self.send("#{setting}=", nil)
-      define_singleton_method(setting) do
-        val = class_variable_get(:"@@#{setting}")
-        val.respond_to?(:call) ? val.call() : val
-      end
-    end
+      :worker_procname_prefix
+    ].freeze

-
+    SETTINGS_WITH_ARGS = %i[
+      job_detailed_log_format
+      num_strands
+    ].freeze

     PARENT_PROCESS_DEFAULTS = {
       server_socket_timeout: 10.0,
@@ -50,17 +44,86 @@ module Delayed

     # We'll accept a partial, relative path and assume we want it inside
     # Rails.root with inst-jobs.sock appended if provided a directory.
-      server_address:
+      server_address: "tmp"
     }.with_indifferent_access.freeze

-
-
+    class << self
+      attr_accessor(*SETTINGS_WITH_ARGS)
+      attr_reader :parent_process
+
+      SETTINGS.each do |setting|
+        attr_writer setting
+
+        define_method(setting) do
+          val = instance_variable_get(:"@#{setting}")
+          val.respond_to?(:call) ? val.call : val
+        end
+      end
+
+      def queue=(queue_name)
+        raise ArgumentError, "queue_name must not be blank" if queue_name.blank?
+
+        @queue = queue_name
+      end
+
+      def worker_config(config_filename = nil)
+        config_filename ||= default_worker_config_name
+        config = YAML.load(ERB.new(File.read(config_filename)).result)
+        env = Rails.env || "development"
+        config = config[env] || config["default"]
+        # Backwards compatibility from when the config was just an array of queues
+        config = { workers: config } if config.is_a?(Array)
+        unless config.is_a?(Hash)
+          raise ArgumentError,
+                "Invalid config file #{config_filename}"
+        end
+        config = config.with_indifferent_access
+        config[:workers].map! do |worker_config|
+          config.except(:workers).merge(worker_config.with_indifferent_access)
+        end
+        config
+      end
+
+      def apply_worker_config!(config)
+        SETTINGS.each do |setting|
+          send("#{setting}=", config[setting.to_s]) if config.key?(setting.to_s)
+        end
+        if config.key?("parent_process_client_timeout")
+          parent_process.client_timeout = config["parent_process_client_timeout"]
+        end
+        self.parent_process = config["parent_process"] if config.key?("parent_process")
+      end
+
+      def default_worker_config_name
+        expand_rails_path("config/delayed_jobs.yml")
+      end
+
+      # Expands rails-relative paths, without depending on rails being loaded.
+      def expand_rails_path(path)
+        root = if defined?(Rails) && Rails.root
+                 "#{Rails.root}Gemfile".to_s
+               else
+                 ENV.fetch("BUNDLE_GEMFILE", "#{Dir.pwd}/Gemfile")
+               end
+        File.expand_path("../#{path}", root)
+      end
+
+      def parent_process_client_timeout=(val)
+        parent_process["server_socket_timeout"] = Integer(val)
+      end
+
+      def parent_process=(new_config)
+        raise "Parent process configurations must be a hash!" unless new_config.is_a?(Hash)
+
+        @parent_process = PARENT_PROCESS_DEFAULTS.merge(new_config)
+      end

-
-
-
+      def worker_health_check_config=(new_config)
+        @worker_health_check_config = (new_config || {}).with_indifferent_access
+      end
     end

+    self.parent_process = PARENT_PROCESS_DEFAULTS.dup
     self.queue = "queue"
     self.max_attempts = 1
     self.sleep_delay = 2.0
@@ -69,9 +132,11 @@ module Delayed
     self.select_random_from_batch = false
     self.silence_periodic_log = false

-    self.num_strands = ->(
-    self.default_job_options = ->{
-    self.job_detailed_log_format =
+    self.num_strands = ->(_strand_name) {}
+    self.default_job_options = -> { {} }
+    self.job_detailed_log_format = lambda { |job|
+      job.to_json(include_root: false, only: %w[tag strand priority attempts created_at max_attempts source])
+    }

     # Send workers KILL after QUIT if they haven't exited within the
     # slow_exit_timeout
@@ -80,58 +145,5 @@ module Delayed

     self.worker_health_check_type = :none
     self.worker_health_check_config = {}
-
-    def self.worker_config(config_filename = nil)
-      config_filename ||= default_worker_config_name
-      config = YAML.load(ERB.new(File.read(config_filename)).result)
-      env = defined?(RAILS_ENV) ? RAILS_ENV : ENV['RAILS_ENV'] || 'development'
-      config = config[env] || config['default']
-      # Backwards compatibility from when the config was just an array of queues
-      config = { :workers => config } if config.is_a?(Array)
-      unless config && config.is_a?(Hash)
-        raise ArgumentError,
-          "Invalid config file #{config_filename}"
-      end
-      config = config.with_indifferent_access
-      config[:workers].map! do |worker_config|
-        config.except(:workers).merge(worker_config.with_indifferent_access)
-      end
-      config
-    end
-
-    def self.apply_worker_config!(config)
-      SETTINGS.each do |setting|
-        self.send("#{setting}=", config[setting.to_s]) if config.key?(setting.to_s)
-      end
-      parent_process.client_timeout = config['parent_process_client_timeout'] if config.key?('parent_process_client_timeout')
-      self.parent_process = config['parent_process'] if config.key?('parent_process')
-    end
-
-    def self.default_worker_config_name
-      expand_rails_path("config/delayed_jobs.yml")
-    end
-
-    # Expands rails-relative paths, without depending on rails being loaded.
-    def self.expand_rails_path(path)
-      root = if defined?(Rails) && Rails.root
-        (Rails.root+"Gemfile").to_s
-      else
-        ENV.fetch('BUNDLE_GEMFILE', Dir.pwd+"/Gemfile")
-      end
-      File.expand_path("../#{path}", root)
-    end
-
-    def self.parent_process_client_timeout=(val)
-      parent_process['server_socket_timeout'] = Integer(val)
-    end
-
-    def self.parent_process=(new_config)
-      raise 'Parent process configurations must be a hash!' unless Hash === new_config
-      @@parent_process = PARENT_PROCESS_DEFAULTS.merge(new_config)
-    end
-
-    def self.worker_health_check_config=(new_config)
-      @@worker_health_check_config = (new_config || {}).with_indifferent_access
-    end
   end
 end
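The refactor replaces the old mattr_writer/class-variable plumbing with plain instance variables behind `class << self`, but keeps the resolve-on-read behaviour: a setting may hold either a value or a callable, and the callable is invoked each time the setting is read. A toy module (not the gem's Delayed::Settings) illustrating that pattern:

```ruby
# Toy illustration of the resolve-on-read pattern kept by the refactor:
# a setting can be assigned a plain value or a lambda, and the reader
# calls the lambda on every read.
module ToySettings
  class << self
    attr_writer :sleep_delay

    def sleep_delay
      val = @sleep_delay
      val.respond_to?(:call) ? val.call : val
    end
  end
end

ToySettings.sleep_delay = 2.0
ToySettings.sleep_delay # => 2.0

# A lambda is re-evaluated on every read, so it can react to the environment.
ToySettings.sleep_delay = -> { ENV.fetch("SLEEP_DELAY", "5").to_f }
ToySettings.sleep_delay # => 5.0 unless SLEEP_DELAY is set
```

This is why defaults such as num_strands, default_job_options and job_detailed_log_format can be lambdas in the diff above: they are evaluated per read rather than once at load time.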
data/lib/delayed/testing.rb
CHANGED
@@ -1,34 +1,33 @@
 # frozen_string_literal: true

 module Delayed
-  module Testing
-
-
-
+  module Testing
+    def self.run_job(job)
+      Delayed::Worker.new.perform(job)
+    end

-
-
-
+    def self.drain
+      while (job = Delayed::Job.get_and_lock_next_available(
+        "spec run_jobs",
         Delayed::Settings.queue,
         0,
-        Delayed::MAX_PRIORITY
-
+        Delayed::MAX_PRIORITY
+      ))
+        run_job(job)
+      end
     end
-  end

-
-
-
-
+    def self.track_created(&block)
+      job_tracking = JobTracking.track(&block)
+      job_tracking.created
+    end

-
-
-
-
-
-
-      Delayed::Job::Failed.delete_all
+    def self.clear_all!
+      case Delayed::Job.name
+      when /ActiveRecord/
+        Delayed::Job.delete_all
+        Delayed::Job::Failed.delete_all
+      end
     end
   end
 end
-end