ductwork 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +5 -0
- data/LICENSE.txt +168 -0
- data/README.md +154 -0
- data/Rakefile +10 -0
- data/app/models/ductwork/availability.rb +9 -0
- data/app/models/ductwork/execution.rb +13 -0
- data/app/models/ductwork/job.rb +181 -0
- data/app/models/ductwork/pipeline.rb +195 -0
- data/app/models/ductwork/process.rb +19 -0
- data/app/models/ductwork/record.rb +15 -0
- data/app/models/ductwork/result.rb +13 -0
- data/app/models/ductwork/run.rb +9 -0
- data/app/models/ductwork/step.rb +27 -0
- data/lib/ductwork/cli.rb +48 -0
- data/lib/ductwork/configuration.rb +145 -0
- data/lib/ductwork/dsl/branch_builder.rb +102 -0
- data/lib/ductwork/dsl/definition_builder.rb +153 -0
- data/lib/ductwork/engine.rb +14 -0
- data/lib/ductwork/machine_identifier.rb +11 -0
- data/lib/ductwork/processes/job_worker.rb +71 -0
- data/lib/ductwork/processes/job_worker_runner.rb +164 -0
- data/lib/ductwork/processes/pipeline_advancer.rb +91 -0
- data/lib/ductwork/processes/pipeline_advancer_runner.rb +169 -0
- data/lib/ductwork/processes/supervisor.rb +160 -0
- data/lib/ductwork/processes/supervisor_runner.rb +35 -0
- data/lib/ductwork/running_context.rb +22 -0
- data/lib/ductwork/testing/helpers.rb +18 -0
- data/lib/ductwork/testing/minitest.rb +8 -0
- data/lib/ductwork/testing/rspec.rb +63 -0
- data/lib/ductwork/testing.rb +15 -0
- data/lib/ductwork/version.rb +5 -0
- data/lib/ductwork.rb +77 -0
- data/lib/generators/ductwork/install/USAGE +11 -0
- data/lib/generators/ductwork/install/install_generator.rb +36 -0
- data/lib/generators/ductwork/install/templates/bin/ductwork +8 -0
- data/lib/generators/ductwork/install/templates/config/ductwork.yml +25 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_availabilities.rb +16 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_executions.rb +14 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_jobs.rb +17 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_pipelines.rb +19 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_processes.rb +14 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_results.rb +14 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_runs.rb +12 -0
- data/lib/generators/ductwork/install/templates/db/create_ductwork_steps.rb +17 -0
- metadata +165 -0
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  module DSL
    # Builds a pipeline definition as a directed graph:
    #   { nodes: [<class name>, ...], edges: { <class name> => [{to:, type:}, ...] } }
    # Nodes are step class *names* (strings); edges record how execution flows
    # between steps (:chain, :divide, :combine, :expand, :collapse).
    # All builder methods return `self` so calls can be chained; `complete`
    # returns the finished definition hash.
    class DefinitionBuilder
      # Raised when `start` is called twice, or another method is called before `start`.
      class StartError < StandardError; end
      # Raised when `collapse` is called without a preceding `expand`.
      class CollapseError < StandardError; end
      # Raised when `combine` is called without a preceding `divide`.
      class CombineError < StandardError; end

      def initialize
        @definition = {
          nodes: [],
          edges: {},
        }
      end

      # Registers the first step of the pipeline. May only be called once.
      def start(klass)
        validate_start_once!
        add_new_nodes(klass)

        self
      end

      # NOTE: there is a bug here that does not allow the user to reuse step
      # classes in the same pipeline. i'll fix this later
      # (nodes/edges are keyed by class name, so a repeated class collides.)
      def chain(klass)
        validate_definition_started!(action: "chaining")
        add_edge_to_last_node(klass, type: :chain)
        add_new_nodes(klass)

        self
      end

      # Fans out the pipeline from the last node into the classes in `to`.
      # When a block is given it receives one BranchBuilder per branch so each
      # branch can be extended independently (they share this definition hash).
      def divide(to:)
        validate_definition_started!(action: "dividing chain")
        add_edge_to_last_node(*to, type: :divide)
        add_new_nodes(*to)

        if block_given?
          branches = to.map do |klass|
            Ductwork::DSL::BranchBuilder
              .new(klass:, definition:)
          end

          yield branches
        end

        self
      end

      # Fans divided branches back into a single `into` step. Requires that
      # the most recent edge is a :divide. Every node that currently has no
      # outgoing edges (i.e. each branch tip) gets a :combine edge to `into`.
      def combine(into:)
        validate_definition_started!(action: "combining steps")
        validate_definition_divided!

        last_nodes = definition[:nodes].reverse.select do |node|
          definition[:edges][node].empty?
        end
        last_nodes.each do |node|
          definition[:edges][node] << {
            to: [into.name],
            type: :combine,
          }
        end
        add_new_nodes(into)

        self
      end

      # Expands the last step into `to` (one-to-many at runtime).
      def expand(to:)
        validate_definition_started!(action: "expanding chain")
        add_edge_to_last_node(to, type: :expand)
        add_new_nodes(to)

        self
      end

      # Collapses a previously expanded step back into `into` (many-to-one).
      # Requires that the most recent edge is an :expand.
      def collapse(into:)
        validate_definition_started!(action: "collapsing steps")
        validate_definition_expanded!
        add_edge_to_last_node(into, type: :collapse)
        add_new_nodes(into)

        self
      end

      # Registers `klass` (stored by name) to be invoked when the pipeline
      # halts. Stored under definition[:metadata][:on_halt][:klass];
      # calling again overwrites the previous handler.
      def on_halt(klass)
        definition[:metadata] ||= {}
        definition[:metadata][:on_halt] = {}
        definition[:metadata][:on_halt][:klass] = klass.name

        self
      end

      # Finalizes the build and returns the definition hash itself
      # (not a copy — callers must not mutate it further).
      def complete
        validate_definition_started!(action: "completing")

        definition
      end

      private

      attr_reader :definition

      def validate_start_once!
        if definition[:nodes].any?
          raise StartError, "Can only start pipeline definition once"
        end
      end

      def validate_definition_started!(action:)
        if definition[:nodes].empty?
          raise StartError, "Must start pipeline definition before #{action}"
        end
      end

      def validate_definition_divided!
        if last_edge.nil? || last_edge[:type] != :divide
          raise CombineError, "Must divide pipeline definition before combining steps"
        end
      end

      def validate_definition_expanded!
        if last_edge.nil? || last_edge[:type] != :expand
          raise CollapseError, "Must expand pipeline definition before collapsing steps"
        end
      end

      # The most recently added edge: found by scanning nodes newest-first for
      # one that has outgoing edges, then taking that node's last edge.
      # Returns nil when no edges exist yet.
      def last_edge
        last_edge_node = definition[:nodes].reverse.find do |node|
          definition[:edges][node].any?
        end

        definition.dig(:edges, last_edge_node, -1)
      end

      # Appends each class name to the node list and ensures it has an
      # (initially empty) edge list.
      def add_new_nodes(*klasses)
        definition[:nodes].push(*klasses.map(&:name))
        klasses.each do |klass|
          definition[:edges][klass.name] ||= []
        end
      end

      # Adds one edge from the most recently added node to `klasses`.
      def add_edge_to_last_node(*klasses, type:)
        last_node = definition.dig(:nodes, -1)

        definition[:edges][last_node] << {
          to: klasses.map(&:name),
          type: type,
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  # Rails engine integration: wires Ductwork into the host application's
  # boot process.
  class Engine < ::Rails::Engine
    # Capture the host app's executor before prepare callbacks run so
    # Ductwork.wrap_with_app_executor can wrap background work in the
    # application's reloading/DB-connection semantics.
    initializer "ductwork.app_executor", before: :run_prepare_callbacks do |app|
      Ductwork.app_executor = app.executor
    end

    # Provide a default configuration and default the logger to Rails.logger.
    # `||=` keeps any configuration/logger the app set up before boot.
    initializer "ductwork.configure" do
      Ductwork.configuration ||= Ductwork::Configuration.new
      Ductwork.configuration.logger ||= Rails.logger
    end
  end
end
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  module Processes
    # Worker loop for a single pipeline: repeatedly claims the latest
    # available job and executes it, sleeping between polls when no job is
    # available. Runs until the shared running context signals shutdown.
    class JobWorker
      def initialize(pipeline, running_context)
        @pipeline = pipeline
        @running_context = running_context
      end

      # Fires :start hooks, polls/executes jobs while running, then shuts
      # down (firing :stop hooks). DB access is wrapped in the host app's
      # executor via Ductwork.wrap_with_app_executor.
      def run
        run_hooks_for(:start)
        log_debug("Entering main work loop")

        while running_context.running?
          log_debug("Attempting to claim job")

          claimed = Ductwork.wrap_with_app_executor do
            Job.claim_latest(pipeline)
          end

          if claimed.present?
            Ductwork.wrap_with_app_executor do
              claimed.execute(pipeline)
            end
          else
            log_debug("No job to claim, looping")
            sleep(Ductwork.configuration.job_worker_polling_timeout)
          end
        end

        shutdown
      end

      private

      attr_reader :pipeline, :running_context

      # Logs the shutdown and fires :stop hooks.
      def shutdown
        log_debug("Shutting down")
        run_hooks_for(:stop)
      end

      # Invokes every registered :worker hook for `event`, each wrapped in
      # the app executor; the worker itself is passed to the hook.
      def run_hooks_for(event)
        hooks = Ductwork.hooks[:worker].fetch(event, [])
        hooks.each do |block|
          Ductwork.wrap_with_app_executor do
            block.call(self)
          end
        end
      end

      # Structured debug log line tagged with this worker's role and pipeline.
      def log_debug(message)
        logger.debug(
          msg: message,
          role: :job_worker,
          pipeline: pipeline
        )
      end

      def logger
        Ductwork.configuration.logger
      end
    end
  end
end
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  module Processes
    # Supervises a pool of JobWorker threads for one pipeline: registers a
    # process row, reports heartbeats, and coordinates graceful shutdown on
    # INT/TERM (TTIN dumps all thread backtraces for debugging).
    class JobWorkerRunner
      def initialize(pipeline)
        @pipeline = pipeline
        @running_context = Ductwork::RunningContext.new
        @threads = create_threads

        Signal.trap(:INT) { running_context.shutdown! }
        Signal.trap(:TERM) { running_context.shutdown! }
        Signal.trap(:TTIN) do
          Thread.list.each do |thread|
            puts thread.name
            if thread.backtrace
              puts thread.backtrace.join("\n")
            else
              puts "No backtrace to dump"
            end
            puts
          end
        end
      end

      # Main supervision loop: registers the process, then periodically
      # joins worker threads (reaping any that exited) and reports a
      # heartbeat until shutdown is requested.
      def run
        create_process!
        logger.debug(
          msg: "Entering main work loop",
          role: :job_worker_runner,
          pipeline: pipeline
        )

        while running?
          # TODO: Increase or make configurable
          sleep(5)
          attempt_synchronize_threads
          report_heartbeat!
        end

        shutdown!
      end

      private

      attr_reader :pipeline, :running_context, :threads

      # Configured number of worker threads for this pipeline.
      def worker_count
        Ductwork.configuration.job_worker_count(pipeline)
      end

      # Spawns one named JobWorker thread per configured worker.
      def create_threads
        worker_count.times.map do |i|
          job_worker = Ductwork::Processes::JobWorker.new(
            pipeline,
            running_context
          )
          logger.debug(
            msg: "Creating new thread",
            role: :job_worker_runner,
            pipeline: pipeline
          )
          thread = Thread.new do
            job_worker.run
          end
          thread.name = "ductwork.job_worker_#{i}"

          logger.debug(
            msg: "Created new thread",
            role: :job_worker_runner,
            pipeline: pipeline
          )

          thread
        end
      end

      # Registers this OS process so it can be monitored via heartbeats.
      def create_process!
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.create!(
            pid: ::Process.pid,
            machine_identifier: Ductwork::MachineIdentifier.fetch,
            last_heartbeat_at: Time.current
          )
        end
      end

      def running?
        running_context.running?
      end

      # Briefly joins each thread so finished threads are reaped; the short
      # timeout keeps the supervision loop responsive.
      def attempt_synchronize_threads
        logger.debug(
          msg: "Attempting to synchronize threads",
          role: :job_worker_runner,
          pipeline: pipeline
        )
        threads.each { |thread| thread.join(0.1) }
        logger.debug(
          msg: "Synchronizing threads timed out",
          role: :job_worker_runner,
          pipeline: pipeline
        )
      end

      def report_heartbeat!
        logger.debug(msg: "Reporting heartbeat", role: :job_worker_runner)
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.report_heartbeat!
        end
        logger.debug(msg: "Reported heartbeat", role: :job_worker_runner)
      end

      # Full shutdown: signal workers, wait for graceful exit, kill
      # stragglers, then remove this process's registration row.
      def shutdown!
        running_context.shutdown!
        await_threads_graceful_shutdown
        kill_remaining_threads
        delete_process
      end

      # Waits up to the configured shutdown timeout for all worker threads
      # to exit on their own.
      def await_threads_graceful_shutdown
        timeout = Ductwork.configuration.job_worker_shutdown_timeout
        deadline = Time.current + timeout

        logger.debug(msg: "Attempting graceful shutdown", role: :job_worker_runner)
        while Time.current < deadline && threads.any?(&:alive?)
          threads.each do |thread|
            # BUG FIX: this previously read `break if Time.current < deadline`,
            # which - given the enclosing while condition guarantees we are
            # before the deadline - broke out immediately and made
            # `thread.join(1)` unreachable, so graceful shutdown never waited.
            # Stop joining only once the deadline has actually passed.
            break if Time.current >= deadline

            # TODO: Maybe make this configurable. If there's a ton of workers
            # it may not even get to the "later" ones depending on the timeout
            thread.join(1)
          end
        end
      end

      # Force-kills any thread that survived the graceful-shutdown window.
      def kill_remaining_threads
        threads.each do |thread|
          if thread.alive?
            thread.kill
            logger.debug(
              msg: "Killed thread",
              role: :job_worker_runner,
              thread: thread.name
            )
          end
        end
      end

      # Removes this process's registration row (raises if it is missing).
      def delete_process
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.find_by!(
            pid: ::Process.pid,
            machine_identifier: Ductwork::MachineIdentifier.fetch
          ).delete
        end
      end

      def logger
        Ductwork.configuration.logger
      end
    end
  end
end
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  module Processes
    # Polls for in-progress pipelines of a given class whose steps are all
    # settled, claims one at a time via an atomic UPDATE on a NULL claim
    # timestamp, advances it, then releases the claim.
    class PipelineAdvancer
      def initialize(running_context, klass)
        @running_context = running_context
        @klass = klass
      end

      def run # rubocop:todo Metrics/AbcSize, Metrics/MethodLength
        run_hooks_for(:start)
        while running_context.running?
          # Pick the least-recently-advanced unclaimed pipeline whose steps
          # are all either advancing or completed. `uncached` so each poll
          # sees fresh rows instead of the query cache.
          id = Ductwork::Record.uncached do
            Ductwork::Pipeline
              .in_progress
              .where(klass: klass, claimed_for_advancing_at: nil)
              .where.not(steps: Ductwork::Step.where.not(status: %w[advancing completed]))
              .order(:last_advanced_at)
              .limit(1)
              .pluck(:id)
              .first
          end

          if id.present?
            # Atomic claim: the WHERE on the NULL timestamp means only one
            # advancer process can win this UPDATE.
            rows_updated = Ductwork::Pipeline
              .where(id: id, claimed_for_advancing_at: nil)
              .update_all(claimed_for_advancing_at: Time.current)

            if rows_updated == 1
              logger.debug(
                msg: "Pipeline claimed",
                pipeline: klass,
                role: :pipeline_advancer
              )

              pipeline = Ductwork::Pipeline.find(id)
              pipeline.advance!

              logger.debug(
                msg: "Pipeline advanced",
                pipeline: klass,
                role: :pipeline_advancer
              )

              # release the pipeline and set last advanced at so it doesnt block.
              # we're not using a queue so we have to use a db timestamp
              #
              # BUG FIX: the release used to run whenever `id` was present,
              # even when rows_updated == 0 (claim lost to another advancer),
              # which cleared the other process's in-flight claim and defeated
              # the race protection above. Release only when we hold the claim.
              Ductwork::Pipeline.find(id).update!(
                claimed_for_advancing_at: nil,
                last_advanced_at: Time.current
              )
            else
              logger.debug(
                msg: "Did not claim pipeline, avoided race condition",
                pipeline: klass,
                role: :pipeline_advancer
              )
            end
          else
            logger.debug(
              msg: "No pipeline needs advancing",
              pipeline: klass,
              id: id,
              role: :pipeline_advancer
            )
          end

          sleep(Ductwork.configuration.pipeline_polling_timeout)
        end

        run_hooks_for(:stop)
      end

      private

      attr_reader :running_context, :klass

      # Invokes every registered :advancer hook for `event`, each wrapped in
      # the host app's executor; the advancer itself is passed to the hook.
      def run_hooks_for(event)
        Ductwork.hooks[:advancer].fetch(event, []).each do |block|
          Ductwork.wrap_with_app_executor do
            block.call(self)
          end
        end
      end

      def logger
        Ductwork.configuration.logger
      end
    end
  end
end
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Ductwork
  module Processes
    # Supervises one PipelineAdvancer thread per pipeline class: registers a
    # process row, reports heartbeats, and coordinates graceful shutdown on
    # INT/TERM (TTIN dumps all thread backtraces for debugging).
    class PipelineAdvancerRunner
      def initialize(*klasses)
        @klasses = klasses
        @running_context = Ductwork::RunningContext.new
        @threads = create_threads

        Signal.trap(:INT) { running_context.shutdown! }
        Signal.trap(:TERM) { running_context.shutdown! }
        Signal.trap(:TTIN) do
          Thread.list.each do |thread|
            puts thread.name
            if thread.backtrace
              puts thread.backtrace.join("\n")
            else
              puts "No backtrace to dump"
            end
            puts
          end
        end
      end

      # Main supervision loop: registers the process, then periodically
      # joins advancer threads and reports a heartbeat until shutdown is
      # requested.
      def run
        create_process!
        logger.debug(
          msg: "Entering main work loop",
          role: :pipeline_advancer_runner
        )

        while running_context.running?
          # TODO: Increase or make configurable
          sleep(5)
          attempt_synchronize_threads
          report_heartbeat!
        end

        shutdown
      end

      private

      attr_reader :klasses, :running_context, :threads

      # Spawns one named advancer thread per pipeline class; each run is
      # wrapped in the host app's executor.
      def create_threads
        klasses.map do |klass|
          pipeline_advancer = Ductwork::Processes::PipelineAdvancer.new(
            running_context,
            klass
          )

          logger.debug(
            msg: "Creating new thread",
            role: :pipeline_advancer_runner,
            pipeline: klass
          )
          thread = Thread.new do
            Ductwork.wrap_with_app_executor do
              pipeline_advancer.run
            end
          end
          thread.name = "ductwork.pipeline_advancer.#{klass}"

          logger.debug(
            msg: "Created new thread",
            role: :pipeline_advancer_runner,
            thread: thread.name,
            pipeline: klass
          )

          thread
        end
      end

      # Briefly joins each thread so finished threads are reaped; the short
      # timeout keeps the supervision loop responsive.
      def attempt_synchronize_threads
        logger.debug(
          msg: "Attempting to synchronize threads",
          role: :pipeline_advancer_runner
        )
        threads.each { |thread| thread.join(0.1) }
        logger.debug(
          msg: "Synchronizing threads timed out",
          role: :pipeline_advancer_runner
        )
      end

      # Registers this OS process so it can be monitored via heartbeats.
      def create_process!
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.create!(
            pid: ::Process.pid,
            machine_identifier: Ductwork::MachineIdentifier.fetch,
            last_heartbeat_at: Time.current
          )
        end
      end

      def report_heartbeat!
        logger.debug(msg: "Reporting heartbeat", role: :pipeline_advancer_runner)
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.report_heartbeat!
        end
        logger.debug(msg: "Reported heartbeat", role: :pipeline_advancer_runner)
      end

      # Full shutdown: signal advancers, wait for graceful exit, kill
      # stragglers, then remove this process's registration row.
      def shutdown
        log_shutting_down
        stop_running_context
        await_threads_graceful_shutdown
        kill_remaining_threads
        delete_process!
      end

      def log_shutting_down
        logger.debug(msg: "Shutting down", role: :pipeline_advancer_runner)
      end

      def stop_running_context
        running_context.shutdown!
      end

      # Waits up to the configured shutdown timeout for all advancer threads
      # to exit on their own.
      def await_threads_graceful_shutdown
        timeout = Ductwork.configuration.pipeline_shutdown_timeout
        deadline = Time.current + timeout

        logger.debug(
          msg: "Attempting graceful shutdown",
          role: :pipeline_advancer_runner
        )
        while Time.current < deadline && threads.any?(&:alive?)
          threads.each do |thread|
            # BUG FIX: this previously read `break if Time.current < deadline`,
            # which - given the enclosing while condition guarantees we are
            # before the deadline - broke out immediately and made
            # `thread.join(1)` unreachable, so graceful shutdown never waited.
            # Stop joining only once the deadline has actually passed.
            break if Time.current >= deadline

            # TODO: Maybe make this configurable. If there's a ton of workers
            # it may not even get to the "later" ones depending on the timeout
            thread.join(1)
          end
        end
      end

      # Force-kills any thread that survived the graceful-shutdown window.
      def kill_remaining_threads
        threads.each do |thread|
          if thread.alive?
            thread.kill
            logger.debug(
              msg: "Killed thread",
              role: :pipeline_advancer_runner,
              thread: thread.name
            )
          end
        end
      end

      # Removes this process's registration row (raises if it is missing).
      def delete_process!
        Ductwork.wrap_with_app_executor do
          Ductwork::Process.find_by!(
            pid: ::Process.pid,
            machine_identifier: Ductwork::MachineIdentifier.fetch
          ).delete
        end
      end

      def logger
        Ductwork.configuration.logger
      end
    end
  end
end
|