ductwork 0.3.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: ab38ab3c5ce61b7fd6d48eac93d93bcaa33b3476a7b8b6e126403da7b4c2ea04
-   data.tar.gz: 7f3c09913cf33f5f2d6b42f2f3b786b09b35d343d1277262f77b0f9ff666b653
+   metadata.gz: 18ebf23ac85cb80300ecc0c35475e0a2364fa4296a6012fdc21e95b8c5943023
+   data.tar.gz: e08b2cda1074255a7b7a79c5fbe789ac373909eef57b7836be49b3f7554a4350
  SHA512:
-   metadata.gz: cd1bbacdd428ed77064e526a7a2bfe639aad474c19d002417b5d3893856cef916baaba805124e1f4042c90ce972328f3de8c9747c88a96d8b3a60d88a26b183a
-   data.tar.gz: cef8abdee38bea2530ba058fde0062699fc53f190e3e67deb8c369f7c1badae7a7e9538f11ce6fc8f1bce488e15a9ab97271bf6fbc0c59f38f8eea1187f376e8
+   metadata.gz: 9d3b28265bfdf2f90210c43e2609d4255dda6d120ab597b74e54e9c020cbdc4be0b07235bb104873a62ae309511fc841c7d236f20dd52124150760c8dfaa94c8
+   data.tar.gz: fb6db1e8385b880b942404a25aba9ef02637d0c1a2dd53b1a6ddd78a8c70e6494c6349b8d7bbf837727cee5d2ed2c2e2afd67fe506c0be193797fc2de8867619
data/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
  # Ductwork Changelog
 
+ ## [0.4.0]
+
+ - chore: change job worker thread name format
+ - feat: add and respect pipeline-level `pipeline_advancer.polling_timeout` configuration in the pipeline advancer
+ - feat: respect `job_worker.polling_timeout` configuration in the job runner
+ - feat: add pipeline-level `job_worker.polling_timeout` configuration
+ - feat: check pipeline- and step-level max retry configurations when retrying a job
+ - feat: add pipeline- and step-level `job_worker.max_retry` configurations
+ - feat: add ability to set `job_worker.count` config manually
+ - chore: move configuration specs under their own directory
+ - feat: halt pipeline instead of erroring if max step depth is exceeded
+ - chore: move specs under their own directory
+ - feat: allow setting `pipeline_advancer.steps_max_depth` configuration manually
+ - feat: raise `Ductwork::Pipeline::StepDepthError` if return payload count exceeds the configured max depth
+ - feat: add `pipeline_advancer.steps.max_depth` configuration
+
  ## [0.3.1]
 
  - chore: bump dependencies and update necessary files
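Several of the 0.4.0 entries above add configuration that can now also be set programmatically. A minimal sketch, assuming only the `Ductwork.configuration` object and the writers added in the Configuration diff further down (`job_worker_count=`, `job_worker_max_retry=`, `steps_max_depth=`); the values are illustrative, not defaults:

```ruby
# Illustrative values only; the writer names come from the attr_writer list in
# the Configuration diff below. A manually assigned value takes precedence over
# anything read from the YAML config file.
Ductwork.configuration.job_worker_count = 4       # fixed number of job worker threads
Ductwork.configuration.job_worker_max_retry = 5   # global retry ceiling for failed executions
Ductwork.configuration.steps_max_depth = 100      # halt pipelines that fan out wider than this; -1 means unlimited
```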
@@ -142,9 +142,12 @@ module Ductwork
  end
  end
 
- def execution_failed!(execution, run, error) # rubocop:disable Metrics/AbcSize
+ def execution_failed!(execution, run, error) # rubocop:todo Metrics
  halted = false
  pipeline = step.pipeline
+ max_retry = Ductwork
+   .configuration
+   .job_worker_max_retry(pipeline: pipeline.klass, step: klass)
 
  Ductwork::Record.transaction do
  execution.update!(completed_at: Time.current)
@@ -156,7 +159,7 @@ module Ductwork
  error_backtrace: error.backtrace
  )
 
- if execution.retry_count < Ductwork.configuration.job_worker_max_retry
+ if execution.retry_count < max_retry
  new_execution = executions.create!(
  retry_count: execution.retry_count + 1,
  started_at: FAILED_EXECUTION_TIMEOUT.from_now
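The retry ceiling consulted above is now resolved per pipeline and per step via `job_worker_max_retry(pipeline:, step:)`. A hypothetical parsed `job_worker.max_retry` config (pipeline and step names are invented) showing the shapes the lookup accepts:

```ruby
# Either a plain integer, or a hash keyed by pipeline class name, where each
# value is itself either an integer or a hash keyed by step class name.
max_retry = {
  default: 10,          # used by pipelines with no entry of their own
  OrderPipeline: {
    default: 3,         # used by OrderPipeline steps with no entry of their own
    ChargeCardStep: 5   # step-specific ceiling
  }
}
# Resolution (per the Configuration diff below):
#   OrderPipeline / ChargeCardStep => 5
#   OrderPipeline / any other step => 3
#   any other pipeline             => 10
```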
@@ -178,14 +178,26 @@ module Ductwork
  end
 
  def advance_to_next_steps(step_type, advancing, edge)
- edge[:to].each do |to_klass|
-   next_step = steps.create!(
-     klass: to_klass,
-     status: :in_progress,
-     step_type: step_type,
-     started_at: Time.current
-   )
-   Ductwork::Job.enqueue(next_step, advancing.take.job.return_value)
+ too_many = edge[:to].tally.any? do |to_klass, count|
+   depth = Ductwork
+     .configuration
+     .steps_max_depth(pipeline: klass, step: to_klass)
+
+   depth != -1 && count > depth
+ end
+
+ if too_many
+   halted!
+ else
+   edge[:to].each do |to_klass|
+     next_step = steps.create!(
+       klass: to_klass,
+       status: :in_progress,
+       step_type: step_type,
+       started_at: Time.current
+     )
+     Ductwork::Job.enqueue(next_step, advancing.take.job.return_value)
+   end
  end
  end
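The new guard in `advance_to_next_steps` tallies how many times each target step class appears on the edge and halts the whole pipeline if any class exceeds its configured depth. An illustrative walk-through (step class names are made up):

```ruby
# Edge fanning out to three copies of the same step class plus one other step.
edge = { to: %w[ResizeImage ResizeImage ResizeImage GenerateThumb] }

edge[:to].tally
# => { "ResizeImage" => 3, "GenerateThumb" => 1 }

# If steps_max_depth resolves to 2 for ResizeImage, then 3 > 2, the `any?`
# block returns true, and the pipeline is marked halted! instead of creating
# steps and enqueuing jobs. A depth of -1 (the default) disables the check.
```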
 
@@ -219,12 +231,20 @@ module Ductwork
  end
 
  def expand_to_next_steps(step_type, advancing, edge)
- Array(advancing.take.job.return_value).each do |input_arg|
-   create_step_and_enqueue_job(
-     klass: edge[:to].sole,
-     step_type: step_type,
-     input_arg: input_arg
-   )
+ next_klass = edge[:to].sole
+ return_value = advancing.take.job.return_value
+ max_depth = Ductwork.configuration.steps_max_depth(pipeline: klass, step: next_klass)
+
+ if max_depth != -1 && return_value.count > max_depth
+   halted!
+ else
+   Array(return_value).each do |input_arg|
+     create_step_and_enqueue_job(
+       klass: next_klass,
+       step_type: step_type,
+       input_arg: input_arg
+     )
+   end
  end
  end
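For expand steps the same limit is applied to the size of the previous job's return payload rather than to duplicate edge targets. A small sketch with invented numbers:

```ruby
# Stand-ins for advancing.take.job.return_value and the resolved max depth.
return_value = Array.new(500) { |i| { id: i } }
max_depth = 100

# 500 > 100, so the pipeline is halted! rather than creating 500 child steps;
# with the default of -1 the payload size is never checked.
halt = max_depth != -1 && return_value.count > max_depth
```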
 
@@ -12,15 +12,17 @@ module Ductwork
  DEFAULT_LOGGER_SOURCE = "default" # `Logger` instance writing to STDOUT
  DEFAULT_PIPELINE_POLLING_TIMEOUT = 1 # second
  DEFAULT_PIPELINE_SHUTDOWN_TIMEOUT = 20 # seconds
+ DEFAULT_STEPS_MAX_DEPTH = -1 # unlimited count
  DEFAULT_SUPERVISOR_POLLING_TIMEOUT = 1 # second
  DEFAULT_SUPERVISOR_SHUTDOWN_TIMEOUT = 30 # seconds
  DEFAULT_LOGGER = ::Logger.new($stdout)
  PIPELINES_WILDCARD = "*"
 
- attr_writer :job_worker_polling_timeout, :job_worker_shutdown_timeout,
-   :job_worker_max_retry,
+ attr_writer :job_worker_count, :job_worker_polling_timeout,
+   :job_worker_shutdown_timeout, :job_worker_max_retry,
    :logger_level,
    :pipeline_polling_timeout, :pipeline_shutdown_timeout,
+   :steps_max_depth,
    :supervisor_polling_timeout, :supervisor_shutdown_timeout
 
  def initialize(path: DEFAULT_FILE_PATH)
@@ -49,6 +51,8 @@ module Ductwork
  end
 
  def job_worker_count(pipeline)
+   return @job_worker_count if instance_variable_defined?(:@job_worker_count)
+
  raw_count = config.dig(:job_worker, :count) || DEFAULT_JOB_WORKER_COUNT
 
  if raw_count.is_a?(Hash)
@@ -58,12 +62,36 @@ module Ductwork
  end
  end
 
- def job_worker_max_retry
-   @job_worker_max_retry ||= fetch_job_worker_max_retry
+ def job_worker_max_retry(pipeline: nil, step: nil) # rubocop:disable Metrics
+   return @job_worker_max_retry if instance_variable_defined?(:@job_worker_max_retry)
+
+   pipeline ||= :default
+   step ||= :default
+   base_config = config.dig(:job_worker, :max_retry)
+
+   if base_config.is_a?(Hash) && base_config[pipeline.to_sym].is_a?(Hash)
+     pipeline_config = config.dig(:job_worker, :max_retry, pipeline.to_sym)
+
+     pipeline_config[step.to_sym] || pipeline_config[:default] || DEFAULT_JOB_WORKER_MAX_RETRY
+   elsif base_config.is_a?(Hash)
+     base_config[pipeline.to_sym] || base_config[:default] || DEFAULT_JOB_WORKER_MAX_RETRY
+   else
+     base_config || DEFAULT_JOB_WORKER_MAX_RETRY
+   end
  end
 
- def job_worker_polling_timeout
-   @job_worker_polling_timeout ||= fetch_job_worker_polling_timeout
+ def job_worker_polling_timeout(pipeline = nil)
+   pipeline ||= :default
+   default = DEFAULT_JOB_WORKER_POLLING_TIMEOUT
+   base_config = config.dig(:job_worker, :polling_timeout)
+
+   if instance_variable_defined?(:@job_worker_polling_timeout)
+     @job_worker_polling_timeout
+   elsif base_config.is_a?(Hash)
+     base_config[pipeline.to_sym] || base_config[:default] || default
+   else
+     base_config || default
+   end
  end
 
  def job_worker_shutdown_timeout
@@ -78,14 +106,46 @@ module Ductwork
  @logger_source ||= fetch_logger_source
  end
 
- def pipeline_polling_timeout
-   @pipeline_polling_timeout ||= fetch_pipeline_polling_timeout
+ def pipeline_polling_timeout(pipeline = nil)
+   pipeline ||= :default
+   default = DEFAULT_PIPELINE_POLLING_TIMEOUT
+   base_config = config.dig(:pipeline_advancer, :polling_timeout)
+
+   if instance_variable_defined?(:@pipeline_polling_timeout)
+     @pipeline_polling_timeout
+   elsif base_config.is_a?(Hash)
+     base_config[pipeline.to_sym] || base_config[:default] || default
+   else
+     base_config || default
+   end
  end
 
  def pipeline_shutdown_timeout
  @pipeline_shutdown_timeout ||= fetch_pipeline_shutdown_timeout
  end
 
+ def steps_max_depth(pipeline: nil, step: nil) # rubocop:disable Metrics
+   return @steps_max_depth if instance_variable_defined?(:@steps_max_depth)
+
+   pipeline ||= :default
+   step ||= :default
+   base_config = config.dig(:pipeline_advancer, :steps, :max_depth)
+
+   if base_config.is_a?(Hash) && base_config[pipeline.to_sym].is_a?(Hash)
+     pipeline_config = config.dig(:pipeline_advancer, :steps, :max_depth, pipeline.to_sym)
+
+     pipeline_config[step.to_sym] ||
+       pipeline_config[:default] ||
+       DEFAULT_STEPS_MAX_DEPTH
+   elsif base_config.is_a?(Hash)
+     base_config[pipeline.to_sym] ||
+       base_config[:default] ||
+       DEFAULT_STEPS_MAX_DEPTH
+   else
+     base_config || DEFAULT_STEPS_MAX_DEPTH
+   end
+ end
+
  def supervisor_polling_timeout
  @supervisor_polling_timeout ||= fetch_supervisor_polling_timeout
  end
@@ -98,16 +158,6 @@ module Ductwork
 
  attr_reader :config
 
- def fetch_job_worker_max_retry
-   config.dig(:job_worker, :max_retry) ||
-     DEFAULT_JOB_WORKER_MAX_RETRY
- end
-
- def fetch_job_worker_polling_timeout
-   config.dig(:job_worker, :polling_timeout) ||
-     DEFAULT_JOB_WORKER_POLLING_TIMEOUT
- end
-
  def fetch_job_worker_shutdown_timeout
  config.dig(:job_worker, :shutdown_timeout) ||
  DEFAULT_JOB_WORKER_SHUTDOWN_TIMEOUT
@@ -121,11 +171,6 @@ module Ductwork
  config.dig(:logger, :source) || DEFAULT_LOGGER_SOURCE
  end
 
- def fetch_pipeline_polling_timeout
-   config.dig(:pipeline_advancer, :polling_timeout) ||
-     DEFAULT_PIPELINE_POLLING_TIMEOUT
- end
-
  def fetch_pipeline_shutdown_timeout
  config.dig(:pipeline_advancer, :shutdown_timeout) ||
  DEFAULT_PIPELINE_SHUTDOWN_TIMEOUT
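All of the new per-pipeline readers above (called on `Ductwork.configuration`) follow the same precedence: a manually assigned value wins, then a step-level entry, then the pipeline's `:default`, then the top-level `:default` (only consulted when the pipeline has no nested entry of its own), then the class-level default constant. A hypothetical parsed config and the values the lookups would return (pipeline and step names invented):

```ruby
config = {
  job_worker: {
    polling_timeout: { default: 1, ImportPipeline: 5 }
  },
  pipeline_advancer: {
    steps: {
      max_depth: {
        default: -1,
        ImportPipeline: { default: 50, FanOutStep: 200 }
      }
    }
  }
}

# With the above loaded, the readers resolve roughly as:
#   job_worker_polling_timeout("ImportPipeline")                     # => 5
#   job_worker_polling_timeout("OtherPipeline")                      # => 1
#   steps_max_depth(pipeline: "ImportPipeline", step: "FanOutStep")  # => 200
#   steps_max_depth(pipeline: "ImportPipeline", step: "OtherStep")   # => 50
#   steps_max_depth(pipeline: "OtherPipeline")                       # => -1 (unlimited)
```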
@@ -35,7 +35,7 @@ module Ductwork
  role: :job_worker,
  pipeline: pipeline
  )
- sleep(Ductwork.configuration.job_worker_polling_timeout)
+ sleep(polling_timeout)
  end
  end
 
@@ -62,6 +62,10 @@ module Ductwork
  end
  end
  end
+
+ def polling_timeout
+   Ductwork.configuration.job_worker_polling_timeout(pipeline)
+ end
  end
  end
  end
@@ -63,7 +63,7 @@ module Ductwork
  thread = Thread.new do
  job_worker.run
  end
- thread.name = "ductwork.job_worker_#{i}"
+ thread.name = "ductwork.job_worker.#{i}"
 
  Ductwork.logger.debug(
  msg: "Created new thread",
@@ -65,7 +65,7 @@ module Ductwork
  )
  end
 
- sleep(Ductwork.configuration.pipeline_polling_timeout)
+ sleep(polling_timeout)
  end
 
  run_hooks_for(:stop)
@@ -82,6 +82,10 @@ module Ductwork
  end
  end
  end
+
+ def polling_timeout
+   Ductwork.configuration.pipeline_polling_timeout(klass)
+ end
  end
  end
  end
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Ductwork
-   VERSION = "0.3.1"
+   VERSION = "0.4.0"
  end
@@ -11,6 +11,8 @@ default: &default
    pipeline_advancer:
      polling_timeout: 1
      shutdown_timeout: 20
+     steps:
+       max_depth: -1
    supervisor:
      polling_timeout: 1
      shutdown_timeout: 30
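The generated default config above only adds `steps.max_depth: -1`; per-pipeline and per-step overrides nest under the same keys and are read with symbolized keys via `config.dig(:pipeline_advancer, :steps, :max_depth, ...)`. A hypothetical sketch, omitting the `default: &default` environment scoping shown in the hunk header and using invented class names:

```ruby
require "yaml"

# Illustrative nesting only; mirrors what the new steps_max_depth reader digs out.
raw = <<~YAML
  pipeline_advancer:
    steps:
      max_depth:
        default: -1
        ImportPipeline:
          FanOutStep: 200
YAML

config = YAML.safe_load(raw, symbolize_names: true)
config.dig(:pipeline_advancer, :steps, :max_depth, :ImportPipeline, :FanOutStep) # => 200
```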
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: ductwork
  version: !ruby/object:Gem::Version
-   version: 0.3.1
+   version: 0.4.0
  platform: ruby
  authors:
  - Tyler Ewing