floe 0.8.0 → 0.9.0
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -1
- data/exe/floe +6 -19
- data/floe.gemspec +2 -1
- data/lib/floe/version.rb +1 -1
- data/lib/floe/workflow/runner/docker.rb +99 -2
- data/lib/floe/workflow/runner/kubernetes.rb +84 -1
- data/lib/floe/workflow/runner/podman.rb +26 -0
- data/lib/floe/workflow/runner.rb +27 -10
- data/lib/floe/workflow/state.rb +10 -6
- data/lib/floe/workflow/states/non_terminal_mixin.rb +5 -0
- data/lib/floe/workflow/states/task.rb +3 -2
- data/lib/floe/workflow/states/wait.rb +2 -3
- data/lib/floe/workflow.rb +66 -7
- data/lib/floe.rb +20 -0
- metadata +18 -4
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 8c7a74a5297258d481fb588ae0fa6eb1b22b7ecf5c049865b77ad23d6fb135cb
+  data.tar.gz: 82f73726e293e5345d3e7fa55a0049f881f2dce6e6d46570d2352968907c04b9
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 32d58e28cd76d936f31f9af2c1091d8a7dd930e47a2197b532c02d6d48df2c82feee696072af701aeca5f2af5437040ea3bace5df622c1ad5d0e47e388884ad2
+  data.tar.gz: 1ee0628fbfde496d00fae67812ac869582b1aca754aca7cf44caa7170edc1b0f6c9100a587770d6f6bb97bc0a948dd8fe0123fd108b9ed037ad71bc62bb8e104

data/CHANGELOG.md
CHANGED
@@ -4,6 +4,16 @@ This project adheres to [Semantic Versioning](http://semver.org/).
 
 ## [Unreleased]
 
+## [0.9.0] - 2024-02-19
+### Changed
+- Default to wait indefinitely ([#157](https://github.com/ManageIQ/floe/pull/157))
+- Create docker runners factory and add scheme ([#152](https://github.com/ManageIQ/floe/pull/152))
+- Add a watch method to Workflow::Runner for event driven updates ([#95](https://github.com/ManageIQ/floe/pull/95))
+
+### Fixed
+- Fix waiting on extremely short durations ([#160](https://github.com/ManageIQ/floe/pull/160))
+- Fix wait state missing finish ([#159](https://github.com/ManageIQ/floe/pull/159))
+
 ## [0.8.0] - 2024-01-17
 ### Added
 - Add CLI shorthand options for docker runner ([#147](https://github.com/ManageIQ/floe/pull/147))
@@ -126,7 +136,8 @@ This project adheres to [Semantic Versioning](http://semver.org/).
 ### Added
 - Initial release
 
-[Unreleased]: https://github.com/ManageIQ/floe/compare/v0.
+[Unreleased]: https://github.com/ManageIQ/floe/compare/v0.9.0...HEAD
+[0.9.0]: https://github.com/ManageIQ/floe/compare/v0.8.0...v0.9.0
 [0.8.0]: https://github.com/ManageIQ/floe/compare/v0.7.0...v0.8.0
 [0.7.0]: https://github.com/ManageIQ/floe/compare/v0.6.1...v0.7.0
 [0.6.1]: https://github.com/ManageIQ/floe/compare/v0.6.0...v0.6.1

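Taken together, these entries describe the move from a one-second polling loop to event-driven stepping: a runner is registered per resource scheme, and `Floe::Workflow.wait` blocks on container events. A minimal sketch of the 0.9.0 entry points named above, assuming `workflows` is an array of already-constructed `Floe::Workflow` objects (construction is unchanged by this release and not shown):

```ruby
require "floe"

# Register the runner backing the docker:// scheme (runner factory, #152).
# Accepted names are "docker", "podman", "kubernetes", or a Runner instance.
Floe.set_runner("docker", "podman")

# Step each workflow as container events arrive instead of polling (#95);
# with no timeout this waits indefinitely (#157).
Floe::Workflow.wait(workflows, &:run_nonblock)
```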
data/exe/floe
CHANGED
@@ -20,8 +20,6 @@ opts = Optimist.options do
   opt :kubernetes, "Use kubernetes to run images (short for --docker_runner=kubernetes)", :type => :boolean
 end
 
-Optimist.die(:docker_runner, "must be one of #{Floe::Workflow::Runner::TYPES.join(", ")}") unless Floe::Workflow::Runner::TYPES.include?(opts[:docker_runner])
-
 # legacy support for --workflow
 args = ARGV.empty? ? [opts[:workflow], opts[:input]] : ARGV
 Optimist.die(:workflow, "must be specified") if args.empty?
@@ -34,18 +32,13 @@ opts[:docker_runner] ||= "kubernetes" if opts[:kubernetes]
 require "logger"
 Floe.logger = Logger.new($stdout)
 
-runner_klass = case opts[:docker_runner]
-               when "docker"
-                 Floe::Workflow::Runner::Docker
-               when "podman"
-                 Floe::Workflow::Runner::Podman
-               when "kubernetes"
-                 Floe::Workflow::Runner::Kubernetes
-               end
-
 runner_options = opts[:docker_runner_options].to_h { |opt| opt.split("=", 2) }
 
-
+begin
+  Floe.set_runner("docker", opts[:docker_runner], runner_options)
+rescue ArgumentError => e
+  Optimist.die(:docker_runner, e.message)
+end
 
 credentials =
   if opts[:credentials_given]
@@ -62,13 +55,7 @@ workflows =
 
 # run
 
-
-until outstanding.empty?
-  ready = outstanding.select(&:step_nonblock_ready?)
-  ready.map(&:run_nonblock)
-  outstanding -= ready.select(&:end?)
-  sleep(1) if !outstanding.empty?
-end
+Floe::Workflow.wait(workflows, &:run_nonblock)
 
 # display status
 

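The CLI no longer validates `--docker_runner` against `Runner::TYPES`; it forwards the value to `Floe.set_runner` and turns the resulting `ArgumentError` into an Optimist error. A hedged sketch of that failure path outside the CLI; the runner name "rkt" is just an example of an invalid value:

```ruby
require "floe"

begin
  Floe.set_runner("docker", "rkt") # anything other than docker/podman/kubernetes or a Runner instance
rescue ArgumentError => e
  warn e.message # "docker runner must be one of: docker, podman, kubernetes"
end
```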
data/floe.gemspec
CHANGED
@@ -29,7 +29,8 @@ Gem::Specification.new do |spec|
   spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
   spec.require_paths = ["lib"]
 
-  spec.add_dependency "awesome_spawn", "~>1.
+  spec.add_dependency "awesome_spawn", "~>1.6"
+  spec.add_dependency "io-wait"
   spec.add_dependency "jsonpath", "~>1.1"
   spec.add_dependency "kubeclient", "~>4.7"
   spec.add_dependency "optimist", "~>3.0"

data/lib/floe/version.rb
CHANGED
-  VERSION = "0.8.0"
+  VERSION = "0.9.0"

data/lib/floe/workflow/runner/docker.rb
CHANGED
@@ -10,6 +10,7 @@ module Floe
 
         def initialize(options = {})
           require "awesome_spawn"
+          require "io/wait"
           require "tempfile"
 
           super
@@ -45,10 +46,63 @@ module Floe
           delete_secret(secrets_file) if secrets_file
         end
 
+        def wait(timeout: nil, events: %i[create update delete], &block)
+          until_timestamp = Time.now.utc + timeout if timeout
+
+          r, w = IO.pipe
+
+          pid = AwesomeSpawn.run_detached(
+            self.class::DOCKER_COMMAND, :err => :out, :out => w, :params => wait_params(until_timestamp)
+          )
+
+          w.close
+
+          loop do
+            readable_timeout = until_timestamp - Time.now.utc if until_timestamp
+
+            # Wait for our end of the pipe to be readable and if it didn't timeout
+            # get the events from stdout
+            next if r.wait_readable(readable_timeout).nil?
+
+            # Get all events while the pipe is readable
+            notices = []
+            while r.ready?
+              notice = r.gets
+
+              # If the process has exited `r.gets` returns `nil` and the pipe is
+              # always `ready?`
+              break if notice.nil?
+
+              event, runner_context = parse_notice(notice)
+              next if event.nil? || !events.include?(event)
+
+              notices << [event, runner_context]
+            end
+
+            # If we're given a block yield the events otherwise return them
+            if block
+              notices.each(&block)
+            else
+              # Terminate the `docker events` process before returning the events
+              sigterm(pid)
+
+              return notices
+            end
+
+            # Check that the `docker events` process is still alive
+            Process.kill(0, pid)
+          rescue Errno::ESRCH
+            # Break out of the loop if the `docker events` process has exited
+            break
+          end
+        ensure
+          r.close
+        end
+
         def status!(runner_context)
           return if runner_context.key?("Error")
 
-          runner_context["container_state"] = inspect_container(runner_context["container_ref"])
+          runner_context["container_state"] = inspect_container(runner_context["container_ref"])&.dig("State")
         end
 
         def running?(runner_context)
@@ -91,8 +145,45 @@ module Floe
           params << image
         end
 
+        def wait_params(until_timestamp)
+          params = ["events", [:format, "{{json .}}"], [:filter, "type=container"], [:since, Time.now.utc.to_i]]
+          params << [:until, until_timestamp.to_i] if until_timestamp
+          params
+        end
+
+        def parse_notice(notice)
+          notice = JSON.parse(notice)
+
+          status = notice["status"]
+          event = docker_event_status_to_event(status)
+          running = event != :delete
+
+          name, exit_code = notice.dig("Actor", "Attributes")&.values_at("name", "exitCode")
+
+          runner_context = {"container_ref" => name, "container_state" => {"Running" => running, "ExitCode" => exit_code.to_i}}
+
+          [event, runner_context]
+        rescue JSON::ParserError
+          []
+        end
+
+        def docker_event_status_to_event(status)
+          case status
+          when "create"
+            :create
+          when "start"
+            :update
+          when "die", "destroy"
+            :delete
+          else
+            :unkonwn
+          end
+        end
+
         def inspect_container(container_id)
-          JSON.parse(docker!("inspect", container_id).output)
+          JSON.parse(docker!("inspect", container_id).output).first
+        rescue
+          nil
        end
 
        def delete_container(container_id)
@@ -116,6 +207,12 @@ module Floe
           secrets_file.path
         end
 
+        def sigterm(pid)
+          Process.kill("TERM", pid)
+        rescue Errno::ESRCH
+          nil
+        end
+
         def global_docker_options
           []
         end

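The new Docker `wait` spawns a long-running `docker events` process (`--format '{{json .}}' --filter type=container`) and converts each JSON line into an `[event, runner_context]` pair via `parse_notice`. Roughly how a caller consumes it, assuming a `Floe::Workflow::Runner::Docker` built with default options; the keys shown (`container_ref`, `container_state`) come from `parse_notice` above:

```ruby
runner = Floe::Workflow::Runner::Docker.new

# Stream events to a block until the timeout elapses.
runner.wait(:timeout => 60) do |event, runner_context|
  puts "#{event}: #{runner_context["container_ref"]} => #{runner_context["container_state"]}"
end

# Without a block, return the first batch of events and terminate the
# underlying `docker events` process.
notices = runner.wait(:timeout => 5)
```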
data/lib/floe/workflow/runner/kubernetes.rb
CHANGED
@@ -53,7 +53,7 @@ module Floe
           name = container_name(image)
           secret = create_secret!(secrets) if secrets && !secrets.empty?
 
-          runner_context = {"container_ref" => name, "secrets_ref" => secret}
+          runner_context = {"container_ref" => name, "container_state" => {"phase" => "Pending"}, "secrets_ref" => secret}
 
           begin
             create_pod!(name, image, env, secret)
@@ -102,6 +102,54 @@ module Floe
           delete_secret(secret) if secret
         end
 
+        def wait(timeout: nil, events: %i[create update delete])
+          retry_connection = true
+
+          begin
+            watcher = kubeclient.watch_pods(:namespace => namespace)
+
+            retry_connection = true
+
+            if timeout.to_i > 0
+              timeout_thread = Thread.new do
+                sleep(timeout)
+                watcher.finish
+              end
+            end
+
+            watcher.each do |notice|
+              break if error_notice?(notice)
+
+              event = kube_notice_type_to_event(notice.type)
+              next unless events.include?(event)
+
+              runner_context = parse_notice(notice)
+              next if runner_context.nil?
+
+              if block_given?
+                yield [event, runner_context]
+              else
+                timeout_thread&.kill # If we break out before the timeout, kill the timeout thread
+                return [[event, runner_context]]
+              end
+            end
+          rescue Kubeclient::HttpError => err
+            raise unless err.error_code == 401 && retry_connection
+
+            @kubeclient = nil
+            retry_connection = false
+            retry
+          ensure
+            begin
+              watch&.finish
+            rescue
+              nil
+            end
+
+            timeout_thread&.join(0)
+          end
+        end
+
         private
 
         attr_reader :ca_file, :kubeconfig_file, :kubeconfig_context, :namespace, :server, :token, :verify_ssl
@@ -217,6 +265,41 @@ module Floe
           nil
         end
 
+        def kube_notice_type_to_event(type)
+          case type
+          when "ADDED"
+            :create
+          when "MODIFIED"
+            :update
+          when "DELETED"
+            :delete
+          else
+            :unknown
+          end
+        end
+
+        def error_notice?(notice)
+          return false unless notice.type == "ERROR"
+
+          message = notice.object&.message
+          code = notice.object&.code
+          reason = notice.object&.reason
+
+          logger.warn("Received [#{code} #{reason}], [#{message}]")
+
+          true
+        end
+
+        def parse_notice(notice)
+          return if notice.object.nil?
+
+          pod = notice.object
+          container_ref = pod.metadata.name
+          container_state = pod.to_h[:status].deep_stringify_keys
+
+          {"container_ref" => container_ref, "container_state" => container_state}
+        end
+
         def kubeclient
           return @kubeclient unless @kubeclient.nil?
 

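The Kubernetes runner implements the same `wait` contract on top of `kubeclient.watch_pods`, mapping watch notice types (ADDED/MODIFIED/DELETED) onto the same `:create`/`:update`/`:delete` events. A hedged sketch of registering it for the docker:// scheme; the option keys are assumed from the attr_readers above, and the values are placeholders:

```ruby
require "floe"

# Back docker:// resources with the Kubernetes runner (keys assumed, values are placeholders).
Floe.set_runner("docker", "kubernetes", {
  "namespace" => "default",
  "server"    => "https://kubernetes.example.com:6443",
  "token"     => File.read("/path/to/token")
})
```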
data/lib/floe/workflow/runner/podman.rb
CHANGED
@@ -55,6 +55,32 @@ module Floe
           nil
         end
 
+        def parse_notice(notice)
+          id, status, exit_code = JSON.parse(notice).values_at("ID", "Status", "ContainerExitCode")
+
+          event = podman_event_status_to_event(status)
+          running = event != :delete
+
+          runner_context = {"container_ref" => id, "container_state" => {"Running" => running, "ExitCode" => exit_code.to_i}}
+
+          [event, runner_context]
+        rescue JSON::ParserError
+          []
+        end
+
+        def podman_event_status_to_event(status)
+          case status
+          when "create"
+            :create
+          when "init", "start"
+            :update
+          when "died", "cleanup", "remove"
+            :delete
+          else
+            :unknown
+          end
+        end
+
         alias podman! docker!
 
         def global_docker_options

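Podman emits different field names (`ID`, `Status`, `ContainerExitCode`) and different status strings (`init`, `died`, `cleanup`, `remove`) than Docker, which is why it gets its own `parse_notice`. A small illustration of the mapping using a hand-written event line rather than real `podman events` output:

```ruby
require "json"

# A hand-written line in the shape the JSON event output is parsed as above.
notice = {"ID" => "abc123", "Status" => "died", "ContainerExitCode" => 1}.to_json

id, status, exit_code = JSON.parse(notice).values_at("ID", "Status", "ContainerExitCode")
# "died" maps to :delete above, so the resulting runner context would be
#   {"container_ref" => "abc123", "container_state" => {"Running" => false, "ExitCode" => 1}}
```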
data/lib/floe/workflow/runner.rb
CHANGED
@@ -5,29 +5,42 @@ module Floe
     class Runner
       include Logging
 
-      TYPES = %w[docker podman kubernetes].freeze
       OUTPUT_MARKER = "__FLOE_OUTPUT__\n"
 
       def initialize(_options = {})
       end
 
+      @runners = {}
       class << self
-
+        # deprecated -- use Floe.set_runner instead
+        def docker_runner=(value)
+          set_runner("docker", value)
+        end
 
-
-
+        # see Floe.set_runner
+        def set_runner(scheme, name_or_instance, options = {})
+          @runners[scheme] =
+            case name_or_instance
+            when "docker", nil
+              Floe::Workflow::Runner::Docker.new(options)
+            when "podman"
+              Floe::Workflow::Runner::Podman.new(options)
+            when "kubernetes"
+              Floe::Workflow::Runner::Kubernetes.new(options)
+            when Floe::Workflow::Runner
+              name_or_instance
+            else
+              raise ArgumentError, "docker runner must be one of: docker, podman, kubernetes"
+            end
         end
 
         def for_resource(resource)
           raise ArgumentError, "resource cannot be nil" if resource.nil?
 
+          # if no runners are set, default docker:// to docker
+          set_runner("docker", "docker") if @runners.empty?
           scheme = resource.split("://").first
-
-          when "docker"
-            docker_runner
-          else
-            raise "Invalid resource scheme [#{scheme}]"
-          end
+          @runners[scheme] || raise(ArgumentError, "Invalid resource scheme [#{scheme}]")
         end
       end
 
@@ -55,6 +68,10 @@ module Floe
       def cleanup(_runner_context)
         raise NotImplementedError, "Must be implemented in a subclass"
       end
+
+      def wait(timeout: nil, events: %i[create update delete])
+        raise NotImplementedError, "Must be implemented in a subclass"
+      end
     end
   end
 end

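Because `set_runner` also accepts an already-constructed `Floe::Workflow::Runner`, custom runners can be registered per scheme and are later resolved by `for_resource` from the `scheme://` prefix of the resource string. A hedged sketch; `MyRunner` and the `custom://` scheme are hypothetical:

```ruby
# Hypothetical runner; a real one must implement the Runner interface
# (e.g. status!, running?, output, cleanup, wait).
class MyRunner < Floe::Workflow::Runner
end

Floe::Workflow::Runner.set_runner("docker", "docker")     # the built-in default
Floe::Workflow::Runner.set_runner("custom", MyRunner.new) # hypothetical scheme

Floe::Workflow::Runner.for_resource("custom://some-image:latest") # => the MyRunner instance
Floe::Workflow::Runner.for_resource("ftp://some-image")           # => raises ArgumentError (unknown scheme)
```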
data/lib/floe/workflow/state.rb
CHANGED
@@ -33,12 +33,12 @@ module Floe
         raise Floe::InvalidWorkflowError, "State name [#{name}] must be less than or equal to 80 characters" if name.length > 80
       end
 
-      def wait(timeout:
+      def wait(timeout: nil)
         start = Time.now.utc
 
         loop do
           return 0 if ready?
-          return Errno::EAGAIN if timeout.zero? || Time.now.utc - start > timeout
+          return Errno::EAGAIN if timeout && (timeout.zero? || Time.now.utc - start > timeout)
 
           sleep(1)
         end
@@ -93,6 +93,14 @@ module Floe
         context.state.key?("FinishedTime")
       end
 
+      def waiting?
+        context.state["WaitUntil"] && Time.now.utc <= Time.parse(context.state["WaitUntil"])
+      end
+
+      def wait_until
+        context.state["WaitUntil"] && Time.parse(context.state["WaitUntil"])
+      end
+
       private
 
       def wait_until!(seconds: nil, time: nil)
@@ -105,10 +113,6 @@ module Floe
           time.iso8601
         end
       end
-
-      def waiting?
-        context.state["WaitUntil"] && Time.now.utc <= Time.parse(context.state["WaitUntil"])
-      end
     end
   end
 end

data/lib/floe/workflow/states/non_terminal_mixin.rb
CHANGED
@@ -4,6 +4,11 @@ module Floe
   class Workflow
     module States
       module NonTerminalMixin
+        def finish
+          context.next_state = end? ? nil : @next
+          super
+        end
+
         def validate_state_next!
           raise Floe::InvalidWorkflowError, "Missing \"Next\" field in state [#{name}]" if @next.nil? && !@end
           raise Floe::InvalidWorkflowError, "\"Next\" [#{@next}] not in \"States\" for state [#{name}]" if @next && !workflow.payload["States"].key?(@next)

data/lib/floe/workflow/states/task.rb
CHANGED
@@ -46,18 +46,19 @@ module Floe
         end
 
         def finish
+          super
+
           output = runner.output(context.state["RunnerContext"])
 
           if success?
             output = parse_output(output)
             context.state["Output"] = process_output(context.input.dup, output)
-            context.next_state = next_state
           else
+            context.next_state = nil
             error = parse_error(output)
             retry_state!(error) || catch_error!(error) || fail_workflow!(error)
           end
 
-          super
         ensure
           runner.cleanup(context.state["RunnerContext"])
         end

data/lib/floe/workflow/states/wait.rb
CHANGED
@@ -28,10 +28,9 @@ module Floe
 
         def start(input)
           super
-          input = input_path.value(context, input)
 
-
-          context.
+          input = input_path.value(context, input)
+          context.output = output_path.value(context, input)
 
           wait_until!(
             :seconds => seconds_path ? seconds_path.value(context, input).to_i : seconds,

data/lib/floe/workflow.rb
CHANGED
@@ -16,21 +16,72 @@ module Floe
         new(payload, context, credentials, name)
       end
 
-      def wait(workflows, timeout:
+      def wait(workflows, timeout: nil, &block)
+        workflows = [workflows] if workflows.kind_of?(self)
         logger.info("checking #{workflows.count} workflows...")
 
-
-        ready
+        run_until = Time.now.utc + timeout if timeout.to_i > 0
+        ready = []
+        queue = Queue.new
+        wait_thread = Thread.new do
+          loop do
+            Runner.for_resource("docker").wait do |event, runner_context|
+              queue.push([event, runner_context])
+            end
+          end
+        end
 
         loop do
           ready = workflows.select(&:step_nonblock_ready?)
-          break if
-
-
+          break if block.nil? && !ready.empty?
+
+          ready.each(&block)
+
+          # Break if all workflows are completed or we've exceeded the
+          # requested timeout
+          break if workflows.all?(&:end?)
+          break if timeout && (timeout.zero? || Time.now.utc > run_until)
+
+          # Find the earliest time that we should wakeup if no container events
+          # are caught, either a workflow in a Wait or Retry state or we've
+          # exceeded the requested timeout
+          wait_until = workflows.map(&:wait_until)
+                                .unshift(run_until)
+                                .compact
+                                .min
+
+          # If a workflow is in a waiting state wakeup the main thread when
+          # it will be done sleeping
+          if wait_until
+            sleep_thread = Thread.new do
+              sleep_duration = wait_until - Time.now.utc
+              sleep sleep_duration if sleep_duration > 0
+              queue.push(nil)
+            end
+          end
+
+          loop do
+            # Block until an event is raised
+            event, runner_context = queue.pop
+            break if event.nil?
+
+            # If the event is for one of our workflows set the updated runner_context
+            workflows.each do |workflow|
+              next unless workflow.context.state.dig("RunnerContext", "container_ref") == runner_context["container_ref"]
+
+              workflow.context.state["RunnerContext"] = runner_context
+            end
+
+            break if queue.empty?
+          end
+        ensure
+          sleep_thread&.kill
         end
 
         logger.info("checking #{workflows.count} workflows...Complete - #{ready.count} ready")
         ready
+      ensure
+        wait_thread&.kill
       end
     end
 
@@ -74,7 +125,7 @@ module Floe
       current_state.run_nonblock!
     end
 
-    def step_nonblock_wait(timeout:
+    def step_nonblock_wait(timeout: nil)
       current_state.wait(:timeout => timeout)
     end
 
@@ -82,6 +133,14 @@ module Floe
       current_state.ready?
     end
 
+    def waiting?
+      current_state.waiting?
+    end
+
+    def wait_until
+      current_state.wait_until
+    end
+
     def status
       context.status
     end

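This rewritten `Workflow.wait` is what exe/floe now calls: a background thread streams runner events into a queue, the main loop wakes up when an event matches a workflow's `container_ref` or when the earliest `wait_until` passes, and ready workflows are yielded to the block. Used directly it looks roughly like this; the workflow objects are assumed to exist already:

```ruby
workflows = [workflow1, workflow2] # already-built Floe::Workflow instances

# Run all workflows to completion, stepping each as soon as its container
# produces an event; no timeout means wait indefinitely.
Floe::Workflow.wait(workflows, &:run_nonblock)

# Without a block, return as soon as at least one workflow is ready to step
# (or the timeout expires), then step the ready ones yourself.
ready = Floe::Workflow.wait(workflows, :timeout => 10)
ready.each(&:run_nonblock)
```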
data/lib/floe.rb
CHANGED
@@ -45,7 +45,27 @@ module Floe
     @logger ||= NullLogger.new
   end
 
+  # Set the logger to use
+  #
+  # @example
+  #   require "logger"
+  #   Floe.logger = Logger.new($stdout)
+  #
+  # @param logger [Logger] logger to use for logging actions
   def self.logger=(logger)
     @logger = logger
   end
+
+  # Set the runner to use
+  #
+  # @example
+  #   Floe.set_runner "docker", kubernetes", {}
+  #   Floe.set_runner "docker", Floe::Workflow::Runner::Kubernetes.new({})
+  #
+  # @param scheme [String] scheme Protocol to register (e.g.: docker)
+  # @param name_or_instance [String|Floe::Workflow::Runner] Name of runner to use for docker (e.g.: docker)
+  # @param options [Hash] Options for constructor of the runner (optional)
+  def self.set_runner(scheme, name_or_instance, options = {})
+    Floe::Workflow::Runner.set_runner(scheme, name_or_instance, options)
+  end
 end

metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: floe
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.9.0
 platform: ruby
 authors:
 - ManageIQ Developers
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-
+date: 2024-02-19 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: awesome_spawn
@@ -16,14 +16,28 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.
+        version: '1.6'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '1.
+        version: '1.6'
+- !ruby/object:Gem::Dependency
+  name: io-wait
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: jsonpath
   requirement: !ruby/object:Gem::Requirement