floe 0.8.0 → 0.10.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: c121968c8c3f9c70edaa20c0ab3e7937206623b7c06c0cf0f300aceddcde7814
- data.tar.gz: 943d9e77a9e8309a771e57fee5d6f70e5026a4d254b0da5072b768a99f427d1a
+ metadata.gz: 2b158e514e08902a1138c7b2878bb8633ca7c1abb59d4b77b6d9ce568c65710b
+ data.tar.gz: 32d053bba54e8c35645a636771964692e435ad72ee38e03c8c22b4da5bce25ec
  SHA512:
- metadata.gz: 4fa820c364e8dc3266c2fc1eb29d682e72a9d2ab4d8f154aac3a88ff6d4c35e45e124fe714b80110b52e78a0ab654e25f956bc8a39f45f05f3990c0ab26d01ed
- data.tar.gz: b62ac48c4105497c1d3dd2f2ef9d8aec60c25bcadf71046e8488175705365d0d84facde34c11f0047660b5ab2f4390f13babbe8ba2ab6becf31027f027e0b833
+ metadata.gz: e39301eed1de9189b66f07a7ecdda807974bf47495b4906bd57f6f8ed862fab38448668155f79e12c87da8935f4054d14dbcd08e292429b194768d2234fa7c46
+ data.tar.gz: 5cf0476521a1cd4fc0dcc4edeccdd00c6c72ee88bf7428103b86f5f30097608f934f19e75499a61b5d7e7380f2e1451dccfd82f796eee6bcef041dd6f3db8664
data/CHANGELOG.md CHANGED
@@ -4,6 +4,29 @@ This project adheres to [Semantic Versioning](http://semver.org/).
 
  ## [Unreleased]
 
+ ## [0.10.0] - 2024-04-05
+ ### Fixed
+ - Fix rubocops ([#164](https://github.com/ManageIQ/floe/pull/164))
+ - Output should contain errors ([#165](https://github.com/ManageIQ/floe/pull/165))
+
+ ### Added
+ - Add simplecov ([#162](https://github.com/ManageIQ/floe/pull/162))
+ - Add ability to pass context on the command line ([#161](https://github.com/ManageIQ/floe/pull/161))
+ - Add specs for `Workflow#wait_until`, `#waiting?` ([#166](https://github.com/ManageIQ/floe/pull/166))
+
+ ### Changed
+ - Drop non-standard Error/Cause fields ([#167](https://github.com/ManageIQ/floe/pull/167))
+
+ ## [0.9.0] - 2024-02-19
+ ### Changed
+ - Default to wait indefinitely ([#157](https://github.com/ManageIQ/floe/pull/157))
+ - Create docker runners factory and add scheme ([#152](https://github.com/ManageIQ/floe/pull/152))
+ - Add a watch method to Workflow::Runner for event driven updates ([#95](https://github.com/ManageIQ/floe/pull/95))
+
+ ### Fixed
+ - Fix waiting on extremely short durations ([#160](https://github.com/ManageIQ/floe/pull/160))
+ - Fix wait state missing finish ([#159](https://github.com/ManageIQ/floe/pull/159))
+
  ## [0.8.0] - 2024-01-17
  ### Added
  - Add CLI shorthand options for docker runner ([#147](https://github.com/ManageIQ/floe/pull/147))
@@ -126,7 +149,9 @@ This project adheres to [Semantic Versioning](http://semver.org/).
  ### Added
  - Initial release
 
- [Unreleased]: https://github.com/ManageIQ/floe/compare/v0.8.0...HEAD
+ [Unreleased]: https://github.com/ManageIQ/floe/compare/v0.10.0...HEAD
+ [0.10.0]: https://github.com/ManageIQ/floe/compare/v0.9.0...v0.10.0
+ [0.9.0]: https://github.com/ManageIQ/floe/compare/v0.8.0...v0.9.0
  [0.8.0]: https://github.com/ManageIQ/floe/compare/v0.7.0...v0.8.0
  [0.7.0]: https://github.com/ManageIQ/floe/compare/v0.6.1...v0.7.0
  [0.6.1]: https://github.com/ManageIQ/floe/compare/v0.6.0...v0.6.1
data/Gemfile CHANGED
@@ -7,9 +7,3 @@ require File.join(Bundler::Plugin.index.load_paths("bundler-inject")[0], "bundle
 
  # Specify your gem's dependencies in floe.gemspec
  gemspec
-
- gem "manageiq-style"
- gem "rake", "~> 13.0"
- gem "rspec"
- gem "rubocop"
- gem "timecop"
data/exe/floe CHANGED
@@ -10,6 +10,7 @@ opts = Optimist.options do
 
  opt :workflow, "Path to your workflow json (legacy)", :type => :string
  opt :input, "JSON payload to input to the workflow (legacy)", :type => :string
+ opt :context, "JSON payload of the Context", :type => :string
  opt :credentials, "JSON payload with credentials", :type => :string
  opt :credentials_file, "Path to a file with credentials", :type => :string
  opt :docker_runner, "Type of runner for docker images", :type => :string, :short => 'r'
@@ -20,8 +21,6 @@ opts = Optimist.options do
  opt :kubernetes, "Use kubernetes to run images (short for --docker_runner=kubernetes)", :type => :boolean
  end
 
- Optimist.die(:docker_runner, "must be one of #{Floe::Workflow::Runner::TYPES.join(", ")}") unless Floe::Workflow::Runner::TYPES.include?(opts[:docker_runner])
-
  # legacy support for --workflow
  args = ARGV.empty? ? [opts[:workflow], opts[:input]] : ARGV
  Optimist.die(:workflow, "must be specified") if args.empty?
@@ -34,18 +33,13 @@ opts[:docker_runner] ||= "kubernetes" if opts[:kubernetes]
  require "logger"
  Floe.logger = Logger.new($stdout)
 
- runner_klass = case opts[:docker_runner]
- when "docker"
- Floe::Workflow::Runner::Docker
- when "podman"
- Floe::Workflow::Runner::Podman
- when "kubernetes"
- Floe::Workflow::Runner::Kubernetes
- end
-
  runner_options = opts[:docker_runner_options].to_h { |opt| opt.split("=", 2) }
 
- Floe::Workflow::Runner.docker_runner = runner_klass.new(runner_options)
+ begin
+ Floe.set_runner("docker", opts[:docker_runner], runner_options)
+ rescue ArgumentError => e
+ Optimist.die(:docker_runner, e.message)
+ end
 
  credentials =
  if opts[:credentials_given]
@@ -56,19 +50,13 @@ credentials =
 
  workflows =
  args.each_slice(2).map do |workflow, input|
- context = Floe::Workflow::Context.new(:input => input || opts[:input] || "{}")
+ context = Floe::Workflow::Context.new(opts[:context], :input => input || opts[:input] || "{}")
  Floe::Workflow.load(workflow, context, credentials)
  end
 
  # run
 
- outstanding = workflows.dup
- until outstanding.empty?
- ready = outstanding.select(&:step_nonblock_ready?)
- ready.map(&:run_nonblock)
- outstanding -= ready.select(&:end?)
- sleep(1) if !outstanding.empty?
- end
+ Floe::Workflow.wait(workflows, &:run_nonblock)
 
  # display status
 
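With the changes above, `exe/floe` now accepts an optional Context payload and hands the run loop to `Floe::Workflow.wait`. A hypothetical invocation (the workflow path and JSON payload are illustrative, not taken from this release):

    floe --context '{"Execution":{"Input":{"foo":"bar"}}}' my-workflow.asl.json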
data/floe.gemspec CHANGED
@@ -29,8 +29,16 @@ Gem::Specification.new do |spec|
  spec.executables = spec.files.grep(%r{\Aexe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
 
- spec.add_dependency "awesome_spawn", "~>1.0"
+ spec.add_dependency "awesome_spawn", "~>1.6"
+ spec.add_dependency "io-wait"
  spec.add_dependency "jsonpath", "~>1.1"
  spec.add_dependency "kubeclient", "~>4.7"
  spec.add_dependency "optimist", "~>3.0"
+
+ spec.add_development_dependency "manageiq-style"
+ spec.add_development_dependency "rake", "~> 13.0"
+ spec.add_development_dependency "rspec"
+ spec.add_development_dependency "rubocop"
+ spec.add_development_dependency "simplecov", ">= 0.21.2"
+ spec.add_development_dependency "timecop"
  end
data/lib/floe/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Floe
- VERSION = "0.8.0".freeze
+ VERSION = "0.10.0"
  end
@@ -3,19 +3,19 @@
3
3
  module Floe
4
4
  class Workflow
5
5
  class Context
6
+ # @param context [Json|Hash] (default, create another with input and execution params)
7
+ # @param input [Hash] (default: {})
6
8
  def initialize(context = nil, input: {})
7
9
  context = JSON.parse(context) if context.kind_of?(String)
8
10
  input = JSON.parse(input) if input.kind_of?(String)
9
11
 
10
- @context = context || {
11
- "Execution" => {
12
- "Input" => input
13
- },
14
- "State" => {},
15
- "StateHistory" => [],
16
- "StateMachine" => {},
17
- "Task" => {}
18
- }
12
+ @context = context || {}
13
+ self["Execution"] ||= {}
14
+ self["Execution"]["Input"] ||= input
15
+ self["State"] ||= {}
16
+ self["StateHistory"] ||= []
17
+ self["StateMachine"] ||= {}
18
+ self["Task"] ||= {}
19
19
  end
20
20
 
21
21
  def execution
@@ -30,6 +30,10 @@ module Floe
30
30
  started? && !ended?
31
31
  end
32
32
 
33
+ def failed?
34
+ output&.key?("Error") || false
35
+ end
36
+
33
37
  def ended?
34
38
  execution.key?("EndTime")
35
39
  end
@@ -67,7 +71,7 @@ module Floe
67
71
  "pending"
68
72
  elsif running?
69
73
  "running"
70
- elsif state["Error"]
74
+ elsif failed?
71
75
  "failure"
72
76
  else
73
77
  "success"
@@ -19,11 +19,11 @@ module Floe
19
19
  super
20
20
 
21
21
  raise Floe::InvalidWorkflowError, "Invalid Reference Path" if payload.match?(/@|,|:|\?/)
22
+
22
23
  @path = JsonPath.new(payload)
23
24
  .path[1..]
24
25
  .map { |v| v.match(/\[(?<name>.+)\]/)["name"] }
25
- .map { |v| v[0] == "'" ? v.delete("'") : v.to_i }
26
- .compact
26
+ .filter_map { |v| v[0] == "'" ? v.delete("'") : v.to_i }
27
27
  end
28
28
 
29
29
  def get(context)
@@ -10,6 +10,7 @@ module Floe
10
10
 
11
11
  def initialize(options = {})
12
12
  require "awesome_spawn"
13
+ require "io/wait"
13
14
  require "tempfile"
14
15
 
15
16
  super
@@ -45,10 +46,63 @@ module Floe
45
46
  delete_secret(secrets_file) if secrets_file
46
47
  end
47
48
 
49
+ def wait(timeout: nil, events: %i[create update delete], &block)
50
+ until_timestamp = Time.now.utc + timeout if timeout
51
+
52
+ r, w = IO.pipe
53
+
54
+ pid = AwesomeSpawn.run_detached(
55
+ self.class::DOCKER_COMMAND, :err => :out, :out => w, :params => wait_params(until_timestamp)
56
+ )
57
+
58
+ w.close
59
+
60
+ loop do
61
+ readable_timeout = until_timestamp - Time.now.utc if until_timestamp
62
+
63
+ # Wait for our end of the pipe to be readable and if it didn't timeout
64
+ # get the events from stdout
65
+ next if r.wait_readable(readable_timeout).nil?
66
+
67
+ # Get all events while the pipe is readable
68
+ notices = []
69
+ while r.ready?
70
+ notice = r.gets
71
+
72
+ # If the process has exited `r.gets` returns `nil` and the pipe is
73
+ # always `ready?`
74
+ break if notice.nil?
75
+
76
+ event, runner_context = parse_notice(notice)
77
+ next if event.nil? || !events.include?(event)
78
+
79
+ notices << [event, runner_context]
80
+ end
81
+
82
+ # If we're given a block yield the events otherwise return them
83
+ if block
84
+ notices.each(&block)
85
+ else
86
+ # Terminate the `docker events` process before returning the events
87
+ sigterm(pid)
88
+
89
+ return notices
90
+ end
91
+
92
+ # Check that the `docker events` process is still alive
93
+ Process.kill(0, pid)
94
+ rescue Errno::ESRCH
95
+ # Break out of the loop if the `docker events` process has exited
96
+ break
97
+ end
98
+ ensure
99
+ r.close
100
+ end
101
+
48
102
  def status!(runner_context)
49
103
  return if runner_context.key?("Error")
50
104
 
51
- runner_context["container_state"] = inspect_container(runner_context["container_ref"]).first&.dig("State")
105
+ runner_context["container_state"] = inspect_container(runner_context["container_ref"])&.dig("State")
52
106
  end
53
107
 
54
108
  def running?(runner_context)
@@ -91,8 +145,45 @@ module Floe
91
145
  params << image
92
146
  end
93
147
 
148
+ def wait_params(until_timestamp)
149
+ params = ["events", [:format, "{{json .}}"], [:filter, "type=container"], [:since, Time.now.utc.to_i]]
150
+ params << [:until, until_timestamp.to_i] if until_timestamp
151
+ params
152
+ end
153
+
154
+ def parse_notice(notice)
155
+ notice = JSON.parse(notice)
156
+
157
+ status = notice["status"]
158
+ event = docker_event_status_to_event(status)
159
+ running = event != :delete
160
+
161
+ name, exit_code = notice.dig("Actor", "Attributes")&.values_at("name", "exitCode")
162
+
163
+ runner_context = {"container_ref" => name, "container_state" => {"Running" => running, "ExitCode" => exit_code.to_i}}
164
+
165
+ [event, runner_context]
166
+ rescue JSON::ParserError
167
+ []
168
+ end
169
+
170
+ def docker_event_status_to_event(status)
171
+ case status
172
+ when "create"
173
+ :create
174
+ when "start"
175
+ :update
176
+ when "die", "destroy"
177
+ :delete
178
+ else
179
+ :unknown
180
+ end
181
+ end
182
+
94
183
  def inspect_container(container_id)
95
- JSON.parse(docker!("inspect", container_id).output)
184
+ JSON.parse(docker!("inspect", container_id).output).first
185
+ rescue
186
+ nil
96
187
  end
97
188
 
98
189
  def delete_container(container_id)
@@ -116,6 +207,12 @@ module Floe
116
207
  secrets_file.path
117
208
  end
118
209
 
210
+ def sigterm(pid)
211
+ Process.kill("TERM", pid)
212
+ rescue Errno::ESRCH
213
+ nil
214
+ end
215
+
119
216
  def global_docker_options
120
217
  []
121
218
  end
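For reference, `wait_params` above assembles a long-running `docker events` invocation roughly equivalent to the following (epoch timestamps shown as placeholders):

    docker events --format '{{json .}}' --filter type=container --since <start epoch> --until <timeout epoch>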
@@ -6,9 +6,10 @@ module Floe
6
6
  image.match(%r{^(?<repository>.+/)?(?<image>.+):(?<tag>.+)$})&.named_captures&.dig("image")
7
7
  end
8
8
 
9
- MAX_CONTAINER_NAME_SIZE = 63 - 5 - 9 # 63 is the max kubernetes pod name length
10
- # -5 for the "floe-" prefix
11
- # -9 for the random hex suffix and leading hyphen
9
+ # 63 is the max kubernetes pod name length
10
+ # -5 for the "floe-" prefix
11
+ # -9 for the random hex suffix and leading hyphen
12
+ MAX_CONTAINER_NAME_SIZE = 63 - 5 - 9
12
13
 
13
14
  def container_name(image)
14
15
  name = image_name(image)
@@ -53,7 +53,7 @@ module Floe
53
53
  name = container_name(image)
54
54
  secret = create_secret!(secrets) if secrets && !secrets.empty?
55
55
 
56
- runner_context = {"container_ref" => name, "secrets_ref" => secret}
56
+ runner_context = {"container_ref" => name, "container_state" => {"phase" => "Pending"}, "secrets_ref" => secret}
57
57
 
58
58
  begin
59
59
  create_pod!(name, image, env, secret)
@@ -102,6 +102,54 @@ module Floe
102
102
  delete_secret(secret) if secret
103
103
  end
104
104
 
105
+ def wait(timeout: nil, events: %i[create update delete])
106
+ retry_connection = true
107
+
108
+ begin
109
+ watcher = kubeclient.watch_pods(:namespace => namespace)
110
+
111
+ retry_connection = true
112
+
113
+ if timeout.to_i > 0
114
+ timeout_thread = Thread.new do
115
+ sleep(timeout)
116
+ watcher.finish
117
+ end
118
+ end
119
+
120
+ watcher.each do |notice|
121
+ break if error_notice?(notice)
122
+
123
+ event = kube_notice_type_to_event(notice.type)
124
+ next unless events.include?(event)
125
+
126
+ runner_context = parse_notice(notice)
127
+ next if runner_context.nil?
128
+
129
+ if block_given?
130
+ yield [event, runner_context]
131
+ else
132
+ timeout_thread&.kill # If we break out before the timeout, kill the timeout thread
133
+ return [[event, runner_context]]
134
+ end
135
+ end
136
+ rescue Kubeclient::HttpError => err
137
+ raise unless err.error_code == 401 && retry_connection
138
+
139
+ @kubeclient = nil
140
+ retry_connection = false
141
+ retry
142
+ ensure
143
+ begin
144
+ watcher&.finish
145
+ rescue
146
+ nil
147
+ end
148
+
149
+ timeout_thread&.join(0)
150
+ end
151
+ end
152
+
105
153
  private
106
154
 
107
155
  attr_reader :ca_file, :kubeconfig_file, :kubeconfig_context, :namespace, :server, :token, :verify_ssl
@@ -116,7 +164,7 @@ module Floe
116
164
 
117
165
  def failed_container_states(context)
118
166
  container_statuses = context.dig("container_state", "containerStatuses") || []
119
- container_statuses.map { |status| status["state"]&.values&.first }.compact
167
+ container_statuses.filter_map { |status| status["state"]&.values&.first }
120
168
  .select { |state| FAILURE_REASONS.include?(state["reason"]) }
121
169
  end
122
170
 
@@ -217,6 +265,41 @@ module Floe
217
265
  nil
218
266
  end
219
267
 
268
+ def kube_notice_type_to_event(type)
269
+ case type
270
+ when "ADDED"
271
+ :create
272
+ when "MODIFIED"
273
+ :update
274
+ when "DELETED"
275
+ :delete
276
+ else
277
+ :unknown
278
+ end
279
+ end
280
+
281
+ def error_notice?(notice)
282
+ return false unless notice.type == "ERROR"
283
+
284
+ message = notice.object&.message
285
+ code = notice.object&.code
286
+ reason = notice.object&.reason
287
+
288
+ logger.warn("Received [#{code} #{reason}], [#{message}]")
289
+
290
+ true
291
+ end
292
+
293
+ def parse_notice(notice)
294
+ return if notice.object.nil?
295
+
296
+ pod = notice.object
297
+ container_ref = pod.metadata.name
298
+ container_state = pod.to_h[:status].deep_stringify_keys
299
+
300
+ {"container_ref" => container_ref, "container_state" => container_state}
301
+ end
302
+
220
303
  def kubeclient
221
304
  return @kubeclient unless @kubeclient.nil?
222
305
 
@@ -55,6 +55,32 @@ module Floe
55
55
  nil
56
56
  end
57
57
 
58
+ def parse_notice(notice)
59
+ id, status, exit_code = JSON.parse(notice).values_at("ID", "Status", "ContainerExitCode")
60
+
61
+ event = podman_event_status_to_event(status)
62
+ running = event != :delete
63
+
64
+ runner_context = {"container_ref" => id, "container_state" => {"Running" => running, "ExitCode" => exit_code.to_i}}
65
+
66
+ [event, runner_context]
67
+ rescue JSON::ParserError
68
+ []
69
+ end
70
+
71
+ def podman_event_status_to_event(status)
72
+ case status
73
+ when "create"
74
+ :create
75
+ when "init", "start"
76
+ :update
77
+ when "died", "cleanup", "remove"
78
+ :delete
79
+ else
80
+ :unknown
81
+ end
82
+ end
83
+
58
84
  alias podman! docker!
59
85
 
60
86
  def global_docker_options
@@ -5,29 +5,42 @@ module Floe
5
5
  class Runner
6
6
  include Logging
7
7
 
8
- TYPES = %w[docker podman kubernetes].freeze
9
8
  OUTPUT_MARKER = "__FLOE_OUTPUT__\n"
10
9
 
11
10
  def initialize(_options = {})
12
11
  end
13
12
 
13
+ @runners = {}
14
14
  class << self
15
- attr_writer :docker_runner
15
+ # deprecated -- use Floe.set_runner instead
16
+ def docker_runner=(value)
17
+ set_runner("docker", value)
18
+ end
16
19
 
17
- def docker_runner
18
- @docker_runner ||= Floe::Workflow::Runner::Docker.new
20
+ # see Floe.set_runner
21
+ def set_runner(scheme, name_or_instance, options = {})
22
+ @runners[scheme] =
23
+ case name_or_instance
24
+ when "docker", nil
25
+ Floe::Workflow::Runner::Docker.new(options)
26
+ when "podman"
27
+ Floe::Workflow::Runner::Podman.new(options)
28
+ when "kubernetes"
29
+ Floe::Workflow::Runner::Kubernetes.new(options)
30
+ when Floe::Workflow::Runner
31
+ name_or_instance
32
+ else
33
+ raise ArgumentError, "docker runner must be one of: docker, podman, kubernetes"
34
+ end
19
35
  end
20
36
 
21
37
  def for_resource(resource)
22
38
  raise ArgumentError, "resource cannot be nil" if resource.nil?
23
39
 
40
+ # if no runners are set, default docker:// to docker
41
+ set_runner("docker", "docker") if @runners.empty?
24
42
  scheme = resource.split("://").first
25
- case scheme
26
- when "docker"
27
- docker_runner
28
- else
29
- raise "Invalid resource scheme [#{scheme}]"
30
- end
43
+ @runners[scheme] || raise(ArgumentError, "Invalid resource scheme [#{scheme}]")
31
44
  end
32
45
  end
33
46
 
@@ -55,6 +68,10 @@ module Floe
55
68
  def cleanup(_runner_context)
56
69
  raise NotImplementedError, "Must be implemented in a subclass"
57
70
  end
71
+
72
+ def wait(timeout: nil, events: %i[create update delete])
73
+ raise NotImplementedError, "Must be implemented in a subclass"
74
+ end
58
75
  end
59
76
  end
60
77
  end
@@ -33,12 +33,12 @@ module Floe
33
33
  raise Floe::InvalidWorkflowError, "State name [#{name}] must be less than or equal to 80 characters" if name.length > 80
34
34
  end
35
35
 
36
- def wait(timeout: 5)
36
+ def wait(timeout: nil)
37
37
  start = Time.now.utc
38
38
 
39
39
  loop do
40
40
  return 0 if ready?
41
- return Errno::EAGAIN if timeout.zero? || Time.now.utc - start > timeout
41
+ return Errno::EAGAIN if timeout && (timeout.zero? || Time.now.utc - start > timeout)
42
42
 
43
43
  sleep(1)
44
44
  end
@@ -58,7 +58,7 @@ module Floe
58
58
  context.state["Guid"] = SecureRandom.uuid
59
59
  context.state["EnteredTime"] = start_time
60
60
 
61
- logger.info("Running state: [#{context.state_name}] with input [#{context.input}]...")
61
+ logger.info("Running state: [#{long_name}] with input [#{context.input}]...")
62
62
  end
63
63
 
64
64
  def finish
@@ -70,7 +70,8 @@ module Floe
70
70
  context.state["Duration"] = finished_time - entered_time
71
71
  context.execution["EndTime"] = finished_time_iso if context.next_state.nil?
72
72
 
73
- logger.info("Running state: [#{context.state_name}] with input [#{context.input}]...Complete - next state: [#{context.next_state}] output: [#{context.output}]")
73
+ level = context.output&.[]("Error") ? :error : :info
74
+ logger.public_send(level, "Running state: [#{long_name}] with input [#{context.input}]...Complete #{context.next_state ? "- next state [#{context.next_state}]" : "workflow -"} output: [#{context.output}]")
74
75
 
75
76
  context.state_history << context.state
76
77
 
@@ -93,6 +94,18 @@ module Floe
93
94
  context.state.key?("FinishedTime")
94
95
  end
95
96
 
97
+ def waiting?
98
+ context.state["WaitUntil"] && Time.now.utc <= Time.parse(context.state["WaitUntil"])
99
+ end
100
+
101
+ def wait_until
102
+ context.state["WaitUntil"] && Time.parse(context.state["WaitUntil"])
103
+ end
104
+
105
+ def long_name
106
+ "#{self.class.name.split("::").last}:#{name}"
107
+ end
108
+
96
109
  private
97
110
 
98
111
  def wait_until!(seconds: nil, time: nil)
@@ -105,10 +118,6 @@ module Floe
105
118
  time.iso8601
106
119
  end
107
120
  end
108
-
109
- def waiting?
110
- context.state["WaitUntil"] && Time.now.utc <= Time.parse(context.state["WaitUntil"])
111
- end
112
121
  end
113
122
  end
114
123
  end
@@ -18,14 +18,14 @@ module Floe
18
18
  @output_path = Path.new(payload.fetch("OutputPath", "$"))
19
19
  end
20
20
 
21
- def start(input)
22
- super
23
- input = input_path.value(context, input)
21
+ def finish
22
+ input = input_path.value(context, context.input)
24
23
  next_state = choices.detect { |choice| choice.true?(context, input) }&.next || default
25
24
  output = output_path.value(context, input)
26
25
 
27
26
  context.next_state = next_state
28
27
  context.output = output
28
+ super
29
29
  end
30
30
 
31
31
  def running?
@@ -15,18 +15,16 @@ module Floe
15
15
  @error_path = Path.new(payload["ErrorPath"]) if payload["ErrorPath"]
16
16
  end
17
17
 
18
- def start(input)
19
- super
18
+ def finish
20
19
  context.next_state = nil
21
20
  # TODO: support intrinsic functions here
22
21
  # see https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-fail-state.html
23
22
  # https://docs.aws.amazon.com/step-functions/latest/dg/amazon-states-language-intrinsic-functions.html#asl-intrsc-func-generic
24
23
  context.output = {
25
- "Error" => @error_path ? @error_path.value(context, input) : error,
26
- "Cause" => @cause_path ? @cause_path.value(context, input) : cause
24
+ "Error" => @error_path ? @error_path.value(context, context.input) : error,
25
+ "Cause" => @cause_path ? @cause_path.value(context, context.input) : cause
27
26
  }.compact
28
- context.state["Error"] = context.output["Error"]
29
- context.state["Cause"] = context.output["Cause"]
27
+ super
30
28
  end
31
29
 
32
30
  def running?
@@ -4,6 +4,11 @@ module Floe
4
4
  class Workflow
5
5
  module States
6
6
  module NonTerminalMixin
7
+ def finish
8
+ context.next_state = end? ? nil : @next
9
+ super
10
+ end
11
+
7
12
  def validate_state_next!
8
13
  raise Floe::InvalidWorkflowError, "Missing \"Next\" field in state [#{name}]" if @next.nil? && !@end
9
14
  raise Floe::InvalidWorkflowError, "\"Next\" [#{@next}] not in \"States\" for state [#{name}]" if @next && !workflow.payload["States"].key?(@next)
@@ -24,13 +24,11 @@ module Floe
24
24
  validate_state!
25
25
  end
26
26
 
27
- def start(input)
28
- super
29
-
30
- input = process_input(input)
31
-
27
+ def finish
28
+ input = process_input(context.input)
32
29
  context.output = process_output(input, result)
33
30
  context.next_state = end? ? nil : @next
31
+ super
34
32
  end
35
33
 
36
34
  def running?
@@ -10,10 +10,10 @@ module Floe
10
10
  super
11
11
  end
12
12
 
13
- def start(input)
14
- super
13
+ def finish
15
14
  context.next_state = nil
16
- context.output = input
15
+ context.output = context.input
16
+ super
17
17
  end
18
18
 
19
19
  def running?
@@ -50,14 +50,14 @@ module Floe
50
50
 
51
51
  if success?
52
52
  output = parse_output(output)
53
- context.state["Output"] = process_output(context.input.dup, output)
54
- context.next_state = next_state
53
+ context.output = process_output(context.input.dup, output)
54
+ super
55
55
  else
56
- error = parse_error(output)
56
+ context.next_state = nil
57
+ context.output = error = parse_error(output)
58
+ super
57
59
  retry_state!(error) || catch_error!(error) || fail_workflow!(error)
58
60
  end
59
-
60
- super
61
61
  ensure
62
62
  runner.cleanup(context.state["RunnerContext"])
63
63
  end
@@ -109,6 +109,7 @@ module Floe
109
109
 
110
110
  wait_until!(:seconds => retrier.sleep_duration(context["State"]["RetryCount"]))
111
111
  context.next_state = context.state_name
112
+ logger.info("Running state: [#{long_name}] with input [#{context.input}]...Retry - delay: #{wait_until}")
112
113
  true
113
114
  end
114
115
 
@@ -118,13 +119,15 @@ module Floe
118
119
 
119
120
  context.next_state = catcher.next
120
121
  context.output = catcher.result_path.set(context.input, error)
122
+ logger.info("Running state: [#{long_name}] with input [#{context.input}]...CatchError - next state: [#{context.next_state}] output: [#{context.output}]")
123
+
121
124
  true
122
125
  end
123
126
 
124
127
  def fail_workflow!(error)
125
128
  context.next_state = nil
126
129
  context.output = {"Error" => error["Error"], "Cause" => error["Cause"]}.compact
127
- context.state["Error"] = context.output["Error"]
130
+ logger.error("Running state: [#{long_name}] with input [#{context.input}]...Complete workflow - output: [#{context.output}]")
128
131
  end
129
132
 
130
133
  def parse_error(output)
@@ -28,10 +28,8 @@ module Floe
28
28
 
29
29
  def start(input)
30
30
  super
31
- input = input_path.value(context, input)
32
31
 
33
- context.output = output_path.value(context, input)
34
- context.next_state = end? ? nil : @next
32
+ input = input_path.value(context, context.input)
35
33
 
36
34
  wait_until!(
37
35
  :seconds => seconds_path ? seconds_path.value(context, input).to_i : seconds,
@@ -39,6 +37,12 @@ module Floe
39
37
  )
40
38
  end
41
39
 
40
+ def finish
41
+ input = input_path.value(context, context.input)
42
+ context.output = output_path.value(context, input)
43
+ super
44
+ end
45
+
42
46
  def running?
43
47
  waiting?
44
48
  end
data/lib/floe/workflow.rb CHANGED
@@ -16,25 +16,76 @@ module Floe
16
16
  new(payload, context, credentials, name)
17
17
  end
18
18
 
19
- def wait(workflows, timeout: 5)
19
+ def wait(workflows, timeout: nil, &block)
20
+ workflows = [workflows] if workflows.kind_of?(self)
20
21
  logger.info("checking #{workflows.count} workflows...")
21
22
 
22
- start = Time.now.utc
23
- ready = []
23
+ run_until = Time.now.utc + timeout if timeout.to_i > 0
24
+ ready = []
25
+ queue = Queue.new
26
+ wait_thread = Thread.new do
27
+ loop do
28
+ Runner.for_resource("docker").wait do |event, runner_context|
29
+ queue.push([event, runner_context])
30
+ end
31
+ end
32
+ end
24
33
 
25
34
  loop do
26
35
  ready = workflows.select(&:step_nonblock_ready?)
27
- break if timeout.zero? || Time.now.utc - start > timeout || !ready.empty?
28
-
29
- sleep(1)
36
+ break if block.nil? && !ready.empty?
37
+
38
+ ready.each(&block)
39
+
40
+ # Break if all workflows are completed or we've exceeded the
41
+ # requested timeout
42
+ break if workflows.all?(&:end?)
43
+ break if timeout && (timeout.zero? || Time.now.utc > run_until)
44
+
45
+ # Find the earliest time that we should wakeup if no container events
46
+ # are caught, either a workflow in a Wait or Retry state or we've
47
+ # exceeded the requested timeout
48
+ wait_until = workflows.map(&:wait_until)
49
+ .unshift(run_until)
50
+ .compact
51
+ .min
52
+
53
+ # If a workflow is in a waiting state wakeup the main thread when
54
+ # it will be done sleeping
55
+ if wait_until
56
+ sleep_thread = Thread.new do
57
+ sleep_duration = wait_until - Time.now.utc
58
+ sleep sleep_duration if sleep_duration > 0
59
+ queue.push(nil)
60
+ end
61
+ end
62
+
63
+ loop do
64
+ # Block until an event is raised
65
+ event, runner_context = queue.pop
66
+ break if event.nil?
67
+
68
+ # If the event is for one of our workflows set the updated runner_context
69
+ workflows.each do |workflow|
70
+ next unless workflow.context.state.dig("RunnerContext", "container_ref") == runner_context["container_ref"]
71
+
72
+ workflow.context.state["RunnerContext"] = runner_context
73
+ end
74
+
75
+ break if queue.empty?
76
+ end
77
+ ensure
78
+ sleep_thread&.kill
30
79
  end
31
80
 
32
81
  logger.info("checking #{workflows.count} workflows...Complete - #{ready.count} ready")
33
82
  ready
83
+ ensure
84
+ wait_thread&.kill
34
85
  end
35
86
  end
36
87
 
37
- attr_reader :context, :credentials, :payload, :states, :states_by_name, :start_at, :name
88
+ attr_reader :context, :credentials, :payload, :states, :states_by_name, :start_at, :name, :comment
38
89
 
39
90
  def initialize(payload, context = nil, credentials = {}, name = nil)
40
91
  payload = JSON.parse(payload) if payload.kind_of?(String)
@@ -49,6 +100,7 @@ module Floe
49
100
  @payload = payload
50
101
  @context = context
51
102
  @credentials = credentials || {}
103
+ @comment = payload["Comment"]
52
104
  @start_at = payload["StartAt"]
53
105
 
54
106
  @states = payload["States"].to_a.map { |state_name, state| State.build!(self, state_name, state) }
@@ -74,7 +126,7 @@ module Floe
74
126
  current_state.run_nonblock!
75
127
  end
76
128
 
77
- def step_nonblock_wait(timeout: 5)
129
+ def step_nonblock_wait(timeout: nil)
78
130
  current_state.wait(:timeout => timeout)
79
131
  end
80
132
 
@@ -82,6 +134,14 @@ module Floe
82
134
  current_state.ready?
83
135
  end
84
136
 
137
+ def waiting?
138
+ current_state.waiting?
139
+ end
140
+
141
+ def wait_until
142
+ current_state.wait_until
143
+ end
144
+
85
145
  def status
86
146
  context.status
87
147
  end
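With the event-driven `Workflow.wait` above, multiple workflows can be driven to completion without the old one-second polling loop. A minimal sketch (file paths are placeholders):

    workflows = ["wf1.asl.json", "wf2.asl.json"].map do |path|
      context = Floe::Workflow::Context.new(:input => "{}")
      Floe::Workflow.load(path, context, {})
    end
    Floe::Workflow.wait(workflows, &:run_nonblock)
    workflows.each { |wf| puts wf.status }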
data/lib/floe.rb CHANGED
@@ -45,7 +45,27 @@ module Floe
  @logger ||= NullLogger.new
  end
 
+ # Set the logger to use
+ #
+ # @example
+ # require "logger"
+ # Floe.logger = Logger.new($stdout)
+ #
+ # @param logger [Logger] logger to use for logging actions
  def self.logger=(logger)
  @logger = logger
  end
+
+ # Set the runner to use
+ #
+ # @example
+ # Floe.set_runner "docker", "kubernetes", {}
+ # Floe.set_runner "docker", Floe::Workflow::Runner::Kubernetes.new({})
+ #
+ # @param scheme [String] Protocol to register (e.g.: docker)
+ # @param name_or_instance [String|Floe::Workflow::Runner] Name of runner to use for docker (e.g.: docker)
+ # @param options [Hash] Options for constructor of the runner (optional)
+ def self.set_runner(scheme, name_or_instance, options = {})
+ Floe::Workflow::Runner.set_runner(scheme, name_or_instance, options)
+ end
  end
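The deprecated class-level setter shown in the Runner diff maps directly onto this helper; migrating is a one-line change (`runner_options` is a placeholder Hash):

    # before (deprecated)
    Floe::Workflow::Runner.docker_runner = Floe::Workflow::Runner::Podman.new(runner_options)
    # after
    Floe.set_runner("docker", "podman", runner_options)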
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: floe
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.8.0
4
+ version: 0.10.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - ManageIQ Developers
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2024-01-17 00:00:00.000000000 Z
11
+ date: 2024-04-05 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: awesome_spawn
@@ -16,14 +16,28 @@ dependencies:
16
16
  requirements:
17
17
  - - "~>"
18
18
  - !ruby/object:Gem::Version
19
- version: '1.0'
19
+ version: '1.6'
20
20
  type: :runtime
21
21
  prerelease: false
22
22
  version_requirements: !ruby/object:Gem::Requirement
23
23
  requirements:
24
24
  - - "~>"
25
25
  - !ruby/object:Gem::Version
26
- version: '1.0'
26
+ version: '1.6'
27
+ - !ruby/object:Gem::Dependency
28
+ name: io-wait
29
+ requirement: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - ">="
32
+ - !ruby/object:Gem::Version
33
+ version: '0'
34
+ type: :runtime
35
+ prerelease: false
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - ">="
39
+ - !ruby/object:Gem::Version
40
+ version: '0'
27
41
  - !ruby/object:Gem::Dependency
28
42
  name: jsonpath
29
43
  requirement: !ruby/object:Gem::Requirement
@@ -66,6 +80,90 @@ dependencies:
66
80
  - - "~>"
67
81
  - !ruby/object:Gem::Version
68
82
  version: '3.0'
83
+ - !ruby/object:Gem::Dependency
84
+ name: manageiq-style
85
+ requirement: !ruby/object:Gem::Requirement
86
+ requirements:
87
+ - - ">="
88
+ - !ruby/object:Gem::Version
89
+ version: '0'
90
+ type: :development
91
+ prerelease: false
92
+ version_requirements: !ruby/object:Gem::Requirement
93
+ requirements:
94
+ - - ">="
95
+ - !ruby/object:Gem::Version
96
+ version: '0'
97
+ - !ruby/object:Gem::Dependency
98
+ name: rake
99
+ requirement: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - "~>"
102
+ - !ruby/object:Gem::Version
103
+ version: '13.0'
104
+ type: :development
105
+ prerelease: false
106
+ version_requirements: !ruby/object:Gem::Requirement
107
+ requirements:
108
+ - - "~>"
109
+ - !ruby/object:Gem::Version
110
+ version: '13.0'
111
+ - !ruby/object:Gem::Dependency
112
+ name: rspec
113
+ requirement: !ruby/object:Gem::Requirement
114
+ requirements:
115
+ - - ">="
116
+ - !ruby/object:Gem::Version
117
+ version: '0'
118
+ type: :development
119
+ prerelease: false
120
+ version_requirements: !ruby/object:Gem::Requirement
121
+ requirements:
122
+ - - ">="
123
+ - !ruby/object:Gem::Version
124
+ version: '0'
125
+ - !ruby/object:Gem::Dependency
126
+ name: rubocop
127
+ requirement: !ruby/object:Gem::Requirement
128
+ requirements:
129
+ - - ">="
130
+ - !ruby/object:Gem::Version
131
+ version: '0'
132
+ type: :development
133
+ prerelease: false
134
+ version_requirements: !ruby/object:Gem::Requirement
135
+ requirements:
136
+ - - ">="
137
+ - !ruby/object:Gem::Version
138
+ version: '0'
139
+ - !ruby/object:Gem::Dependency
140
+ name: simplecov
141
+ requirement: !ruby/object:Gem::Requirement
142
+ requirements:
143
+ - - ">="
144
+ - !ruby/object:Gem::Version
145
+ version: 0.21.2
146
+ type: :development
147
+ prerelease: false
148
+ version_requirements: !ruby/object:Gem::Requirement
149
+ requirements:
150
+ - - ">="
151
+ - !ruby/object:Gem::Version
152
+ version: 0.21.2
153
+ - !ruby/object:Gem::Dependency
154
+ name: timecop
155
+ requirement: !ruby/object:Gem::Requirement
156
+ requirements:
157
+ - - ">="
158
+ - !ruby/object:Gem::Version
159
+ version: '0'
160
+ type: :development
161
+ prerelease: false
162
+ version_requirements: !ruby/object:Gem::Requirement
163
+ requirements:
164
+ - - ">="
165
+ - !ruby/object:Gem::Version
166
+ version: '0'
69
167
  description: Simple Workflow Runner.
70
168
  email:
71
169
  executables: