ruby_slm 0.1.0
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- checksums.yaml +7 -0
- data/.idea/.gitignore +8 -0
- data/CODE_OF_CONDUCT.md +132 -0
- data/LICENSE +21 -0
- data/LICENSE.txt +21 -0
- data/README.md +768 -0
- data/Rakefile +16 -0
- data/examples/test_complex_workflow.rb +747 -0
- data/examples/test_parallel_complex_workflow.rb +983 -0
- data/lib/ruby_slm/errors.rb +24 -0
- data/lib/ruby_slm/execution.rb +176 -0
- data/lib/ruby_slm/state.rb +47 -0
- data/lib/ruby_slm/state_machine.rb +140 -0
- data/lib/ruby_slm/states/base.rb +149 -0
- data/lib/ruby_slm/states/choice.rb +144 -0
- data/lib/ruby_slm/states/fail.rb +62 -0
- data/lib/ruby_slm/states/parallel.rb +178 -0
- data/lib/ruby_slm/states/pass.rb +42 -0
- data/lib/ruby_slm/states/succeed.rb +39 -0
- data/lib/ruby_slm/states/task.rb +523 -0
- data/lib/ruby_slm/states/wait.rb +123 -0
- data/lib/ruby_slm/version.rb +5 -0
- data/lib/ruby_slm.rb +50 -0
- data/sig/states_language_machine.rbs +4 -0
- data/test/test_state_machine.rb +52 -0
- metadata +146 -0
data/lib/ruby_slm/states/fail.rb
@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+module StatesLanguageMachine
+  module States
+    class Fail < Base
+      # @return [String] the cause of the failure
+      attr_reader :cause
+      # @return [String] the error type
+      attr_reader :error
+
+      # @param name [String] the name of the state
+      # @param definition [Hash] the state definition
+      def initialize(name, definition)
+        # Don't call super - we need to handle this differently for Fail states
+        @name = name
+        @type = definition["Type"]
+        @cause = definition["Cause"]
+        @error = definition["Error"]
+        @definition = definition
+        @end_state = true # Fail states are always end states
+        @next_state = nil # Fail states never have next states
+
+        validate!
+      end
+
+      # @param execution [Execution] the current execution
+      # @param input [Hash] the input data for the state
+      # @return [Hash] the output data from the state
+      def execute(execution, input)
+        execution.logger&.info("Executing fail state: #{@name}")
+
+        # Set execution status to failed
+        execution.status = :failed
+        execution.error = @error
+        execution.cause = @cause
+
+        process_result(execution, input)
+
+        input
+      end
+
+      # Fail states are always end states
+      # @return [Boolean] always true for fail states
+      def end_state?
+        true
+      end
+
+      # Fail states don't have next states
+      # @return [nil] always nil for fail states
+      def next_state_name(input = nil)
+        nil
+      end
+
+      # Validate the fail state definition
+      # @raise [DefinitionError] if the definition is invalid
+      def validate!
+        raise DefinitionError, "Fail state '#{@name}' must have a Cause" unless @cause
+        raise DefinitionError, "Fail state '#{@name}' must have an Error" unless @error
+      end
+    end
+  end
+end
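Usage sketch (editorial illustration, not part of the released files): the Fail state above can be driven from a plain Amazon States Language-style definition hash. Only the constructor keys, `end_state?`, `next_state_name`, and the `validate!` behaviour come from the code above; the `require` path is an assumption based on the gem name.

```ruby
require "ruby_slm" # assumed entry point for the gem

# The definition hash mirrors the keys read in initialize: Type, Error, Cause.
definition = {
  "Type"  => "Fail",
  "Error" => "States.TaskFailed",
  "Cause" => "Upstream task returned a non-retryable error"
}

state = StatesLanguageMachine::States::Fail.new("NotifyFailure", definition)

state.end_state?      # => true  (Fail states always terminate the machine)
state.next_state_name # => nil   (Fail states never transition anywhere)
state.error           # => "States.TaskFailed"

# Omitting Cause or Error is rejected by validate!:
#   StatesLanguageMachine::States::Fail.new("Broken", { "Type" => "Fail" })
#   # => raises DefinitionError: Fail state 'Broken' must have a Cause
```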
data/lib/ruby_slm/states/parallel.rb
@@ -0,0 +1,178 @@
+# frozen_string_literal: true
+
+module StatesLanguageMachine
+  module States
+    class Parallel < Base
+      # @return [Array<Hash>] the list of branches to execute in parallel
+      attr_reader :branches
+
+      # @return [Integer] maximum number of concurrent branches
+      attr_reader :max_concurrency
+
+      # @param name [String] the name of the state
+      # @param definition [Hash] the state definition
+      def initialize(name, definition)
+        # Ensure parallel states have End: true as required by base class
+        definition_with_end = definition.merge('End' => true)
+        super(name, definition_with_end)
+        @branches = definition["Branches"] || []
+        @max_concurrency = definition["MaxConcurrency"] || @branches.size
+        validate_parallel_specific!
+      end
+
+      # @param execution [Execution] the current execution
+      # @param input [Hash] the input data for the state
+      # @return [Hash] the output data from the state
+      def execute(execution, input)
+        execution.logger&.info("Executing parallel state: #{@name} with #{@branches.size} branches")
+
+        # Execute branches concurrently using Fibers
+        results = execute_branches_concurrently(execution, input)
+
+        # Handle any branch failures
+        handle_branch_errors(results)
+
+        # Combine successful results
+        final_result = merge_branch_results(results)
+        process_result(execution, final_result)
+        final_result
+      end
+
+      private
+
+      # Execute all branches concurrently using Fibers
+      # @param execution [Execution] the parent execution
+      # @param input [Hash] the input data
+      # @return [Array<Hash>] results from all branches
+      def execute_branches_concurrently(execution, input)
+        # Create fibers for each branch
+        fibers = @branches.map.with_index do |branch_def, index|
+          Fiber.new do
+            execute_branch(execution, branch_def, input, index)
+          end
+        end
+
+        # Execute fibers with concurrency control
+        execute_fibers_with_concurrency_limit(fibers)
+      end
+
+      # Execute fibers with concurrency limit using round-robin scheduling
+      # @param fibers [Array<Fiber>] fibers to execute
+      # @return [Array<Hash>] results from all fibers
+      def execute_fibers_with_concurrency_limit(fibers)
+        results = []
+        active_fibers = fibers.dup
+
+        # Continue until all fibers are done
+        until active_fibers.empty?
+          # Process each active fiber in round-robin fashion
+          active_fibers.dup.each do |fiber|
+            begin
+              if fiber.alive?
+                # Resume the fiber
+                result = fiber.resume
+                # If the fiber returned a result (finished), store it and remove from active
+                unless fiber.alive?
+                  results << result
+                  active_fibers.delete(fiber)
+                end
+              else
+                # Fiber is dead but still in active list, remove it
+                active_fibers.delete(fiber)
+              end
+            rescue => e
+              # Handle any fiber errors
+              results << ExecutionError.new(@name, "Fiber execution failed: #{e.message}")
+              active_fibers.delete(fiber)
+            end
+          end
+
+          # Small sleep to prevent busy waiting (optional, but good practice)
+          sleep(0.001) if active_fibers.any?
+        end
+
+        results
+      end
+
+      # Execute a single branch within a Fiber
+      # @param execution [Execution] the parent execution
+      # @param branch_def [Hash] the branch definition
+      # @param input [Hash] the input data
+      # @param branch_index [Integer] the index of the branch
+      # @return [Hash] the branch execution result
+      def execute_branch(execution, branch_def, input, branch_index)
+        execution.logger&.debug("Starting branch #{branch_index} in parallel state: #{@name}")
+
+        branch_machine = StateMachine.new(branch_def, format: :hash)
+        branch_execution = branch_machine.start_execution(
+          input,
+          "#{execution.name}-branch-#{branch_index}",
+          execution.context
+        )
+
+        # Run the branch execution to completion
+        # For true cooperative multitasking, we'd need the state machine to yield
+        # For now, we'll run it to completion within the fiber
+        branch_execution.run_all
+
+        unless branch_execution.succeeded?
+          raise ExecutionError.new(@name, "Branch #{branch_index} execution failed: #{branch_execution.error}")
+        end
+
+        execution.logger&.debug("Branch #{branch_index} completed successfully")
+        branch_execution.output
+      rescue => e
+        ExecutionError.new(@name, "Branch #{branch_index} failed: #{e.message}")
+      end
+
+      # Handle any branch errors in the results
+      # @param results [Array] results from branch executions
+      # @raise [ExecutionError] if any branches failed
+      def handle_branch_errors(results)
+        failed_branches = results.select { |r| r.is_a?(ExecutionError) }
+
+        unless failed_branches.empty?
+          error_messages = failed_branches.map(&:message).join('; ')
+          raise ExecutionError.new(@name, "#{failed_branches.size} branch(es) failed: #{error_messages}")
+        end
+      end
+
+      # Merge results from all successful branches
+      # @param results [Array<Hash>] successful branch results
+      # @return [Hash] merged result
+      def merge_branch_results(results)
+        results.reduce({}) { |acc, result| deep_merge(acc, result) }
+      end
+
+      # Deep merge helper for nested hashes
+      # @param hash1 [Hash] first hash
+      # @param hash2 [Hash] second hash
+      # @return [Hash] deeply merged hash
+      def deep_merge(hash1, hash2)
+        hash1.merge(hash2) do |key, old_val, new_val|
+          if old_val.is_a?(Hash) && new_val.is_a?(Hash)
+            deep_merge(old_val, new_val)
+          else
+            new_val
+          end
+        end
+      end
+
+      # Validate parallel-specific requirements
+      def validate_parallel_specific!
+        raise DefinitionError, "Parallel state '#{@name}' must have at least one branch" if @branches.empty?
+
+        # Validate each branch structure
+        @branches.each_with_index do |branch, index|
+          unless branch["States"] && branch["StartAt"]
+            raise DefinitionError, "Branch #{index} in parallel state '#{@name}' must have States and StartAt"
+          end
+        end
+
+        if @max_concurrency < 1
+          raise DefinitionError, "MaxConcurrency must be at least 1 in parallel state '#{@name}'"
+        end
+      end
+    end
+  end
+end
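Usage sketch (editorial illustration): a Parallel state fanning out to two single-state branches. `StateMachine.new(hash, format: :hash)`, `start_execution(input, name, context)`, `run_all`, and `output` mirror the calls `execute_branch` makes internally; using them against a top-level definition like this, and the exact shape of the merged output, are assumptions.

```ruby
require "ruby_slm" # assumed entry point for the gem

# A Parallel state with two branches; each branch is its own small state machine
# with StartAt/States, as required by validate_parallel_specific!.
definition = {
  "StartAt" => "FanOut",
  "States"  => {
    "FanOut" => {
      "Type" => "Parallel",
      "End"  => true,
      "Branches" => [
        {
          "StartAt" => "AddGreeting",
          "States"  => {
            "AddGreeting" => { "Type" => "Pass", "Result" => { "greeting" => "hello" }, "End" => true }
          }
        },
        {
          "StartAt" => "AddAudience",
          "States"  => {
            "AddAudience" => { "Type" => "Pass", "Result" => { "audience" => "world" }, "End" => true }
          }
        }
      ]
    }
  }
}

machine   = StatesLanguageMachine::StateMachine.new(definition, format: :hash)
execution = machine.start_execution({}, "parallel-demo", {})
execution.run_all

# Expected to combine both branch outputs via merge_branch_results/deep_merge,
# e.g. { "greeting" => "hello", "audience" => "world" } (exact shape depends on
# how Base applies result paths).
execution.output
```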
data/lib/ruby_slm/states/pass.rb
@@ -0,0 +1,42 @@
+# frozen_string_literal: true
+
+module StatesLanguageMachine
+  module States
+    class Pass < Base
+      # @return [Object, nil] the result to pass through
+      attr_reader :result
+      # @return [String, nil] the result path
+      attr_reader :result_path
+      # @return [String, nil] the input path
+      attr_reader :input_path
+      # @return [String, nil] the output path
+      attr_reader :output_path
+
+      # @param name [String] the name of the state
+      # @param definition [Hash] the state definition
+      def initialize(name, definition)
+        super
+        @result = definition["Result"]
+        @result_path = definition["ResultPath"]
+        @input_path = definition["InputPath"]
+        @output_path = definition["OutputPath"]
+      end
+
+      # @param execution [Execution] the current execution
+      # @param input [Hash] the input data for the state
+      # @return [Hash] the output data from the state
+      def execute(execution, input)
+        execution.logger&.info("Executing pass state: #{@name}")
+
+        processed_input = apply_input_path(input, @input_path)
+
+        result = @result || processed_input
+        output = apply_result_path(input, result, @result_path)
+        final_output = apply_output_path(output, @output_path)
+
+        process_result(execution, final_output)
+        final_output
+      end
+    end
+  end
+end
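Usage sketch (editorial illustration): constructing the Pass state above from a definition hash. The keys map one-to-one onto `initialize`; the JSONPath-style values and the `require` path are assumptions, and the path semantics themselves live in `states/base.rb`.

```ruby
require "ruby_slm" # assumed entry point for the gem

definition = {
  "Type"       => "Pass",
  "Result"     => { "defaults" => { "retries" => 3 } },
  "ResultPath" => "$.config",
  "InputPath"  => "$",
  "OutputPath" => "$",
  "End"        => true
}

state = StatesLanguageMachine::States::Pass.new("InjectDefaults", definition)

state.result      # => { "defaults" => { "retries" => 3 } }
state.result_path # => "$.config"

# During execution, execute(execution, input) applies InputPath, substitutes
# Result for the filtered input, applies ResultPath and OutputPath, and hands
# the final output to process_result.
```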
data/lib/ruby_slm/states/succeed.rb
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module StatesLanguageMachine
+  module States
+    class Succeed < Base
+      # @return [String, nil] the input path
+      attr_reader :input_path
+      # @return [String, nil] the output path
+      attr_reader :output_path
+
+      # @param name [String] the name of the state
+      # @param definition [Hash] the state definition
+      def initialize(name, definition)
+        super
+        @input_path = definition["InputPath"]
+        @output_path = definition["OutputPath"]
+      end
+
+      # @param execution [Execution] the current execution
+      # @param input [Hash] the input data for the state
+      # @return [Hash] the output data from the state
+      def execute(execution, input)
+        execution.logger&.info("Executing succeed state: #{@name}")
+
+        processed_input = apply_input_path(input, @input_path)
+        final_output = apply_output_path(processed_input, @output_path)
+
+        execution.status = :succeeded
+        process_result(execution, final_output)
+        final_output
+      end
+
+      # Validate the succeed state definition
+      def validate!
+        # Succeed states don't need Next or End
+      end
+    end
+  end
+end
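Usage sketch (editorial illustration): a minimal two-state machine that ends in the Succeed state above. `Next` handling and path application live in `states/base.rb`; the top-level `StateMachine`/`start_execution`/`run_all` calls are assumed from their use inside `Parallel#execute_branch`.

```ruby
require "ruby_slm" # assumed entry point for the gem

# A Pass state hands its Result to a Succeed state, which filters it through
# InputPath/OutputPath and marks the execution :succeeded.
definition = {
  "StartAt" => "Prepare",
  "States"  => {
    "Prepare" => { "Type" => "Pass", "Result" => { "ok" => true }, "Next" => "Done" },
    "Done"    => { "Type" => "Succeed", "OutputPath" => "$" }
  }
}

machine   = StatesLanguageMachine::StateMachine.new(definition, format: :hash)
execution = machine.start_execution({}, "succeed-demo", {})
execution.run_all

execution.succeeded? # expected => true once the Succeed state sets execution.status
execution.output     # expected => { "ok" => true } after path filtering
```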