brute_flow 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/brute_flow/builder.rb +337 -0
- data/lib/brute_flow/runner.rb +172 -0
- data/lib/brute_flow/services/agent_service.rb +30 -0
- data/lib/brute_flow/services/memory_recall_service.rb +70 -0
- data/lib/brute_flow/services/router_service.rb +40 -0
- data/lib/brute_flow/services/self_check_service.rb +73 -0
- data/lib/brute_flow/services/tool_suggest_service.rb +37 -0
- data/lib/brute_flow.rb +37 -0
- metadata +75 -0
checksums.yaml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
---
|
|
2
|
+
SHA256:
|
|
3
|
+
metadata.gz: 214695b96e9f5c22e58314e6028e4068ee0c4fc093ef0ee0c6e4905d8375cebd
|
|
4
|
+
data.tar.gz: 411ff39327de393a5ead111eb57a089b846ab50a6ae1dbe2ee3d3106fff9a3c5
|
|
5
|
+
SHA512:
|
|
6
|
+
metadata.gz: b4a3c48b9aed668ba2602735899cedc3992808e44ed87cb7ad8392952ce75c929d88f7ebf21fa04f3f0a04c667b4b89cf45c1426e86ec006f5035120962a6da9
|
|
7
|
+
data.tar.gz: d3361873ca16e7bc97119979f0ffb2b5698592b9379a231b8f73e4c64839c0f87a17b578233f6563d4cf4540ee5de3a7e6c6948c23dc9b076db5fdc14a95869c
|
|
# frozen_string_literal: true

require "securerandom"

module Brute
  module Flow
    # Builds BPMN::Definitions directly from a Ruby DSL — no XML involved.
    #
    # Usage:
    #   defs = Builder.build("my_flow") do
    #     service :router, type: "Brute::Flow::Services::RouterService"
    #     exclusive_gateway :mode, default: :simple_path do
    #       branch :fibre_path, condition: '=agent_mode = "fibre"' do
    #         parallel do
    #           service :tools, type: "Brute::Flow::Services::ToolSuggestService"
    #           service :memory, type: "Brute::Flow::Services::MemoryRecallService"
    #         end
    #         service :agent, type: "Brute::Flow::Services::AgentService"
    #       end
    #       branch :simple_path do
    #         service :agent, type: "Brute::Flow::Services::AgentService"
    #       end
    #     end
    #     loop_while '=self_check_passed = false', max: 3, timeout: "PT5M" do
    #       service :check, type: "Brute::Flow::Services::SelfCheckService"
    #     end
    #   end
    #
    class Builder
      # Evaluate the DSL block against a fresh builder and assemble the
      # resulting BPMN::Definitions.
      def self.build(process_id = "brute_flow", &block)
        new(process_id).tap { |b| b.instance_eval(&block) }.finalize
      end

      def initialize(process_id)
        @process_id = process_id
        @counter = 0
        # Collect raw element hashes by BPMN type.
        @start_events = []
        @end_events = []
        @service_tasks = []
        @exclusive_gateways = []
        @parallel_gateways = []
        @boundary_events = []
        @sub_processes = []
        @sequence_flows = []
        # Ordered list of element ids representing the top-level sequence.
        @sequence = []
        # Pending loop-exit wiring (set by #loop_while, consumed by #finalize).
        @loop_exit_flow = nil
        @loop_exit_source = nil
      end

      # -- DSL methods --

      # Declare a ServiceTask backed by `type` (a class name the Runner will
      # constantize). Returns the generated element id.
      def service(id, type:, headers: {})
        task_id = uid(id)
        header_arr = headers.map { |k, v| { key: k.to_s, value: v.to_s } }
        ext = {}
        ext[:task_definition] = { type: type }
        ext[:task_headers] = { header: header_arr } unless header_arr.empty?

        @service_tasks << {
          id: task_id,
          name: id.to_s,
          extension_elements: ext,
        }
        @sequence << task_id
        task_id
      end

      # Exclusive (XOR) gateway with conditional branches. The branch named
      # by `default:` becomes the gateway's default flow.
      def exclusive_gateway(id, default: nil, &block)
        split_id = uid(:"#{id}_split")
        join_id = uid(:"#{id}_join")
        default_branch_id = default

        ctx = GatewayContext.new(self)
        ctx.instance_eval(&block)
        branches = ctx.branches

        # Add split gateway.
        default_flow_id = nil
        @exclusive_gateways << { id: split_id, name: "#{id}_split" }
        @sequence << split_id

        # Each branch: split → [branch elements] → join.
        branches.each do |br|
          flow_id = uid(:"flow_#{br[:id]}")
          flow = { id: flow_id, source_ref: split_id, target_ref: br[:elements].first }
          flow[:condition_expression] = br[:condition] if br[:condition]
          @sequence_flows << flow

          default_flow_id = flow_id if br[:id].to_s == default_branch_id.to_s

          # Chain branch elements internally.
          br[:elements].each_cons(2) do |from, to|
            @sequence_flows << { id: uid(:flow), source_ref: from, target_ref: to }
          end

          # Last branch element → join.
          @sequence_flows << { id: uid(:flow), source_ref: br[:elements].last, target_ref: join_id }
        end

        # Set default on the split gateway (still `last` — join not pushed yet).
        @exclusive_gateways.last[:default] = default_flow_id if default_flow_id

        # Add join gateway.
        @exclusive_gateways << { id: join_id, name: "#{id}_join" }
        @sequence << join_id
      end

      # Parallel (AND) gateway: every branch runs; the join waits for all.
      def parallel(&block)
        fork_id = uid(:parallel_fork)
        join_id = uid(:parallel_join)

        ctx = ParallelContext.new(self)
        ctx.instance_eval(&block)
        branches = ctx.branches

        @parallel_gateways << { id: fork_id, name: "parallel_fork" }
        @sequence << fork_id

        branches.each do |br|
          # fork → branch elements → join
          @sequence_flows << { id: uid(:flow), source_ref: fork_id, target_ref: br.first }
          br.each_cons(2) do |from, to|
            @sequence_flows << { id: uid(:flow), source_ref: from, target_ref: to }
          end
          @sequence_flows << { id: uid(:flow), source_ref: br.last, target_ref: join_id }
        end

        @parallel_gateways << { id: join_id, name: "parallel_join" }
        @sequence << join_id
      end

      # Repeat the block while `condition` holds, at most `max` iterations,
      # optionally bounded by an ISO-8601 `timeout` boundary timer.
      #
      # NOTE(review): only one loop_while per builder level is supported — a
      # second call overwrites the pending exit-flow bookkeeping.
      def loop_while(condition, max: 3, timeout: nil, &block)
        # Build the loop body into a subprocess.
        sub_id = uid(:loop_sub)
        inner = Builder.new(sub_id)
        inner.instance_eval(&block)

        # The loop gate: after the subprocess, check condition to loop back.
        gate_id = uid(:loop_gate)
        back_flow_id = uid(:loop_back)
        exit_flow_id = uid(:loop_exit)

        # Counter variable for max iterations (incremented by the loop body's
        # service, e.g. SelfCheckService).
        counter_var = "_loop_#{@counter}_count"
        # FEEL condition: original condition AND counter < max.
        guarded = "=#{strip_feel(condition)} and #{counter_var} < #{max}"

        # Add subprocess hash (it's a Process-like thing).
        @sub_processes << inner.to_sub_process_hash(sub_id)
        @sequence << sub_id

        # Add gate after subprocess; the exit flow is its default path.
        @exclusive_gateways << { id: gate_id, name: "loop_gate", default: exit_flow_id }
        @sequence << gate_id

        # Back-edge: gate → subprocess (with condition).
        @sequence_flows << { id: back_flow_id, source_ref: gate_id, target_ref: sub_id, condition_expression: guarded }

        # Exit edge (gate → next element) is wired in #finalize. Record both
        # the flow id AND its source gateway so finalize can attach it even
        # though the gate already has an outgoing back-edge.
        @loop_exit_flow = exit_flow_id
        @loop_exit_source = gate_id

        # Timeout boundary event on the subprocess.
        if timeout
          @boundary_events << {
            id: uid(:loop_timeout),
            name: "loop_timeout",
            attached_to_ref: sub_id,
            cancel_activity: "true",
            timer_event_definition: { time_duration: timeout },
          }
        end
      end

      # -- Internal: used by GatewayContext / ParallelContext --

      # Run `block` against this builder with a temporary, empty @sequence so
      # the branch's element ids can be collected without touching the main
      # top-level sequence.
      def _build_branch(&block)
        saved_sequence = @sequence
        @sequence = []
        instance_eval(&block)
        branch_elements = @sequence
        @sequence = saved_sequence
        branch_elements
      end

      # Monotonic unique element id: "<prefix>_<n>".
      def uid(prefix = :el)
        @counter += 1
        "#{prefix}_#{@counter}"
      end

      # -- Finalize: assemble into BPMN::Definitions --

      def finalize
        start_id = uid(:start)
        end_id = uid(:end)

        @start_events << { id: start_id, name: "Start" }
        @end_events << { id: end_id, name: "End" }

        # Build the main sequence: start → elements → end.
        all = [start_id] + @sequence + [end_id]
        all.each_cons(2) do |from, to|
          # BUGFIX: wire the loop-exit flow *before* the "already has an
          # outgoing flow" guard below. A loop gate always has an outgoing
          # back-edge, so the old order skipped the gate entirely and its
          # default (exit) flow was never created — leaving the loop with no
          # way out.
          if @loop_exit_flow && from == @loop_exit_source
            @sequence_flows << { id: @loop_exit_flow, source_ref: from, target_ref: to }
            @loop_exit_flow = nil
            @loop_exit_source = nil
            next
          end

          # Skip if there's already a flow from `from` (gateways wire their own).
          next if @sequence_flows.any? { |f| f[:source_ref] == from }

          @sequence_flows << { id: uid(:flow), source_ref: from, target_ref: to }
        end

        wire_io(@start_events + @end_events + @service_tasks +
                @exclusive_gateways + @parallel_gateways + @sub_processes)

        process_hash = {
          id: @process_id,
          name: @process_id,
          is_executable: "true",
          start_event: @start_events,
          end_event: @end_events,
          service_task: @service_tasks,
          exclusive_gateway: @exclusive_gateways,
          parallel_gateway: @parallel_gateways,
          sub_process: @sub_processes,
          boundary_event: @boundary_events,
          sequence_flow: @sequence_flows,
        }

        defs = BPMN::Definitions.new(process: [process_hash])
        defs.processes.each { |p| p.wire_references(defs) }
        defs
      end

      # Build a sub-process hash from the current builder state (for loop bodies).
      def to_sub_process_hash(id)
        start_id = uid(:sub_start)
        end_id = uid(:sub_end)

        sub_start = [{ id: start_id, name: "SubStart" }]
        sub_end = [{ id: end_id, name: "SubEnd" }]

        all = [start_id] + @sequence + [end_id]
        all.each_cons(2) do |from, to|
          next if @sequence_flows.any? { |f| f[:source_ref] == from }

          @sequence_flows << { id: uid(:flow), source_ref: from, target_ref: to }
        end

        wire_io(sub_start + sub_end + @service_tasks + @exclusive_gateways +
                @parallel_gateways + @sub_processes)

        {
          id: id,
          name: id,
          start_event: sub_start,
          end_event: sub_end,
          service_task: @service_tasks,
          exclusive_gateway: @exclusive_gateways,
          parallel_gateway: @parallel_gateways,
          sequence_flow: @sequence_flows,
        }
      end

      private

      # Drop the leading FEEL "=" marker so conditions can be recombined.
      def strip_feel(expr)
        expr.to_s.delete_prefix("=")
      end

      # Attach :incoming/:outgoing flow-id arrays to each element hash, derived
      # from @sequence_flows. (Shared by #finalize and #to_sub_process_hash,
      # which previously duplicated this logic.)
      def wire_io(elements)
        incoming = Hash.new { |h, k| h[k] = [] }
        outgoing = Hash.new { |h, k| h[k] = [] }
        @sequence_flows.each do |f|
          outgoing[f[:source_ref]] << f[:id]
          incoming[f[:target_ref]] << f[:id]
        end
        elements.each do |el|
          el[:incoming] = incoming[el[:id]] unless incoming[el[:id]].empty?
          el[:outgoing] = outgoing[el[:id]] unless outgoing[el[:id]].empty?
        end
      end

      # -- Context objects for gateway/parallel DSL blocks --

      # Collects `branch` declarations inside an exclusive_gateway block.
      class GatewayContext
        attr_reader :branches

        def initialize(builder)
          @builder = builder
          @branches = []
        end

        def branch(id, condition: nil, &block)
          elements = @builder._build_branch(&block)
          @branches << { id: id, condition: condition, elements: elements }
        end
      end

      # Collects branches inside a parallel block. A bare `service` call is a
      # single-element branch; `branch` groups several elements.
      class ParallelContext
        attr_reader :branches

        def initialize(builder)
          @builder = builder
          @branches = []
        end

        def service(id, type:, headers: {})
          # Single-element branch.
          task_id = @builder.service(id, type: type, headers: headers)
          # Pop it from the main sequence (we manage our own).
          @builder.instance_variable_get(:@sequence).pop
          @branches << [task_id]
        end

        def branch(&block)
          elements = @builder._build_branch(&block)
          @branches << elements
        end
      end
    end
  end
end
# frozen_string_literal: true

require "async"
require "async/barrier"

module Brute
  module Flow
    # Async-native BPMN execution runner.
    #
    # Takes a BPMN::Definitions (built by Builder), starts the process,
    # and drives automated ServiceTasks to completion — running parallel
    # branches concurrently via Async::Barrier.
    #
    # WorkflowKit's engine is synchronous by design: ServiceTask#run is
    # called inline and ParallelGateway branches execute sequentially.
    # We monkey-patch ServiceTask#run to be a no-op and handle execution
    # ourselves, giving us true async parallelism.
    class Runner
      attr_reader :execution, :context

      # @param definitions [BPMN::Definitions] process model to execute
      # @param cwd [String] working directory, merged into flow variables
      # @param variables [Hash] initial flow variables
      def initialize(definitions, cwd: Dir.pwd, variables: {})
        @definitions = definitions
        @cwd = cwd
        @initial_variables = variables.merge(cwd: cwd)
        @context = nil
        @execution = nil
      end

      # Execute the flow to completion. Blocks until done.
      # Returns the final variables hash.
      def run
        @context = BPMN::Context.new([], processes: @definitions.processes)
        @execution = @context.start(variables: @initial_variables)

        Async { run_loop }

        @execution.variables
      end

      # Serialize execution state for session persistence.
      def serialize
        @execution&.serialize
      end

      # Restore from serialized state.
      def restore(state)
        @context = BPMN::Context.new([], processes: @definitions.processes)
        @execution = @context.restore(state)
      end

      # Final output variables (empty hash before the first run).
      def variables
        @execution&.variables || {}
      end

      private

      # Drain waiting automated tasks until none remain.
      def run_loop
        loop do
          # Fire any expired timers first (loop timeouts).
          @execution.check_expired_timers if @execution.respond_to?(:check_expired_timers)

          pending = find_waiting_automated(@execution)
          break if pending.empty?

          # Siblings sharing a parent (post ParallelGateway fork) run
          # concurrently; everything else runs one at a time.
          group_by_parallel_parent(pending).each_value do |batch|
            batch.size > 1 ? run_parallel(batch) : run_single(batch.first)
          end
        end
      end

      # Run a batch of sibling tasks concurrently, then signal each
      # execution with its result (signaling happens in ensure so the
      # barrier is always stopped first).
      def run_parallel(batch)
        barrier = Async::Barrier.new
        outcomes = {}

        batch.each do |item|
          barrier.async { outcomes[item] = run_service(item) }
        end

        barrier.wait
      ensure
        barrier&.stop

        outcomes.each { |item, outcome| signal_with_result(item, outcome) }
      end

      def run_single(item)
        signal_with_result(item, run_service(item))
      end

      # Instantiate the step's service class and invoke it; any error is
      # captured as an error-hash result rather than raised.
      def run_service(item)
        step = item.step
        return nil unless step.respond_to?(:task_type) && step.task_type

        service_class = step.task_type.constantize
        vars = item.parent&.variables || {}
        hdrs = step.respond_to?(:headers) ? (step.headers || {}) : {}

        service_class.new.call(vars, hdrs)
      rescue => e
        { error: true, message: e.message, class: e.class.name }
      end

      def signal_with_result(item, result)
        return unless item.waiting?

        item.signal(result)
      rescue => e
        warn "[brute/flow] Signal failed for #{item.step&.id}: #{e.message}"
      end

      # Recursively collect waiting executions whose step is an automated task.
      def find_waiting_automated(execution)
        return [] unless execution.respond_to?(:children)

        execution.children.flat_map do |child|
          here =
            if child.waiting? && child.step.respond_to?(:is_automated?) && child.step.is_automated?
              [child]
            else
              []
            end
          # Recurse into sub-process children.
          here + find_waiting_automated(child)
        end
      end

      # Bucket tasks by parent execution object id; siblings after a
      # ParallelGateway fork share a parent and should run concurrently.
      def group_by_parallel_parent(tasks)
        tasks.group_by { |t| t.parent&.object_id || :root }
      end
    end

    # ----------------------------------------------------------------
    # Monkey-patch: make ServiceTask#run a no-op.
    #
    # WorkflowKit's ServiceTask#run calls task_type.constantize.new.call()
    # synchronously. We override it so the engine only puts the task into
    # `waiting` state. Our Runner then picks up waiting tasks and runs
    # them with async parallelism.
    # ----------------------------------------------------------------
    module ServiceTaskAsyncPatch
      def run(execution)
        # No-op: Runner handles execution externally.
        # The task is already in `waiting` state from execute().
        nil
      end
    end
  end
end

# Apply the monkey-patch
BPMN::ServiceTask.prepend(Brute::Flow::ServiceTaskAsyncPatch)
# frozen_string_literal: true

module Brute
  module Flow
    module Services
      # Runs a full Brute::Orchestrator for the user's message.
      # This is the core service — it invokes the coding agent with
      # all tools, middleware, retry, compaction, etc.
      class AgentService
        # @param variables [Hash] flow variables (symbol or string keys):
        #   :cwd, :user_message, :relevant_files (from MemoryRecallService)
        # @param headers [Hash] BPMN task headers (unused here)
        # @return [Hash] { agent_result: String or nil }
        def call(variables, headers)
          cwd = variables[:cwd] || variables["cwd"] || Dir.pwd
          message = variables[:user_message] || variables["user_message"]
          context_files = variables[:relevant_files] || variables["relevant_files"]
          # NOTE(review): :suggested_tools is produced upstream by
          # ToolSuggestService but was read and never used here; the dead
          # local has been removed. Wire it into Brute.agent when supported.

          # Build enriched prompt with context from upstream services.
          # `to_s` guards against a missing :user_message — previously a nil
          # prompt was passed straight through to the orchestrator.
          prompt = message.to_s.dup
          if context_files.is_a?(Array) && !context_files.empty?
            prompt = "Relevant files:\n#{context_files.map { |f| "- #{f}" }.join("\n")}\n\n#{prompt}"
          end

          orch = Brute.agent(cwd: cwd)
          response = orch.run(prompt)

          { agent_result: response&.content }
        end
      end
    end
  end
end
# frozen_string_literal: true

require "open3"

module Brute
  module Flow
    module Services
      # Searches the workspace for files relevant to the task.
      # Uses ripgrep to find matches. No LLM call.
      class MemoryRecallService
        MAX_FILES = 20
        MAX_SNIPPET = 200

        # Rank keywords from the task text, ripgrep each of the top five,
        # and return up to MAX_FILES matching paths.
        def call(variables, _headers)
          workdir = variables[:cwd] || variables["cwd"] || Dir.pwd
          text = variables[:user_message] || variables["user_message"] || ""

          # Keywords: words of 3+ chars, minus common stop words.
          terms = extract_keywords(text)
          return { relevant_files: [] } if terms.empty?

          matches = {}
          terms.first(5).each do |term|
            search_keyword(term, workdir).each do |path, snippet|
              matches[path] ||= snippet
            end
            break if matches.size >= MAX_FILES
          end

          { relevant_files: matches.keys.first(MAX_FILES) }
        end

        private

        STOP_WORDS = %w[
          the and for that this with from are was were been have has
          had not but all can will just more some than them into also
          make like over such after first well back even give most
          file files code should would could please help want need use
        ].to_set.freeze

        # Lowercased non-stop-words of 3+ characters, most frequent first.
        def extract_keywords(text)
          counts = text.scan(/\b[a-zA-Z_]\w{2,}\b/)
                       .map(&:downcase)
                       .reject { |w| STOP_WORDS.include?(w) }
                       .tally
          counts.sort_by { |_, n| -n }.map(&:first)
        end

        # ripgrep a single keyword; returns [[path, snippet], ...] capped at
        # 10 output lines. Non-zero exit (e.g. no matches) yields [].
        def search_keyword(keyword, cwd)
          command = ["rg", "--line-number", "--max-count=3", "--max-columns=200",
                     "--ignore-case", "--no-heading", keyword, cwd]
          out, _err, status = Open3.capture3(*command)
          return [] unless status.success?

          out.lines.first(10).filter_map do |line|
            next unless line =~ /\A(.+?):(\d+):(.*)/

            [Regexp.last_match(1), Regexp.last_match(3).strip[0...MAX_SNIPPET]]
          end
        end
      end
    end
  end
end
# frozen_string_literal: true

module Brute
  module Flow
    module Services
      # Classifies the incoming task and decides the execution mode.
      # Single LLM call, no tools. Returns { agent_mode: "simple"|"fibre" }.
      class RouterService
        PROMPT = <<~PROMPT
          Classify this software engineering task into one of two modes:

          - "simple": Straightforward tasks that one agent can handle linearly.
            Examples: fix a bug, add a function, rename a variable, run tests.

          - "fibre": Complex tasks requiring parallel research, multi-file changes,
            or analysis from multiple angles before acting.
            Examples: large refactors, new feature across many files, architecture review.

          Return ONLY a JSON object: {"agent_mode": "simple"} or {"agent_mode": "fibre"}

          Task: %<task>s
        PROMPT

        # Ask the provider to classify; any failure (network, bad JSON)
        # falls back to "simple".
        def call(variables, _headers)
          task_text = variables[:user_message] || variables["user_message"]
          question = format(PROMPT, task: task_text)

          begin
            reply = Brute.provider.complete(question)
            mode = JSON.parse(reply.content)["agent_mode"] || "simple"
            { agent_mode: mode }
          rescue => e
            warn "[brute/flow/router] Classification failed (#{e.message}), defaulting to simple"
            { agent_mode: "simple" }
          end
        end
      end
    end
  end
end
# frozen_string_literal: true

require "open3"
# BUGFIX: these three were never required in this file. String#shellescape
# comes from shellwords; JSON / YAML are referenced by the checkers below and
# only worked when another file happened to load them first.
require "shellwords"
require "json"
require "yaml"

module Brute
  module Flow
    module Services
      # Deterministic post-execution validation. No LLM call.
      # Checks that output files exist and have valid syntax.
      # Returns { self_check_passed: bool, self_check_errors: [...] }
      class SelfCheckService
        # The lambdas are defined in class scope, so bare calls resolve to
        # the class methods defined below.
        SYNTAX_CHECKERS = {
          ".rb" => ->(path) { check_cmd("ruby -c #{path.shellescape}") },
          ".json" => ->(path) { check_json(path) },
          ".yml" => ->(path) { check_yaml(path) },
          ".yaml" => ->(path) { check_yaml(path) },
        }.freeze

        # @param variables [Hash] flow variables; mutated to bump the loop counter
        # @return [Hash] { self_check_passed: Boolean, self_check_errors: Array<String> }
        def call(variables, _headers)
          cwd = variables[:cwd] || variables["cwd"] || Dir.pwd
          result = variables[:agent_result] || variables["agent_result"] || ""
          errors = []

          # Increment the loop counter injected by Builder#loop_while so the
          # guarded FEEL condition (`... and _loop_N_count < max`) can terminate.
          counter_key = variables.keys.find { |k| k.to_s.start_with?("_loop_") && k.to_s.end_with?("_count") }
          variables[counter_key] = (variables[counter_key] || 0).to_i + 1 if counter_key

          # Extract file paths mentioned in agent output (absolute or ./ ../
          # relative, with an extension).
          paths = result.to_s.scan(%r{(?:^|\s)((?:/|\.{1,2}/)\S+\.\w+)}).flatten.uniq
          paths.each do |rel_path|
            full = File.expand_path(rel_path, cwd)
            unless File.exist?(full)
              errors << "File not found: #{rel_path}"
              next
            end

            checker = SYNTAX_CHECKERS[File.extname(full).downcase]
            next unless checker

            err = checker.call(full)
            errors << "#{rel_path}: #{err}" if err
          end

          { self_check_passed: errors.empty?, self_check_errors: errors }
        end

        # Run a shell command; nil on success, first stderr line on failure.
        def self.check_cmd(cmd)
          _out, err, status = Open3.capture3(cmd)
          status.success? ? nil : err.lines.first&.strip
        end

        # nil if the file parses as JSON, else the first error line.
        def self.check_json(path)
          JSON.parse(File.read(path))
          nil
        rescue JSON::ParserError => e
          e.message.lines.first&.strip
        end

        # nil if the file parses as YAML, else the first error line.
        def self.check_yaml(path)
          YAML.safe_load(File.read(path))
          nil
        rescue Psych::SyntaxError => e
          e.message.lines.first&.strip
        end

        # BUGFIX: a bare `private` has no effect on `def self.` methods; use
        # private_class_method to actually hide the helpers.
        private_class_method :check_cmd, :check_json, :check_yaml
      end
    end
  end
end
# frozen_string_literal: true

module Brute
  module Flow
    module Services
      # Pre-filters the available tool set to the most relevant subset
      # for the given task. Single LLM call, no tools.
      class ToolSuggestService
        TOOL_NAMES = %w[read write patch remove fs_search undo shell fetch todo_write todo_read delegate].freeze

        PROMPT = <<~PROMPT
          Given this software engineering task, which of these tools are most likely needed?

          Available tools: %<tools>s

          Task: %<task>s

          Return ONLY a JSON array of tool names, e.g. ["read", "shell", "patch"]
        PROMPT

        # @return [Hash] { suggested_tools: Array<String> } — falls back to
        #   the full tool list when the LLM call or JSON parse fails.
        def call(variables, _headers)
          task_text = variables[:user_message] || variables["user_message"]
          question = format(PROMPT, tools: TOOL_NAMES.join(", "), task: task_text)

          begin
            reply = Brute.provider.complete(question)
            decoded = JSON.parse(reply.content)
            # Intersect with the known tool list to discard hallucinated names.
            { suggested_tools: Array(decoded) & TOOL_NAMES }
          rescue => e
            warn "[brute/flow/tool_suggest] Failed (#{e.message}), returning all tools"
            { suggested_tools: TOOL_NAMES }
          end
        end
      end
    end
  end
end
data/lib/brute_flow.rb
ADDED
|
# frozen_string_literal: true

require "brute"
require "bpmn"
require "active_support/core_ext/time/zones"

# WorkflowKit needs a time zone; default to UTC if none is configured.
Time.zone ||= "UTC"

module Brute
  module Flow
    module Services; end
  end
end

require_relative "brute_flow/builder"
require_relative "brute_flow/runner"
require_relative "brute_flow/services/agent_service"
require_relative "brute_flow/services/router_service"
require_relative "brute_flow/services/self_check_service"
require_relative "brute_flow/services/tool_suggest_service"
require_relative "brute_flow/services/memory_recall_service"

module Brute
  # Create a BPMN-driven multi-agent flow.
  #
  #   runner = Brute.flow(cwd: "/project", variables: { user_message: msg }) do
  #     service :router, type: "Brute::Flow::Services::RouterService"
  #     service :agent, type: "Brute::Flow::Services::AgentService"
  #   end
  #   result = runner.run
  #
  # @param cwd [String] working directory for the flow
  # @param variables [Hash] initial flow variables
  # @return [Brute::Flow::Runner] a runner ready to execute
  def self.flow(cwd: Dir.pwd, variables: {}, &block)
    flow_definitions = Flow::Builder.build("brute_flow", &block)
    Flow::Runner.new(flow_definitions, cwd: cwd, variables: variables)
  end
end
metadata
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
|
2
|
+
name: brute_flow
|
|
3
|
+
version: !ruby/object:Gem::Version
|
|
4
|
+
version: 0.1.0
|
|
5
|
+
platform: ruby
|
|
6
|
+
authors:
|
|
7
|
+
- Brute Contributors
|
|
8
|
+
bindir: bin
|
|
9
|
+
cert_chain: []
|
|
10
|
+
date: 1980-01-02 00:00:00.000000000 Z
|
|
11
|
+
dependencies:
|
|
12
|
+
- !ruby/object:Gem::Dependency
|
|
13
|
+
name: brute
|
|
14
|
+
requirement: !ruby/object:Gem::Requirement
|
|
15
|
+
requirements:
|
|
16
|
+
- - "~>"
|
|
17
|
+
- !ruby/object:Gem::Version
|
|
18
|
+
version: '0.1'
|
|
19
|
+
type: :runtime
|
|
20
|
+
prerelease: false
|
|
21
|
+
version_requirements: !ruby/object:Gem::Requirement
|
|
22
|
+
requirements:
|
|
23
|
+
- - "~>"
|
|
24
|
+
- !ruby/object:Gem::Version
|
|
25
|
+
version: '0.1'
|
|
26
|
+
- !ruby/object:Gem::Dependency
|
|
27
|
+
name: bpmn
|
|
28
|
+
requirement: !ruby/object:Gem::Requirement
|
|
29
|
+
requirements:
|
|
30
|
+
- - ">="
|
|
31
|
+
- !ruby/object:Gem::Version
|
|
32
|
+
version: 0.4.0
|
|
33
|
+
type: :runtime
|
|
34
|
+
prerelease: false
|
|
35
|
+
version_requirements: !ruby/object:Gem::Requirement
|
|
36
|
+
requirements:
|
|
37
|
+
- - ">="
|
|
38
|
+
- !ruby/object:Gem::Version
|
|
39
|
+
version: 0.4.0
|
|
40
|
+
description: Extends the brute gem with a declarative BPMN workflow engine for multi-agent
|
|
41
|
+
orchestration — parallel branches, conditional routing, loops with timeouts, and
|
|
42
|
+
pluggable service tasks.
|
|
43
|
+
executables: []
|
|
44
|
+
extensions: []
|
|
45
|
+
extra_rdoc_files: []
|
|
46
|
+
files:
|
|
47
|
+
- lib/brute_flow.rb
|
|
48
|
+
- lib/brute_flow/builder.rb
|
|
49
|
+
- lib/brute_flow/runner.rb
|
|
50
|
+
- lib/brute_flow/services/agent_service.rb
|
|
51
|
+
- lib/brute_flow/services/memory_recall_service.rb
|
|
52
|
+
- lib/brute_flow/services/router_service.rb
|
|
53
|
+
- lib/brute_flow/services/self_check_service.rb
|
|
54
|
+
- lib/brute_flow/services/tool_suggest_service.rb
|
|
55
|
+
licenses:
|
|
56
|
+
- MIT
|
|
57
|
+
metadata: {}
|
|
58
|
+
rdoc_options: []
|
|
59
|
+
require_paths:
|
|
60
|
+
- lib
|
|
61
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
|
62
|
+
requirements:
|
|
63
|
+
- - ">="
|
|
64
|
+
- !ruby/object:Gem::Version
|
|
65
|
+
version: '3.2'
|
|
66
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
|
67
|
+
requirements:
|
|
68
|
+
- - ">="
|
|
69
|
+
- !ruby/object:Gem::Version
|
|
70
|
+
version: '0'
|
|
71
|
+
requirements: []
|
|
72
|
+
rubygems_version: 3.7.2
|
|
73
|
+
specification_version: 4
|
|
74
|
+
summary: BPMN-based multi-agent flow engine for Brute
|
|
75
|
+
test_files: []
|