scjson 0.3.3 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/LEGAL.md +5 -0
- data/LICENSE +21 -0
- data/README.md +37 -0
- data/lib/scjson/cli.rb +86 -2
- data/lib/scjson/engine/context.rb +1597 -0
- data/lib/scjson/engine.rb +187 -0
- data/lib/scjson/types.rb +1964 -0
- data/lib/scjson/version.rb +1 -1
- data/lib/scjson.rb +76 -16
- metadata +19 -6
|
@@ -0,0 +1,1597 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
# Agent Name: ruby-engine-context
|
|
4
|
+
#
|
|
5
|
+
# Part of the scjson project.
|
|
6
|
+
# Developed by Softoboros Technology Inc.
|
|
7
|
+
# Licensed under the BSD 1-Clause License.
|
|
8
|
+
|
|
9
|
+
require 'json'
|
|
10
|
+
require 'set'
|
|
11
|
+
require_relative '../../scjson'
|
|
12
|
+
|
|
13
|
+
module Scjson
|
|
14
|
+
module Engine
|
|
15
|
+
#
|
|
16
|
+
# Minimal document context and transition logic to support
|
|
17
|
+
# deterministic trace emission for simple charts. This is an
|
|
18
|
+
# iterative scaffold toward full SCXML semantics.
|
|
19
|
+
#
|
|
20
|
+
class DocumentContext
|
|
21
|
+
# @return [Hash{String=>Object}] Canonical scjson root map
|
|
22
|
+
attr_reader :root
|
|
23
|
+
# @return [Array<String>] Active state configuration
|
|
24
|
+
attr_reader :configuration
|
|
25
|
+
|
|
26
|
+
##
|
|
27
|
+
# Load a document and create a context.
|
|
28
|
+
#
|
|
29
|
+
# @param input_path [String] Path to SCXML or SCJSON document
|
|
30
|
+
# @param xml [Boolean] Treat the input as SCXML when true
|
|
31
|
+
# @return [DocumentContext]
|
|
32
|
+
##
# Build a context from a document on disk.
#
# @param input_path [String] path to an SCXML or scjson file
# @param xml [Boolean] convert from SCXML to scjson first when true
# @param parent_link [Object, nil] back-reference to a parent context, if any
# @param child_invoke_id [String, nil] invoke id assigned by the parent
# @return [DocumentContext]
def self.from_file(input_path, xml: false, parent_link: nil, child_invoke_id: nil)
  raw = File.read(input_path)
  canonical = xml ? Scjson.xml_to_json(raw) : raw
  root_map = JSON.parse(canonical)
  # base_dir lets relative invoke src paths resolve against the document location
  new(root_map,
      parent_link: parent_link,
      child_invoke_id: child_invoke_id,
      base_dir: File.dirname(File.expand_path(input_path)))
end
|
|
42
|
+
|
|
43
|
+
##
|
|
44
|
+
# Construct a context from a canonical scjson object.
|
|
45
|
+
#
|
|
46
|
+
# @param root [Hash] Canonical scjson root map
|
|
47
|
+
##
# Construct a context from a canonical scjson object.
#
# @param root [Hash] canonical scjson root map
# @param parent_link [Object, nil] back-reference to a parent context
# @param child_invoke_id [String, nil] invoke id assigned by the parent
# @param base_dir [String, nil] directory for resolving relative resources
def initialize(root, parent_link: nil, child_invoke_id: nil, base_dir: nil)
  @root = root
  @states = {}
  @parent = {}
  @parent_link = parent_link
  @child_invoke_id = child_invoke_id
  @base_dir = base_dir
  @tag_type = {}
  @history_shallow = {}
  @history_deep = {}
  # Indexing must precede initial_configuration: it populates @states/@parent.
  index_states(@root)
  @configuration = initial_configuration
  @time = 0.0
  @timers = [] # entries: {time: Float, name: String, data: Object, id?: String}
  @send_seq = 0
  @invocations = {} # id => {state:, node:, status: 'active'|'done'|'canceled'}
  @invocations_by_state = Hash.new { |h, k| h[k] = [] }
  @invoke_seq = 0
  @internal_queue = []
  @deferred_internal = []
  @defer_done = true
  # Start any invocations declared on initially-entered states.
  schedule_invocations_for_entered(@configuration)
end
|
|
71
|
+
|
|
72
|
+
##
|
|
73
|
+
# Compute the set of leaf state IDs for filtering.
|
|
74
|
+
#
|
|
75
|
+
# @return [Array<String>] Sorted list of leaf state IDs
|
|
76
|
+
##
# Compute the set of leaf state IDs for filtering.
#
# A state is a leaf when it has neither child 'state' nor 'parallel' entries.
#
# @return [Array<String>] sorted list of leaf state IDs
def leaf_state_ids
  @states.reject { |_id, node| node.key?('state') || node.key?('parallel') }
         .keys
         .sort
end
|
|
84
|
+
|
|
85
|
+
##
|
|
86
|
+
# Produce the initialization trace snapshot (step 0).
|
|
87
|
+
#
|
|
88
|
+
# @return [Hash] Trace record for initialization step
|
|
89
|
+
##
# Produce the initialization trace snapshot (step 0).
#
# No event has been processed yet, so only the initial configuration
# is reported (as both enteredStates and configuration).
#
# @return [Hash] trace record for the initialization step
def trace_init
  {
    'event' => nil,
    'firedTransitions' => [],
    'enteredStates' => @configuration.dup,
    'exitedStates' => [],
    'configuration' => @configuration.dup,
    'actionLog' => [],
    'datamodelDelta' => {}
  }
end
|
|
100
|
+
|
|
101
|
+
##
|
|
102
|
+
# Process one external event and emit a trace step.
|
|
103
|
+
#
|
|
104
|
+
# @param name [String] Event name
|
|
105
|
+
# @param data [Object] Event payload (unused in scaffold)
|
|
106
|
+
# @return [Hash] Trace record for this step
|
|
107
|
+
##
# Process one external event and emit a trace step.
#
# Order of operations per step:
#   1. re-inject internal events deferred from the previous step
#   2. run pending invoke finalizers (when done.invoke is deferred)
#   3. drain queued internal events (invoke completions / transitions)
#   4. handle the external event, running eventless transitions to quiescence
#   5. drain internal events raised during handling, FIFO
#
# @param name [String] event name
# @param data [Object] event payload
# @return [Hash] trace record for this step
def trace_step(name:, data: nil)
  event_obj = { 'name' => name, 'data' => data }
  fired = []
  exited = []
  entered = []
  action_log = []
  datamodel_delta = {}

  @data ||= {}
  @internal_queue ||= []
  # Inject any deferred internal events from the previous step ahead of
  # anything already queued.
  unless @deferred_internal.nil? || @deferred_internal.empty?
    @internal_queue = (@deferred_internal + @internal_queue)
    @deferred_internal.clear
  end
  # When deferring done.invoke, run pending finalizers at step start.
  if @defer_done
    a0, d0 = run_pending_finalizers
    action_log.concat(a0)
    datamodel_delta.merge!(d0)
  end
  @current_event_name = name
  @current_event_data = data

  # Process pending internal events before the external one (SCXML
  # internal-queue priority). Bounded to avoid runaway loops.
  0.upto(100) do
    break if @internal_queue.empty?
    ev = @internal_queue.shift
    next unless ev.is_a?(Hash)
    internal_name = ev['name']
    internal_data = ev['data']
    if internal_name == '__invoke_complete'
      # Synthetic control event: a child invocation finished.
      iid = internal_data.is_a?(Hash) ? internal_data['invokeid'] : nil
      if iid
        a, d = finalize_invoke(iid, completed: true)
        action_log.concat(a)
        datamodel_delta.merge!(d)
        rec = @invocations[iid]
        order = rec && rec[:order] ? rec[:order] : 0
        (@pending_done_invoke ||= []) << { iid: iid, order: order }
      end
    else
      tx_set = select_transitions_for_event(internal_name)
      unless tx_set.empty?
        e1, x1, f1, a1, d1 = apply_transition_set(tx_set, cause: internal_name)
        entered.concat(e1)
        exited.concat(x1)
        fired.concat(f1)
        action_log.concat(a1)
        datamodel_delta.merge!(d1)
      end
      # Eventless transitions to quiescence after the internal event.
      0.upto(100) do
        tx0 = select_transitions_for_event(nil)
        break if tx0.empty?
        e2, x2, f2, a2, d2 = apply_transition_set(tx0, cause: nil)
        entered.concat(e2)
        exited.concat(x2)
        fired.concat(f2)
        action_log.concat(a2)
        datamodel_delta.merge!(d2)
      end
      enqueue_done_events
    end
    flush_pending_done_invoke
  end

  # Shared handler for one event (external or internal); accumulates into
  # the step-level entered/exited/fired/action_log/datamodel_delta.
  process_event = lambda do |ev_name, ev_data|
    @current_event_name = ev_name
    @current_event_data = ev_data
    # Special internal control: invocation completion.
    if ev_name == '__invoke_complete'
      iid = ev_data.is_a?(Hash) ? ev_data['invokeid'] : nil
      if iid
        a, d = finalize_invoke(iid, completed: true)
        action_log.concat(a)
        datamodel_delta.merge!(d)
        # Buffer done.invoke events for ordered flushing.
        rec = @invocations[iid]
        order = rec && rec[:order] ? rec[:order] : 0
        (@pending_done_invoke ||= []) << { iid: iid, order: order }
      end
      return
    end
    # Autoforward external events to active children that requested it.
    begin
      if ev_name && !ev_name.to_s.start_with?('__') && !ev_name.to_s.start_with?('done.')
        @invocations.each do |iid, rec|
          next unless rec[:status] == 'active'
          inv_node = rec[:node]
          af = inv_node['autoforward']
          is_true = (af == true) || (af.is_a?(String) && af.to_s.downcase == 'true')
          route_to_child(iid, ev_name.to_s, ev_data, 0.0) if is_true && rec[:ctx]
        end
      end
    rescue StandardError
      # Autoforward is best-effort; ignore routing failures.
    end
    loop do
      tx_set = select_transitions_for_event(ev_name)
      break if tx_set.empty?
      e1, x1, f1, a1, d1 = apply_transition_set(tx_set, cause: ev_name)
      entered.concat(e1)
      exited.concat(x1)
      fired.concat(f1)
      action_log.concat(a1)
      datamodel_delta.merge!(d1)
      # A microstep may complete states; raise done.state events now.
      enqueue_done_events
      flush_pending_done_invoke
      # Then run eventless transitions to quiescence.
      0.upto(100) do
        tx0 = select_transitions_for_event(nil)
        break if tx0.empty?
        e2, x2, f2, a2, d2 = apply_transition_set(tx0, cause: nil)
        entered.concat(e2)
        exited.concat(x2)
        fired.concat(f2)
        action_log.concat(a2)
        datamodel_delta.merge!(d2)
        enqueue_done_events
        flush_pending_done_invoke
      end
    end
  end

  # Drain any internal events queued above before the external event.
  0.upto(100) do
    break if @internal_queue.empty?
    ev = @internal_queue.shift
    if ev.is_a?(Hash)
      process_event.call(ev['name'], ev['data'])
    else
      process_event.call(ev.to_s, nil)
    end
    flush_pending_done_invoke
  end

  process_event.call(name, data)

  # Then process internal events raised while handling it, FIFO.
  0.upto(100) do
    break if @internal_queue.empty?
    ev = @internal_queue.shift
    if ev.is_a?(Hash)
      process_event.call(ev['name'], ev['data'])
    else
      process_event.call(ev.to_s, nil)
    end
    # Flush any pending done.invoke for immediate processing.
    flush_pending_done_invoke
  end

  {
    'event' => event_obj,
    'firedTransitions' => fired,
    'enteredStates' => entered,
    'exitedStates' => exited,
    'configuration' => @configuration.dup,
    'actionLog' => action_log,
    'datamodelDelta' => datamodel_delta
  }
end
|
|
280
|
+
|
|
281
|
+
private
|
|
282
|
+
|
|
283
|
+
##
# Normalize a scjson field to an array: nil becomes [], scalars are wrapped.
#
# @param value [Object, nil]
# @return [Array]
def wrap_list(value)
  case value
  when nil then []
  when Array then value
  else [value]
  end
end
|
|
287
|
+
|
|
288
|
+
# Set event ordering mode: 'tolerant' (default), 'strict', or 'scion'
|
|
289
|
+
##
# Set the event ordering mode: 'tolerant' (default), 'strict', or 'scion'.
# Input is normalized to a lowercase string; nil falls back to 'tolerant'.
def ordering_mode=(mode)
  normalized = (mode || 'tolerant').to_s.downcase
  @ordering_mode = normalized
end
|
|
292
|
+
|
|
293
|
+
# Control whether done.invoke is processed in the same step or deferred
|
|
294
|
+
##
# Control whether done.invoke is processed in the same step or deferred
# to the start of the next step. Coerces any truthy value to a boolean.
def defer_done=(flag)
  @defer_done = flag ? true : false
end
|
|
297
|
+
|
|
298
|
+
# Seed or update datamodel entries in this context
|
|
299
|
+
##
# Seed or update datamodel entries in this context.
#
# Non-Hash input is ignored; nil keys are skipped; keys are stringified.
#
# @param pairs [Hash] name => value entries to merge into the datamodel
def set_initial_data(pairs)
  return unless pairs.is_a?(Hash)
  @data ||= {}
  pairs.each_pair do |key, value|
    @data[key.to_s] = value unless key.nil?
  end
end
|
|
307
|
+
|
|
308
|
+
# Flush buffered done.invoke events in a stable order per ordering_mode
|
|
309
|
+
##
# Flush buffered done.invoke events in a stable order per @ordering_mode.
#
# Defect fixed: the scion and tolerant branches duplicated the entire
# payload-lookup and event-pair construction; that logic is now shared
# via +events_for+. Behavior is unchanged:
# - 'scion' mode emits in reverse completion order, prepended to the queue
# - any other mode emits in completion order, appended to the queue
# - @defer_done routes events to @deferred_internal instead of
#   @internal_queue, postponing them to the next step
def flush_pending_done_invoke
  buf = @pending_done_invoke
  return unless buf && !buf.empty?
  mode = (@ordering_mode || 'tolerant').to_s.downcase
  items = buf.sort_by { |h| h[:order] || 0 }
  # Build the [generic, specific] done.invoke pair for one invocation id.
  events_for = lambda do |iid|
    payload = begin
      rec = @invocations[iid]
      rec && rec[:done_payload]
    rescue StandardError
      nil
    end
    [
      { 'name' => 'done.invoke', 'data' => payload },
      { 'name' => "done.invoke.#{iid}", 'data' => payload }
    ]
  end
  if mode == 'scion'
    items.reverse_each do |h|
      evs = events_for.call(h[:iid])
      if @defer_done
        @deferred_internal.unshift(*evs.reverse) # preserve order when later unshifting
      else
        @internal_queue.unshift(*evs.reverse)
      end
    end
  else
    items.each do |h|
      evs = events_for.call(h[:iid])
      if @defer_done
        @deferred_internal.concat(evs)
      else
        @internal_queue.concat(evs)
      end
    end
  end
  @pending_done_invoke.clear
end
|
|
355
|
+
|
|
356
|
+
# Find the first enabled transition in document order matching the event name.
|
|
357
|
+
##
# Find the first enabled transition in document order matching the event name.
# A transition matches when its event tokens include the name or the '*'
# wildcard and its cond (if any) evaluates truthy.
#
# @param name [String] event name to match
# @return [Array(String, Hash), nil] [state id, transition map] or nil
def find_enabled_transition_for_event(name)
  @configuration.each do |sid|
    node = @states[sid]
    next unless node
    hit = wrap_list(node['transition']).find do |t|
      next false unless t.is_a?(Hash)
      tokens = parse_event_tokens(t['event'])
      (tokens.include?(name) || tokens.include?('*')) && cond_true?(t['cond'])
    end
    return [sid, hit] if hit
  end
  nil
end
|
|
371
|
+
|
|
372
|
+
# Find the first enabled eventless transition from the active configuration.
|
|
373
|
+
##
# Find the first enabled eventless transition from the active configuration.
# Eventless means the 'event' attribute is absent or a blank string; the
# cond (if any) must evaluate truthy.
#
# @return [Array(String, Hash), nil] [state id, transition map] or nil
def find_enabled_eventless_transition
  @configuration.each do |sid|
    node = @states[sid]
    next unless node
    hit = wrap_list(node['transition']).find do |t|
      next false unless t.is_a?(Hash)
      ev = t['event']
      blank = ev.nil? || (ev.is_a?(String) && ev.strip.empty?)
      blank && cond_true?(t['cond'])
    end
    return [sid, hit] if hit
  end
  nil
end
|
|
387
|
+
|
|
388
|
+
# Select a non-conflicting set of transitions for the given event name (or nil for eventless).
|
|
389
|
+
##
# Select a non-conflicting set of transitions for the given event name
# (nil selects eventless transitions).
#
# Defect fixed: the conflict predicate (exit-set overlap, nested domains,
# ancestor/descendant sources) was duplicated verbatim in the selection
# pass and the rejection pass; it is now a single shared lambda.
#
# For each active leaf, ancestry is walked leaf-to-root and the first
# enabled transition wins for that leaf. Conflicts between candidates are
# resolved by preferring the deeper domain, tie-breaking on discovery
# (document) order.
#
# @param name [String, nil] event name, or nil for eventless selection
# @return [Array<Array(String, Hash)>] [source id, transition map] pairs
def select_transitions_for_event(name)
  candidates = []
  seq = 0
  @configuration.each do |sid|
    ancestors(sid).each do |nid|
      node = @states[nid]
      next unless node
      t = wrap_list(node['transition']).find do |tm|
        next false unless tm.is_a?(Hash)
        if name.nil?
          ev = tm['event']
          (ev.nil? || (ev.is_a?(String) && ev.strip.empty?)) && cond_true?(tm['cond'])
        else
          tokens = parse_event_tokens(tm['event'])
          (tokens.include?(name) || tokens.include?('*')) && cond_true?(tm['cond'])
        end
      end
      if t
        targets = wrap_list(t['target']).map(&:to_s)
        lcas = targets.map { |tid| lca(nid, tid) }
        dom = choose_shallowest_ancestor(lcas)
        exit_chain = path_up_exclusive(nid, dom)
        depth = dom ? ancestors(dom).length : 0
        candidates << { src: nid, t: t, dom: dom, exit: exit_chain, depth: depth, order: seq }
        seq += 1
        break # first enabled transition up the chain wins for this leaf
      end
    end
  end

  # Two candidates conflict when their exit sets overlap, their domains
  # nest, or one source is an ancestor of the other.
  conflicts_with = lambda do |s, c|
    exit_overlap = !(s[:exit] & c[:exit]).empty?
    dom_conflict = (s[:dom] && c[:dom]) &&
                   (is_ancestor?(s[:dom], c[:dom]) || is_ancestor?(c[:dom], s[:dom]))
    src_conflict = is_ancestor?(s[:src], c[:src]) || is_ancestor?(c[:src], s[:src])
    exit_overlap || dom_conflict || src_conflict
  end

  selected = []
  candidates.each do |cand|
    conflicts = selected.select { |s| conflicts_with.call(s, cand) }
    if conflicts.empty?
      selected << cand
      next
    end
    # Candidate wins only if it beats every conflicting selection:
    # deeper domain first, then earlier discovery order.
    winner = conflicts.all? do |s|
      cand[:depth] > s[:depth] || (cand[:depth] == s[:depth] && cand[:order] < s[:order])
    end
    next unless winner
    selected.reject! { |s| conflicts_with.call(s, cand) }
    selected << cand
  end
  selected.map { |e| [e[:src], e[:t]] }
end
|
|
452
|
+
|
|
453
|
+
# Apply a transition: update configuration and compute deltas.
|
|
454
|
+
##
# Apply a single transition: update the configuration and compute deltas.
#
# Exit set: source up to (but excluding) the shallowest LCA among targets,
# deep->shallow. Entry set: LCA down to each target, shallow->deep.
# Transition-body actions run between the exits and the entries.
#
# @param source_id [String] id of the transition's source state
# @param transition_map [Hash] the transition element map
# @param cause [String, nil] event name that triggered the transition
# @return [Array] [entry_order, exit_order, fired, actions, delta]
def apply_transition(source_id, transition_map, cause:)
  targets = wrap_list(transition_map['target']).map(&:to_s)
  actions = []
  delta = {}
  exit_order = []
  entry_order = []

  # Use the shallowest LCA so the domain is safe across multiple targets.
  lcas = targets.map { |tid| lca(source_id, tid) }
  chosen_lca = choose_shallowest_ancestor(lcas)
  exit_order.concat(path_up_exclusive(source_id, chosen_lca)) # deep -> shallow

  targets.each do |tid|
    entry_order.concat(path_down_from_lca(chosen_lca, tid)) # shallow -> deep
  end

  # onexit handlers, deep -> shallow.
  exit_order.each do |sid|
    a, d = run_onexit(sid)
    actions.concat(a)
    delta.merge!(d)
  end

  # Leaf-level configuration update: drop exited states, add targets.
  new_config = @configuration.dup
  exit_order.each { |sid| new_config.delete(sid) }
  new_config.delete(source_id) # ensure source leaves even if ids were missing
  targets.each { |tid| new_config << tid unless new_config.include?(tid) }
  @configuration = new_config

  # Transition body executes between exit and entry.
  ta, td = run_actions_from_map(transition_map, context_state: source_id)
  actions.concat(ta)
  delta.merge!(td)

  # onentry handlers, shallow -> deep.
  entry_order.each do |sid|
    a, d = run_onentry(sid)
    actions.concat(a)
    delta.merge!(d)
  end

  fired = [{
    'source' => source_id,
    'targets' => targets,
    'event' => cause,
    'cond' => nil
  }]
  [entry_order, exit_order, fired, actions, delta]
end
|
|
511
|
+
|
|
512
|
+
# Apply a set of non-conflicting transitions in one microstep.
|
|
513
|
+
##
# Apply a set of non-conflicting transitions in one microstep.
#
# Phases: compute exit/entry sequences per transition (expanding history
# targets to remembered or default children), run onexit deep->shallow,
# record history, run transition bodies, run onentry shallow->deep while
# expanding composites to their initial leaves, then update the
# configuration and schedule invocations on entered states.
#
# @param tx_set [Array<Array(String, Hash)>] [source id, transition map] pairs
# @param cause [String, nil] event name that triggered the set
# @return [Array] [entered, exit_order, fired, actions, delta]
def apply_transition_set(tx_set, cause:)
  actions = []
  delta = {}
  fired = []
  exit_set = []
  entry_sequences = []

  # Phase 1: per-transition exit chains and entry sequences.
  tx_set.each do |source_id, transition_map|
    targets = wrap_list(transition_map['target']).map(&:to_s)
    lcas = targets.map { |tid| lca(source_id, tid) }
    chosen_lca = choose_shallowest_ancestor(lcas)
    exit_set.concat(path_up_exclusive(source_id, chosen_lca))
    seq = []
    targets.each do |tid|
      if @tag_type[tid] == :history
        parent_id = @parent[tid]
        node = @states[tid]
        deep = (node && node['type_value'].to_s.downcase == 'deep')
        remembered = deep ? (@history_deep[parent_id] || []) : (@history_shallow[parent_id] || [])
        if remembered.empty?
          # No recorded history yet: fall back to the parent's defaults.
          remembered =
            if deep
              initial_leaves_for_id(parent_id)
            else
              initial_child_ids_for(@states[parent_id])
            end
        end
        remembered.each { |rid| seq.concat(path_down_from_lca(chosen_lca, rid)) }
      else
        seq.concat(path_down_from_lca(chosen_lca, tid))
      end
    end
    entry_sequences << seq
    fired << { 'source' => source_id, 'targets' => targets, 'event' => cause, 'cond' => nil }
  end

  # Phase 2: deduplicated exits, deep->shallow.
  exit_order = exit_set.uniq
  exit_order.each do |sid|
    a, d = run_onexit(sid)
    actions.concat(a)
    delta.merge!(d)
  end

  # Remove exited leaves now; entered leaves are merged in after expansion.
  new_config = @configuration.dup
  exit_order.each { |sid| new_config.delete(sid) }

  # History is recorded against the configuration as it was before exits.
  record_history_for_exits(exit_order, @configuration)

  # Phase 3: transition bodies.
  tx_set.each do |_, tmap|
    a, d = run_actions_from_map(tmap)
    actions.concat(a)
    delta.merge!(d)
  end

  # Phase 4: entries shallow->deep, then expand composites to initial leaves.
  entered = []
  entered_leaves = []
  entry_sequences.each do |seq|
    seq.each do |sid|
      a, d = run_onentry(sid)
      actions.concat(a)
      delta.merge!(d)
      entered << sid
    end
    # Expand descendants when the final target is a non-history state.
    target_id = seq.last
    if target_id && @tag_type[target_id] != :history
      leaves, bundle = enter_descendants(target_id)
      entered.concat(bundle[:entered])
      actions.concat(bundle[:actions])
      delta.merge!(bundle[:delta])
      entered_leaves.concat(leaves)
    end
    # Also expand every non-history id in the sequence; this covers the
    # remembered children introduced by shallow history targets.
    seq.each do |sid|
      next if @tag_type[sid] == :history
      leaves, bundle = enter_descendants(sid)
      entered.concat(bundle[:entered])
      actions.concat(bundle[:actions])
      delta.merge!(bundle[:delta])
      entered_leaves.concat(leaves)
    end
  end

  # Phase 5: commit configuration and start invocations on entered states.
  entered_leaves.each { |leaf| new_config << leaf unless new_config.include?(leaf) }
  @configuration = new_config
  schedule_invocations_for_entered((entered + entered_leaves).uniq)

  [entered, exit_order, fired, actions, delta]
end
|
|
620
|
+
|
|
621
|
+
# Ancestor chain from a state up to root (inclusive), leaf->root
|
|
622
|
+
##
# Ancestor chain from a state up to the root, inclusive, ordered leaf->root.
#
# @param id [String] state id
# @return [Array<String>] the id followed by each parent up to the root
def ancestors(id)
  chain = []
  node = id
  while node
    chain << node
    node = @parent[node]
  end
  chain
end
|
|
631
|
+
|
|
632
|
+
##
# Whether state +a+ is +b+ itself or an ancestor of +b+.
#
# @param a [String, nil] candidate ancestor id
# @param b [String, nil] candidate descendant id
# @return [Boolean]
def is_ancestor?(a, b)
  return false if a.nil? || b.nil?
  node = b
  while node
    return true if node == a
    node = @parent[node]
  end
  false
end
|
|
641
|
+
|
|
642
|
+
# ---- Entry helpers for composite states ----
|
|
643
|
+
##
# Recursively enter the initial descendants of a state.
#
# For a composite, each initial child gets its onentry run and is itself
# expanded; a non-composite contributes itself as a leaf.
#
# @param state_id [String] id of the state to expand
# @return [Array] [leaves, {entered:, actions:, delta:}]
def enter_descendants(state_id)
  node = @states[state_id]
  return [[], { entered: [], actions: [], delta: {} }] unless node

  entered = []
  actions = []
  delta = {}
  leaves = []
  if composite?(node)
    initial_child_ids_for(node).each do |cid|
      a, d = run_onentry(cid)
      actions.concat(a)
      delta.merge!(d)
      entered << cid
      sub_leaves, sub = enter_descendants(cid)
      actions.concat(sub[:actions])
      delta.merge!(sub[:delta])
      entered.concat(sub[:entered])
      leaves.concat(sub_leaves)
    end
  else
    leaves << state_id
  end
  [leaves, { entered: entered, actions: actions, delta: delta }]
end
|
|
667
|
+
|
|
668
|
+
##
# Whether a state node has children (a 'state' or 'parallel' key).
#
# @param node [Object] candidate state map
# @return [Boolean]
def composite?(node)
  return false unless node.is_a?(Hash)
  node.key?('state') || node.key?('parallel')
end
|
|
671
|
+
|
|
672
|
+
##
# Resolve the initial child ids for a composite state node.
#
# For parallel nodes: the initial leaves of every region. For plain states,
# precedence is: 'initial_attribute' tokens, then an <initial> element's
# transition targets, then the first child state.
#
# @param node [Hash] state node map
# @return [Array<String>] initial child state ids (possibly empty)
def initial_child_ids_for(node)
  if node.is_a?(Hash)
    node_id = node['id']
    if node.key?('parallel') || (node_id && @tag_type[node_id] == :parallel)
      # Parallel region: enter the initial leaves of each immediate state.
      ids = []
      wrap_list(node['state']).each do |child|
        cid = child['id']
        ids.concat(initial_leaves_for_id(cid.to_s)) if cid
      end
      return ids.uniq
    end
  end
  if node.key?('state')
    initial_tokens = wrap_list(node['initial_attribute']).map(&:to_s)
    return initial_tokens unless initial_tokens.empty?
    if node.key?('initial') && node['initial'].is_a?(Hash)
      return wrap_list(node['initial']['transition'])
             .flat_map { |t| wrap_list(t['target']).map(&:to_s) }
    end
    first = wrap_list(node['state']).map { |s| s['id'] }.compact.map(&:to_s).first
    return first ? [first] : []
  end
  []
end
|
|
699
|
+
|
|
700
|
+
##
# Compute the initial leaf states reached when entering the given state id.
#
# A leaf returns itself; a parallel state recurses into every region; a
# composite recurses through its initial children.
#
# @param state_id [String] state id to expand
# @return [Array<String>] leaf state ids
def initial_leaves_for_id(state_id)
  node = @states[state_id]
  return [] unless node
  return [state_id] unless composite?(node)

  leaves = []
  if node.key?('parallel')
    wrap_list(node['parallel']).each do |region|
      wrap_list(region['state']).each do |child|
        cid = child['id']
        leaves.concat(initial_leaves_for_id(cid.to_s)) if cid
      end
    end
    return leaves
  end
  initial_child_ids_for(node).each do |cid|
    leaves.concat(initial_leaves_for_id(cid))
  end
  leaves
end
|
|
719
|
+
|
|
720
|
+
# ---- Invoke / Finalize ----
|
|
721
|
+
##
# Start the invocations declared on each newly-entered state.
#
# Each <invoke> gets an id (its own, or a generated "iN"), a record in
# @invocations, and — when a child context can be built — the child is run
# to eventless quiescence. A '__invoke_complete' control event is queued
# immediately when the child has already completed or no child exists.
#
# @param state_ids [Array<String>] ids of states entered this microstep
def schedule_invocations_for_entered(state_ids)
  state_ids.each do |sid|
    node = @states[sid]
    next unless node
    wrap_list(node['invoke']).each do |inv|
      next unless inv.is_a?(Hash)
      iid = (inv['id'] && inv['id'].to_s)
      unless iid && !iid.empty?
        # No explicit id: generate a sequential one.
        @invoke_seq += 1
        iid = "i#{@invoke_seq}"
      end
      rec = { state: sid, node: inv, status: 'active' }
      # Build a child context for inline content or a src document, if possible.
      child_ctx = build_child_context(inv, iid)
      rec[:ctx] = child_ctx if child_ctx
      @invocations[iid] = rec
      @invocations_by_state[sid] << iid unless @invocations_by_state[sid].include?(iid)
      if child_ctx
        # Run the child's eventless transitions to quiescence (bounded).
        0.upto(100) do
          tx0 = child_ctx.select_transitions_for_event(nil)
          break if tx0.empty?
          child_ctx.apply_transition_set(tx0, cause: nil)
        end
        if child_ctx.machine_completed?
          @internal_queue << ({ 'name' => '__invoke_complete', 'data' => { 'invokeid' => iid } })
        end
      else
        # Absent child: treat the invocation as immediately complete.
        @internal_queue << ({ 'name' => '__invoke_complete', 'data' => { 'invokeid' => iid } })
      end
    end
  end
end
|
|
754
|
+
|
|
755
|
+
##
# Finalize an invocation: capture its donedata, run (or defer) its
# <finalize> blocks, mark it done/canceled, and detach it from its state.
#
# @param invoke_id [String] id of the invocation to finalize
# @param completed [Boolean] true when the child completed, false on cancel
# @return [Array] [actions, delta] produced by finalize blocks (empty when deferred)
def finalize_invoke(invoke_id, completed:)
  rec = @invocations[invoke_id]
  return [[], {}] unless rec

  actions = []
  delta = {}
  inv_node = rec[:node]
  # Capture the child's donedata for the done.invoke payload; only a
  # completed child with a context can carry one.
  rec[:done_payload] =
    if completed && rec[:ctx]
      begin
        extract_donedata_from_ctx(rec[:ctx])
      rescue StandardError
        nil
      end
    end
  fins = wrap_list(inv_node['finalize'])
  if @defer_done
    # Finalize actions are postponed to the start of the next step.
    rec[:finalize_blocks] = fins
    rec[:finalize_done] = false
  else
    fins.each do |fin|
      next unless fin.is_a?(Hash)
      a, d = run_actions_from_map(fin)
      actions.concat(a) if a
      delta.merge!(d) if d
    end
    rec[:finalize_done] = true
  end
  rec[:status] = completed ? 'done' : 'canceled'
  # Detach the invocation from its owning state.
  sid = rec[:state]
  @invocations_by_state[sid].delete(invoke_id) if sid && @invocations_by_state[sid]
  [actions, delta]
end
|
|
793
|
+
|
|
794
|
+
# Determine if a state is active or has an active descendant
|
|
795
|
+
##
# Whether a state is active — either directly in the configuration or an
# ancestor of an active leaf.
#
# @param state_id [String, nil] state id to test
# @return [Boolean]
def in_state?(state_id)
  return false if state_id.nil? || state_id.to_s.strip.empty?
  sid = state_id.to_s
  @configuration.any? { |leaf| leaf == sid || is_ancestor?(sid, leaf) }
end
|
|
800
|
+
|
|
801
|
+
# Extract donedata payload from a completed child context if available
|
|
802
|
+
      # Extract the <donedata> payload from a completed child context.
      # Peeks at the child's internals via instance_variable_get to find the
      # active top-level final state, then evaluates that state's donedata
      # params/content using the child's own evaluator.
      #
      # @param ctx [DocumentContext, nil] completed child context
      # @return [Hash, nil] payload map, or nil when no donedata is present
      def extract_donedata_from_ctx(ctx)
        return nil unless ctx && ctx.respond_to?(:instance_variable_get)
        conf = ctx.instance_variable_get(:@configuration) || []
        states = ctx.instance_variable_get(:@states) || {}
        tag_type = ctx.instance_variable_get(:@tag_type) || {}
        parent = ctx.instance_variable_get(:@parent) || {}
        # only root-level finals (no parent) signal machine completion
        finals = conf.select { |sid| tag_type[sid] == :final && parent[sid].nil? }
        return nil if finals.empty?
        fnode = states[finals.first]
        return nil unless fnode
        # donedata may be serialized as a single map or a one-element list
        dd = (fnode['donedata'].is_a?(Array) ? fnode['donedata'].first : fnode['donedata'])
        return nil unless dd.is_a?(Hash)
        payload = {}
        # params: evaluate expr (or resolve location) in the child's scope
        wrap_list(dd['param']).each do |pm|
          next unless pm.is_a?(Hash)
          nm = pm['name']
          next unless nm
          if pm['expr']
            begin
              payload[nm.to_s] = ctx.eval_expr(pm['expr'])
            rescue StandardError
              payload[nm.to_s] = nil
            end
          elsif pm['location']
            begin
              payload[nm.to_s] = ctx.resolve_path(pm['location'].to_s)
            rescue StandardError
              payload[nm.to_s] = nil
            end
          end
        end
        # content: passed through verbatim
        if dd['content']
          payload['content'] = dd['content']
        end
        payload.empty? ? nil : payload
      end
|
|
840
|
+
|
|
841
|
+
def cancel_invocations_for_state(state_id)
|
|
842
|
+
actions = []
|
|
843
|
+
delta = {}
|
|
844
|
+
ids = (@invocations_by_state[state_id] || []).dup
|
|
845
|
+
ids.each do |iid|
|
|
846
|
+
rec = @invocations[iid]
|
|
847
|
+
next unless rec && rec[:status] == 'active'
|
|
848
|
+
a, d = finalize_invoke(iid, completed: false)
|
|
849
|
+
actions.concat(a)
|
|
850
|
+
delta.merge!(d)
|
|
851
|
+
end
|
|
852
|
+
[actions, delta]
|
|
853
|
+
end
|
|
854
|
+
|
|
855
|
+
# Route a send to a specific child invocation by id
|
|
856
|
+
      # Route a send to a specific child invocation by id. Delivers the event
      # (immediately or delayed), then drives the child to quiescence and
      # enqueues __invoke_complete if the child machine finished.
      #
      # @param invoke_id [String] target invocation id
      # @param name [String] event name
      # @param data [Object, nil] event payload
      # @param delay [Numeric, nil] delay in seconds (<= 0 means immediate)
      # @return [Boolean] true when a live child context accepted the event
      def route_to_child(invoke_id, name, data, delay)
        rec = @invocations[invoke_id]
        return false unless rec
        ctx = rec[:ctx]
        return false unless ctx
        if delay && delay > 0
          ctx.schedule_internal_event(name, data, delay)
        else
          ctx.trace_step(name: name, data: data)
        end
        # Drive child to quiescence and check completion (bounded to avoid
        # looping forever on a pathological chart)
        0.upto(100) do
          tx0 = ctx.select_transitions_for_event(nil)
          break if tx0.empty?
          ctx.apply_transition_set(tx0, cause: nil)
        end
        if ctx.machine_completed?
          @internal_queue << ({ 'name' => '__invoke_complete', 'data' => { 'invokeid' => invoke_id } })
        end
        true
      end
|
|
877
|
+
|
|
878
|
+
# Parent accepts events from a child context
|
|
879
|
+
def enqueue_from_child(name, data, invoke_id)
|
|
880
|
+
@internal_queue << ({ 'name' => name.to_s, 'data' => data, 'invokeid' => invoke_id })
|
|
881
|
+
end
|
|
882
|
+
|
|
883
|
+
# Build a child context from an <invoke> map if inline content or src is present
|
|
884
|
+
      # Build a child DocumentContext from an <invoke> map. Supports inline
      # <content> holding an scjson root, or an external src file reference
      # (relative paths are resolved against @base_dir). Returns nil when the
      # invoke carries neither, or when construction fails.
      #
      # @param inv [Hash] the invoke element map
      # @param iid [String] invoke id assigned to the child
      # @return [DocumentContext, nil]
      def build_child_context(inv, iid)
        # Inline content: looks like scjson root
        content = inv['content']
        if content
          root = if content.is_a?(Hash)
                   content
                 elsif content.is_a?(Array)
                   # pick the first element that looks like a state container
                   content.find { |x| x.is_a?(Hash) && (x.key?('state') || x.key?('parallel') || x.key?('final')) }
                 else
                   nil
                 end
          if root
            begin
              child = DocumentContext.new(root, parent_link: self, child_invoke_id: iid, base_dir: @base_dir)
              # Map <param> into child's datamodel (evaluated in parent scope)
              initial = {}
              wrap_list(inv['param']).each do |pm|
                next unless pm.is_a?(Hash)
                nm = pm['name']
                next unless nm
                initial[nm.to_s] = eval_expr(pm['expr'])
              end
              child.set_initial_data(initial) unless initial.empty?
              return child
            rescue StandardError
              # malformed inline content: treat as no child
              return nil
            end
          end
        end
        # External src file reference
        src = inv['src']
        if src && src.is_a?(String)
          path = src
          # resolve relative paths (POSIX or Windows drive-letter absolute
          # paths are left untouched)
          if @base_dir && !(path.start_with?('/') || path =~ /^[A-Za-z]:\\/)
            path = File.expand_path(File.join(@base_dir, path))
          end
          begin
            if File.file?(path)
              is_xml = File.extname(path).downcase == '.scxml'
              child = DocumentContext.from_file(path, xml: is_xml, parent_link: self, child_invoke_id: iid)
              # Map <param> into child's datamodel (evaluated in parent scope)
              initial = {}
              wrap_list(inv['param']).each do |pm|
                next unless pm.is_a?(Hash)
                nm = pm['name']
                next unless nm
                initial[nm.to_s] = eval_expr(pm['expr'])
              end
              child.set_initial_data(initial) unless initial.empty?
              return child
            end
          rescue StandardError
            # unreadable/invalid source document: treat as no child
            return nil
          end
        end
        nil
      end
|
|
941
|
+
|
|
942
|
+
# Record history for parents of exiting states using the previous configuration
|
|
943
|
+
      # Record history for parents of exiting states using the previous
      # configuration. Deep history stores the exact leaf set under the
      # parent; shallow history stores the nearest (direct-most) child of the
      # parent on each leaf's ancestor chain.
      #
      # @param exit_order [Array<String>] states being exited
      # @param prev_config [Array<String>] configuration before the exit
      # @return [void]
      def record_history_for_exits(exit_order, prev_config)
        parents = exit_order.map { |sid| @parent[sid] }.compact.uniq
        parents.each do |pid|
          # Deep history: all leaves under pid in prev_config
          deep = prev_config.select { |leaf| is_ancestor?(pid, leaf) }
          @history_deep[pid] = deep.dup
          # Shallow history: nearest child under pid for each leaf
          shallow = []
          prev_config.each do |leaf|
            next unless is_ancestor?(pid, leaf)
            chain = ancestors(leaf)
            idx = chain.index(pid)
            next unless idx && idx > 0
            # chain is leaf->root, so chain[idx - 1] is pid's direct child
            near = chain[idx - 1]
            shallow << near if near
          end
          @history_shallow[pid] = shallow.uniq
        end
      end
|
|
962
|
+
|
|
963
|
+
# Lowest common ancestor (by id), returns nil if only the root matches
|
|
964
|
+
def lca(a, b)
|
|
965
|
+
return nil if a.nil? || b.nil?
|
|
966
|
+
aa = ancestors(a)
|
|
967
|
+
ab = ancestors(b)
|
|
968
|
+
set = aa.to_set
|
|
969
|
+
ab.each do |x|
|
|
970
|
+
return x if set.include?(x)
|
|
971
|
+
end
|
|
972
|
+
nil
|
|
973
|
+
end
|
|
974
|
+
|
|
975
|
+
# Choose the shallowest ancestor (closest to root) from a list
|
|
976
|
+
def choose_shallowest_ancestor(list)
|
|
977
|
+
# Compute depth by walking to root; pick one with max depth index reversed (shallowest)
|
|
978
|
+
return nil if list.nil? || list.empty?
|
|
979
|
+
# If all nil, return nil
|
|
980
|
+
found = list.compact
|
|
981
|
+
return nil if found.empty?
|
|
982
|
+
# Shallowest: minimal depth
|
|
983
|
+
found.min_by { |x| ancestors(x).length }
|
|
984
|
+
end
|
|
985
|
+
|
|
986
|
+
# Path from a node up to (but not including) the stop node
|
|
987
|
+
def path_up_exclusive(from_id, stop_id)
|
|
988
|
+
out = []
|
|
989
|
+
cur = from_id
|
|
990
|
+
while cur && cur != stop_id
|
|
991
|
+
out << cur
|
|
992
|
+
cur = @parent[cur]
|
|
993
|
+
end
|
|
994
|
+
out
|
|
995
|
+
end
|
|
996
|
+
|
|
997
|
+
# Path from LCA down to target, excluding LCA, shallow->deep
|
|
998
|
+
def path_down_from_lca(lca_id, target_id)
|
|
999
|
+
return [target_id] if lca_id.nil?
|
|
1000
|
+
chain = ancestors(target_id) # leaf->root
|
|
1001
|
+
idx = chain.index(lca_id)
|
|
1002
|
+
if idx.nil?
|
|
1003
|
+
# lca not on path; fallback to just the target
|
|
1004
|
+
return [target_id]
|
|
1005
|
+
end
|
|
1006
|
+
# chain[0..idx-1] are below LCA in leaf->... order; reverse to get shallow->deep
|
|
1007
|
+
below = chain[0...idx]
|
|
1008
|
+
below.reverse
|
|
1009
|
+
end
|
|
1010
|
+
|
|
1011
|
+
# Execute onexit actions for a state.
|
|
1012
|
+
def run_onexit(state_id)
|
|
1013
|
+
node = @states[state_id]
|
|
1014
|
+
return [[], {}] unless node
|
|
1015
|
+
actions = []
|
|
1016
|
+
delta = {}
|
|
1017
|
+
wrap_list(node['onexit']).each do |blk|
|
|
1018
|
+
next unless blk.is_a?(Hash)
|
|
1019
|
+
a, d = run_actions_from_map(blk, context_state: state_id)
|
|
1020
|
+
actions.concat(a)
|
|
1021
|
+
delta.merge!(d)
|
|
1022
|
+
end
|
|
1023
|
+
# Cancel active invocations for this state and run their finalize
|
|
1024
|
+
a2, d2 = cancel_invocations_for_state(state_id)
|
|
1025
|
+
actions.concat(a2)
|
|
1026
|
+
delta.merge!(d2)
|
|
1027
|
+
[actions, delta]
|
|
1028
|
+
end
|
|
1029
|
+
|
|
1030
|
+
# Execute onentry actions for a state.
|
|
1031
|
+
def run_onentry(state_id)
|
|
1032
|
+
node = @states[state_id]
|
|
1033
|
+
return [[], {}] unless node
|
|
1034
|
+
actions = []
|
|
1035
|
+
delta = {}
|
|
1036
|
+
wrap_list(node['onentry']).each do |blk|
|
|
1037
|
+
next unless blk.is_a?(Hash)
|
|
1038
|
+
a, d = run_actions_from_map(blk, context_state: state_id)
|
|
1039
|
+
actions.concat(a)
|
|
1040
|
+
delta.merge!(d)
|
|
1041
|
+
end
|
|
1042
|
+
[actions, delta]
|
|
1043
|
+
end
|
|
1044
|
+
|
|
1045
|
+
# Execute actions defined in a map: log, assign, raise, if/elseif/else.
|
|
1046
|
+
      # Execute the executable content found in a map, in a fixed order:
      # log, assign, raise, send, cancel, foreach, then if/elseif/else.
      # Accumulates human-readable trace actions and a datamodel delta;
      # errors are surfaced as internal error.execution / error.communication
      # events rather than exceptions.
      #
      # @param map [Hash] element map holding executable-content children
      # @param context_state [String, nil] owning state id, used to route
      #   #_child sends to that state's invocations
      # @return [Array(Array<String>, Hash)] trace actions and datamodel delta
      def run_actions_from_map(map, context_state: nil)
        actions = []
        delta = {}
        # log
        wrap_list(map['log']).each do |log|
          label = log.is_a?(Hash) ? log['label'] : nil
          expr = log.is_a?(Hash) ? log['expr'] : nil
          val = eval_expr(expr)
          actions << format_log(label, val)
        end
        # assign (note: dotted locations are rejected here)
        wrap_list(map['assign']).each do |as|
          loc = as.is_a?(Hash) ? as['location'] : nil
          expr = as.is_a?(Hash) ? as['expr'] : nil
          unless loc && loc.is_a?(String) && !loc.to_s.strip.empty? && loc.to_s !~ /\./
            schedule_internal_event('error.execution', { 'detail' => 'invalid assign target', 'location' => loc }, 0.0)
            next
          end
          val = eval_assign_expr(loc, expr)
          if val.nil? && !expr.nil?
            schedule_internal_event('error.execution', { 'detail' => 'assign expression error', 'location' => loc, 'expr' => expr }, 0.0)
          end
          @data[loc] = val
          delta[loc] = val
        end
        # raise (both serialized spellings are accepted)
        wrap_list(map['raise']).each do |rz|
          ev = rz.is_a?(Hash) ? (rz['event'] || rz['name']) : rz
          @internal_queue << ev.to_s if ev
        end
        wrap_list(map['raise_value']).each do |rz|
          ev = rz.is_a?(Hash) ? (rz['event'] || rz['name']) : rz
          @internal_queue << ev.to_s if ev
        end
        # send
        wrap_list(map['send']).each do |sd|
          next unless sd.is_a?(Hash)
          ev_name = sd['event'] || sd['name']
          ev_name = eval_expr(sd['eventexpr']) if (ev_name.nil? && sd['eventexpr'])
          delay = parse_delay(sd['delay'] || '0s')
          delay = parse_delay(eval_expr(sd['delayexpr'])) if sd['delayexpr']
          target = sd['target']
          target = eval_expr(sd['targetexpr']) if (target.nil? && sd['targetexpr'])
          # Build payload: params, namelist, content
          payload = {}
          wrap_list(sd['param']).each do |pm|
            next unless pm.is_a?(Hash)
            nm = pm['name']
            next unless nm
            if pm['expr']
              payload[nm.to_s] = eval_expr(pm['expr'])
            elsif pm['location']
              payload[nm.to_s] = resolve_path(pm['location'].to_s)
            end
          end
          if sd['namelist'] && sd['namelist'].is_a?(String)
            sd['namelist'].split(/\s+/).each do |nm|
              payload[nm] = (@data || {})[nm]
            end
          end
          if sd['content']
            payload['content'] = sd['content']
          end
          data_payload = payload.empty? ? nil : payload
          # send id / idlocation (generated ids are written back to the model)
          send_id = sd['id']
          if !send_id && sd['idlocation'] && sd['idlocation'].is_a?(String)
            @send_seq += 1
            send_id = "s#{@send_seq}"
            @data[sd['idlocation']] = send_id
            delta[sd['idlocation']] = send_id
          end
          if ev_name
            if target.nil? || target == '#_internal' || target == 'internal'
              schedule_internal_event(ev_name.to_s, data_payload, delay, id: send_id)
            elsif target == '#_child' || target == '#_invokedChild'
              # route to all active children of the context state (or error if none)
              routed = false
              if context_state && @invocations_by_state[context_state]
                @invocations_by_state[context_state].each do |iid|
                  routed |= route_to_child(iid, ev_name.to_s, data_payload, delay)
                end
              end
              unless routed
                schedule_internal_event('error.communication', { 'detail' => 'no child for #_child', 'event' => ev_name.to_s }, 0.0)
              end
            elsif target == '#_parent'
              if @parent_link
                @parent_link.enqueue_from_child(ev_name.to_s, data_payload, @child_invoke_id)
              else
                schedule_internal_event('error.communication', { 'detail' => 'no parent for #_parent', 'event' => ev_name.to_s }, 0.0)
              end
            elsif target.to_s.start_with?('#_')
              # #_<invokeid>: address a specific child invocation
              iid = target.to_s.sub(/^#_/, '')
              unless route_to_child(iid, ev_name.to_s, data_payload, delay)
                schedule_internal_event('error.communication', { 'detail' => 'unknown child', 'target' => target, 'event' => ev_name.to_s }, 0.0)
              end
            else
              # unsupported external targets -> error.communication
              schedule_internal_event('error.communication', { 'detail' => 'unsupported target', 'target' => target, 'event' => ev_name.to_s }, 0.0)
            end
          end
        end
        # cancel
        wrap_list(map['cancel']).each do |cz|
          next unless cz.is_a?(Hash)
          sid = cz['sendid']
          sid = eval_expr(cz['sendidexpr']) if sid.nil? && cz['sendidexpr']
          if sid.nil? || sid.to_s.strip.empty?
            schedule_internal_event('error.execution', { 'detail' => 'cancel missing id' }, 0.0)
            next
          end
          removed = cancel_send(sid.to_s)
          unless removed
            schedule_internal_event('error.execution', { 'detail' => 'cancel id not found', 'sendid' => sid.to_s }, 0.0)
          end
        end
        # foreach: iterate, recursing into the same map for the loop body
        wrap_list(map['foreach']).each do |fe|
          next unless fe.is_a?(Hash)
          array_expr = fe['array']
          item_name = (fe['item'] || 'item').to_s
          index_name = (fe['index'] || 'index').to_s
          ary = eval_expr(array_expr)
          # Normalize ary to an array
          if ary.nil?
            ary = []
          elsif ary.is_a?(Hash)
            ary = ary.values
          elsif !ary.is_a?(Array) && ary.respond_to?(:to_a)
            ary = ary.to_a
          elsif !ary.is_a?(Array)
            ary = [ary]
          end
          # remember prior bindings so the loop variables can be restored
          had_item = @data.key?(item_name)
          had_index = @data.key?(index_name)
          old_item = @data[item_name]
          old_index = @data[index_name]
          ary.each_with_index do |elem, idx|
            @data[item_name] = elem
            @data[index_name] = idx
            a, d = run_actions_from_map(fe)
            actions.concat(a)
            delta.merge!(d)
          end
          if had_item
            @data[item_name] = old_item
          else
            @data.delete(item_name)
          end
          if had_index
            @data[index_name] = old_index
          else
            @data.delete(index_name)
          end
        end
        # if / elseif / else
        wrap_list(map['if_value']).each do |iff|
          next unless iff.is_a?(Hash)
          if cond_true?(iff['cond'])
            a, d = run_actions_from_map(iff)
            actions.concat(a)
            delta.merge!(d)
          else
            taken = false
            wrap_list(iff['elseif']).each do |eif|
              next unless eif.is_a?(Hash)
              if cond_true?(eif['cond'])
                a, d = run_actions_from_map(eif)
                actions.concat(a)
                delta.merge!(d)
                taken = true
                break
              end
            end
            unless taken
              wrap_list(iff['else_value']).each do |els|
                next unless els.is_a?(Hash)
                a, d = run_actions_from_map(els)
                actions.concat(a)
                delta.merge!(d)
              end
            end
          end
        end
        [actions, delta]
      end
|
|
1233
|
+
|
|
1234
|
+
def format_log(label, value)
|
|
1235
|
+
lbl = label ? label.to_s : 'log'
|
|
1236
|
+
val = value.nil? ? '' : value.to_s
|
|
1237
|
+
"#{lbl}:#{val}"
|
|
1238
|
+
end
|
|
1239
|
+
|
|
1240
|
+
# Very small expression handler for demo purposes.
|
|
1241
|
+
def eval_expr(expr)
|
|
1242
|
+
return nil if expr.nil?
|
|
1243
|
+
s = expr.to_s.strip
|
|
1244
|
+
# quoted string (single or double)
|
|
1245
|
+
if (m = s.match(/^\s*['\"](.*)['\"]\s*$/))
|
|
1246
|
+
return m[1]
|
|
1247
|
+
end
|
|
1248
|
+
# boolean literals
|
|
1249
|
+
return true if s.downcase == 'true'
|
|
1250
|
+
return false if s.downcase == 'false'
|
|
1251
|
+
# in(stateId) predicate
|
|
1252
|
+
if (m = s.match(/^\s*in\((.+)\)\s*$/i))
|
|
1253
|
+
sid_raw = m[1].strip
|
|
1254
|
+
sid = sid_raw
|
|
1255
|
+
if (mm = sid_raw.match(/^['\"](.*)['\"]$/))
|
|
1256
|
+
sid = mm[1]
|
|
1257
|
+
end
|
|
1258
|
+
return in_state?(sid.to_s)
|
|
1259
|
+
end
|
|
1260
|
+
# JSON object/array literals
|
|
1261
|
+
if (s.start_with?('[') && s.end_with?(']')) || (s.start_with?('{') && s.end_with?('}'))
|
|
1262
|
+
begin
|
|
1263
|
+
return JSON.parse(s)
|
|
1264
|
+
rescue StandardError
|
|
1265
|
+
# fall through
|
|
1266
|
+
end
|
|
1267
|
+
end
|
|
1268
|
+
# integer or float
|
|
1269
|
+
if s.match?(/^[-+]?\d+(?:\.\d+)?$/)
|
|
1270
|
+
return s.include?('.') ? s.to_f : s.to_i
|
|
1271
|
+
end
|
|
1272
|
+
# variable reference
|
|
1273
|
+
if (m = s.match(/^\s*([a-zA-Z_][\w]*(?:\.[A-Za-z0-9_\[\]']+)*)\s*$/))
|
|
1274
|
+
return resolve_path(m[1])
|
|
1275
|
+
end
|
|
1276
|
+
# pattern: <var> ... + <number>
|
|
1277
|
+
if (m = s.match(/([a-zA-Z_][\w]*).*?\+\s*([-+]?\d+)/))
|
|
1278
|
+
base = (@data || {})[m[1]]
|
|
1279
|
+
base = 0 unless base.is_a?(Numeric)
|
|
1280
|
+
return base + m[2].to_i
|
|
1281
|
+
end
|
|
1282
|
+
# fallback: return as-is
|
|
1283
|
+
s
|
|
1284
|
+
end
|
|
1285
|
+
|
|
1286
|
+
def eval_assign_expr(location, expr)
|
|
1287
|
+
val = eval_expr(expr)
|
|
1288
|
+
# If val is a string equal to location or empty due to parsing limits, default to increment when reasonable
|
|
1289
|
+
if val.is_a?(String) && val.strip.empty?
|
|
1290
|
+
base = (@data || {})[location]
|
|
1291
|
+
base = 0 unless base.is_a?(Numeric)
|
|
1292
|
+
return base + 1
|
|
1293
|
+
end
|
|
1294
|
+
val
|
|
1295
|
+
end
|
|
1296
|
+
|
|
1297
|
+
# Evaluate a condition into boolean truthiness.
|
|
1298
|
+
      # Evaluate a condition into boolean truthiness. A nil/blank condition
      # is true. Handles (in precedence order of this naive parser): "and",
      # "or", unary "!"/"not", "in"/"not in" membership, comparison
      # operators, and finally bare-expression truthiness.
      #
      # @param expr [Object, nil] condition text
      # @return [Boolean]
      def cond_true?(expr)
        return true if expr.nil? || expr.to_s.strip.empty?
        s = expr.to_s.strip
        # logical and/or (left-associative, naive split on first occurrence)
        if (i = s.index(' and '))
          left = s[0...i]
          right = s[(i + 5)..-1]
          return cond_true?(left) && cond_true?(right)
        end
        if (i = s.index(' or '))
          left = s[0...i]
          right = s[(i + 4)..-1]
          return cond_true?(left) || cond_true?(right)
        end
        # unary not
        if s.start_with?('!')
          return !cond_true?(s[1..-1])
        end
        if s.downcase.start_with?('not ')
          return !cond_true?(s[4..-1])
        end
        # membership: X in Y / X not in Y
        if (m = s.match(/^(.+?)\s+not\s+in\s+(.+)$/i))
          lhs = eval_expr(m[1])
          rhs = eval_expr(m[2])
          return !member?(lhs, rhs)
        end
        if (m = s.match(/^(.+?)\s+in\s+(.+)$/i))
          lhs = eval_expr(m[1])
          rhs = eval_expr(m[2])
          return member?(lhs, rhs)
        end
        # general binary operators
        if (m = s.match(/^(.+?)\s*(==|!=|>=|<=|>|<)\s*(.+)$/))
          lhs = eval_expr(m[1])
          rhs = eval_expr(m[3])
          op = m[2]
          if %w[== !=].include?(op)
            # numeric-looking operands compare numerically ("1" == 1.0)
            lnum = lhs.is_a?(Numeric) || (lhs.is_a?(String) && lhs.match?(/^[-+]?\d+(?:\.\d+)?$/))
            rnum = rhs.is_a?(Numeric) || (rhs.is_a?(String) && rhs.match?(/^[-+]?\d+(?:\.\d+)?$/))
            if lnum && rnum
              lv = lhs.is_a?(Numeric) ? lhs.to_f : lhs.to_s.to_f
              rv = rhs.is_a?(Numeric) ? rhs.to_f : rhs.to_s.to_f
              return (lv == rv) if op == '=='
              return (lv != rv)
            else
              return (lhs == rhs) if op == '=='
              return (lhs != rhs)
            end
          else
            # ordering comparisons require both sides numeric
            return false unless lhs.is_a?(Numeric) && rhs.is_a?(Numeric)
            case op
            when '>' then return lhs > rhs
            when '<' then return lhs < rhs
            when '>=' then return lhs >= rhs
            when '<=' then return lhs <= rhs
            end
          end
        end
        # direct variable truthiness
        val = eval_expr(s)
        truthy?(val)
      end
|
|
1361
|
+
|
|
1362
|
+
def member?(item, container)
|
|
1363
|
+
return false if container.nil?
|
|
1364
|
+
if container.is_a?(Array)
|
|
1365
|
+
return container.include?(item)
|
|
1366
|
+
elsif container.is_a?(Hash)
|
|
1367
|
+
return container.key?(item) || container.key?(item.to_s) || container.key?(item.to_sym)
|
|
1368
|
+
elsif container.is_a?(String)
|
|
1369
|
+
return container.include?(item.to_s)
|
|
1370
|
+
end
|
|
1371
|
+
false
|
|
1372
|
+
end
|
|
1373
|
+
|
|
1374
|
+
def truthy?(val)
|
|
1375
|
+
return false if val.nil? || val == false
|
|
1376
|
+
return false if val == ''
|
|
1377
|
+
return false if val == 0
|
|
1378
|
+
true
|
|
1379
|
+
end
|
|
1380
|
+
|
|
1381
|
+
# Resolve a dotted path from @data or _event context.
|
|
1382
|
+
def resolve_path(path)
|
|
1383
|
+
tokens = path.split('.')
|
|
1384
|
+
return nil if tokens.empty?
|
|
1385
|
+
if tokens[0] == '_event'
|
|
1386
|
+
cur = { 'name' => @current_event_name, 'data' => @current_event_data }
|
|
1387
|
+
tokens = tokens[1..-1]
|
|
1388
|
+
else
|
|
1389
|
+
cur = @data || {}
|
|
1390
|
+
end
|
|
1391
|
+
tokens.each do |tk|
|
|
1392
|
+
key = tk
|
|
1393
|
+
if cur.is_a?(Array)
|
|
1394
|
+
if key =~ /^\d+$/
|
|
1395
|
+
idx = key.to_i
|
|
1396
|
+
cur = cur[idx]
|
|
1397
|
+
else
|
|
1398
|
+
return nil
|
|
1399
|
+
end
|
|
1400
|
+
elsif cur.is_a?(Hash)
|
|
1401
|
+
# support string/symbol access
|
|
1402
|
+
cur = cur[key] || cur[key.to_sym]
|
|
1403
|
+
else
|
|
1404
|
+
return nil
|
|
1405
|
+
end
|
|
1406
|
+
break if cur.nil?
|
|
1407
|
+
end
|
|
1408
|
+
cur
|
|
1409
|
+
end
|
|
1410
|
+
|
|
1411
|
+
# Timers and scheduling
|
|
1412
|
+
def schedule_internal_event(name, data, delay, id: nil)
|
|
1413
|
+
if delay.nil? || delay <= 0
|
|
1414
|
+
@internal_queue << ({ 'name' => name, 'data' => data, 'sendid' => id })
|
|
1415
|
+
else
|
|
1416
|
+
rec = { time: (@time + delay.to_f), name: name, data: data }
|
|
1417
|
+
rec[:id] = id if id
|
|
1418
|
+
@timers << rec
|
|
1419
|
+
@timers.sort_by! { |t| t[:time] }
|
|
1420
|
+
end
|
|
1421
|
+
end
|
|
1422
|
+
|
|
1423
|
+
def cancel_send(sendid)
|
|
1424
|
+
before = @timers.length
|
|
1425
|
+
@timers.delete_if { |t| t[:id] && t[:id].to_s == sendid.to_s }
|
|
1426
|
+
after = @timers.length
|
|
1427
|
+
before != after
|
|
1428
|
+
end
|
|
1429
|
+
|
|
1430
|
+
      # Advance the simulated clock, fire any matured timers, and propagate
      # the elapsed time into every child invocation context, driving each
      # child to quiescence and reporting completion to the internal queue.
      #
      # @param seconds [Numeric, nil] amount of time to advance (ignored when
      #   nil or non-positive)
      # @return [void]
      def advance_time(seconds)
        return if seconds.nil? || seconds.to_f <= 0
        @time += seconds.to_f
        flush_timers
        # Propagate time to child contexts
        @invocations.each_value do |rec|
          ctx = rec[:ctx]
          next unless ctx
          begin
            ctx.advance_time(seconds.to_f)
            # Drain child internal events and eventless to quiescence
            ctx.trace_step(name: '__time__', data: nil)
            0.upto(100) do
              tx0 = ctx.select_transitions_for_event(nil)
              break if tx0.empty?
              ctx.apply_transition_set(tx0, cause: nil)
            end
            if ctx.machine_completed?
              @internal_queue << ({ 'name' => '__invoke_complete', 'data' => { 'invokeid' => rec[:node]['id'] || rec[:state] } })
            end
          rescue StandardError
            # a faulty child must not break the parent's clock; skip it
            next
          end
        end
      end
|
|
1455
|
+
|
|
1456
|
+
def flush_timers
|
|
1457
|
+
while !@timers.empty? && @timers.first[:time] <= @time
|
|
1458
|
+
t = @timers.shift
|
|
1459
|
+
@internal_queue << ({ 'name' => t[:name], 'data' => t[:data], 'sendid' => t[:id] })
|
|
1460
|
+
end
|
|
1461
|
+
end
|
|
1462
|
+
|
|
1463
|
+
def parse_delay(str)
|
|
1464
|
+
return 0.0 if str.nil?
|
|
1465
|
+
return str if str.is_a?(Numeric)
|
|
1466
|
+
s = str.to_s.strip
|
|
1467
|
+
return 0.0 if s.empty?
|
|
1468
|
+
if (m = s.match(/^([-+]?\d+(?:\.\d+)?)\s*(ms|s)?$/i))
|
|
1469
|
+
val = m[1].to_f
|
|
1470
|
+
unit = (m[2] || 's').downcase
|
|
1471
|
+
return unit == 'ms' ? (val / 1000.0) : val
|
|
1472
|
+
end
|
|
1473
|
+
0.0
|
|
1474
|
+
end
|
|
1475
|
+
|
|
1476
|
+
def parse_event_tokens(str)
|
|
1477
|
+
return [] if str.nil?
|
|
1478
|
+
return [] unless str.is_a?(String)
|
|
1479
|
+
str.split(/\s+/)
|
|
1480
|
+
end
|
|
1481
|
+
|
|
1482
|
+
# Completed when a root-level <final> is active
|
|
1483
|
+
def machine_completed?
|
|
1484
|
+
@configuration.any? do |sid|
|
|
1485
|
+
@tag_type[sid] == :final && (@parent[sid].nil?)
|
|
1486
|
+
end
|
|
1487
|
+
end
|
|
1488
|
+
|
|
1489
|
+
# ---- Completion (done.state.*) ----
|
|
1490
|
+
def enqueue_done_events
|
|
1491
|
+
done_ids = compute_done_states
|
|
1492
|
+
done_ids.each do |sid|
|
|
1493
|
+
@internal_queue << ({ 'name' => "done.state.#{sid}", 'data' => nil })
|
|
1494
|
+
end
|
|
1495
|
+
end
|
|
1496
|
+
|
|
1497
|
+
      # Identify composite states that have completed: a compound state is
      # done when one of its direct <final> children is active; a state with
      # parallel regions is done when every region has an active final
      # descendant.
      #
      # @return [Array<String>] unique ids of completed composite states
      def compute_done_states
        # Identify composite state completion and parallel completion
        finals = @configuration.select { |sid| @tag_type[sid] == :final }
        done = []
        # State is done if one of its direct 'final' children is active
        @states.each do |sid, node|
          next unless node.is_a?(Hash)
          # only consider composite states
          if node.key?('state') || node.key?('parallel')
            # direct final children ids
            direct_finals = wrap_list(node['final']).map { |f| f.is_a?(Hash) ? f['id'] : nil }.compact.map(&:to_s)
            if !direct_finals.empty? && (finals & direct_finals).any?
              done << sid
              next
            end
            # parallel: all regions have a final descendant
            if node.key?('parallel')
              region_ids = wrap_list(node['parallel']).flat_map { |p| wrap_list(p['state']).map { |s| s['id'] }.compact.map(&:to_s) }
              if !region_ids.empty?
                all_done = region_ids.all? do |rid|
                  finals.any? { |fid| is_ancestor?(rid, fid) }
                end
                done << sid if all_done
              end
            end
          end
        end
        done.uniq
      end
|
|
1526
|
+
|
|
1527
|
+
      # Recursively index the state tree: fills @states (id -> node map),
      # @parent (id -> parent id), and @tag_type (id -> :state/:parallel/
      # :history/:final) by walking the known container keys.
      #
      # @param node [Hash] current element map
      # @param parent_id [String, nil] id of the enclosing state
      # @param tag [Symbol, nil] element kind of node (defaults to :state)
      # @return [void]
      def index_states(node, parent_id = nil, tag = nil)
        return unless node.is_a?(Hash)
        # Record this node if it looks like a state (has an id)
        sid = node['id']
        if sid
          @states[sid] = node
          @parent[sid] = parent_id if parent_id
          @tag_type[sid] = tag || :state
        end
        # Recurse into known containers (sid is nil for anonymous wrappers,
        # so children of an id-less node inherit no parent link here)
        wrap_list(node['state']).each { |child| index_states(child, sid, :state) }
        wrap_list(node['parallel']).each { |child| index_states(child, sid, :parallel) }
        wrap_list(node['history']).each { |child| index_states(child, sid, :history) }
        wrap_list(node['final']).each { |child| index_states(child, sid, :final) }
      end
|
|
1542
|
+
|
|
1543
|
+
      # Compute the machine's starting configuration: prefer the root's
      # explicit initial attribute (expanding each token to its leaf states);
      # otherwise fall back to the document-order first child state.
      #
      # @return [Array<String>] unique leaf state ids
      def initial_configuration
        # Prefer explicit initial on root
        tokens = wrap_list(@root['initial']).map(&:to_s)
        leaves = []
        unless tokens.empty?
          tokens.each do |tid|
            leaves.concat(initial_leaves_for_token(tid))
          end
          return leaves.uniq
        end
        # Else first child state id -> expand to leaves
        first_state = wrap_list(@root['state']).find { |s| s.is_a?(Hash) && s['id'] }
        if first_state && first_state['id']
          return initial_leaves_for_token(first_state['id'].to_s)
        end
        []
      end
|
|
1560
|
+
|
|
1561
|
+
      # Expand a single initial-attribute token to leaf states. History
      # tokens fall back to the parent's defaults: a deep history expands the
      # parent itself, a shallow history expands each of the parent's default
      # initial children.
      #
      # @param token [String] state or history id
      # @return [Array<String>] leaf state ids
      def initial_leaves_for_token(token)
        # History tokens fallback to parent's defaults
        if @tag_type[token] == :history
          parent_id = @parent[token]
          return [] unless parent_id
          node = @states[token]
          deep = (node && node['type_value'].to_s.downcase == 'deep')
          if deep
            return initial_leaves_for_id(parent_id)
          else
            parent = @states[parent_id]
            return initial_child_ids_for(parent).flat_map { |cid| initial_leaves_for_id(cid) }
          end
        end
        initial_leaves_for_id(token)
      end
|
|
1577
|
+
end
|
|
1578
|
+
end
|
|
1579
|
+
end
|
|
1580
|
+
# Execute any pending finalize blocks for completed invocations (only used when @defer_done)
|
|
1581
|
+
      # Execute any pending finalize blocks for completed invocations (only
      # used when @defer_done): for each 'done' invocation whose finalize was
      # deferred by finalize_invoke, run the stored blocks once and mark them
      # done.
      #
      # NOTE(review): in the packaged source this def appears after the
      # closing `end`s of DocumentContext, i.e. at file top level (defining a
      # private method on Object). It still resolves when called from
      # instances, but it should live inside the class — confirm placement.
      #
      # @return [Array(Array<String>, Hash)] trace actions and datamodel delta
      def run_pending_finalizers
        actions = []
        delta = {}
        # iid is unused in the body; the hash is iterated for its records
        @invocations.each do |iid, rec|
          next unless rec && rec[:status] == 'done'
          blocks = rec[:finalize_blocks]
          next unless blocks && !rec[:finalize_done]
          blocks.each do |fin|
            next unless fin.is_a?(Hash)
            a, d = run_actions_from_map(fin, context_state: rec[:state])
            actions.concat(a)
            delta.merge!(d)
          end
          rec[:finalize_done] = true
        end
        [actions, delta]
      end
|