mycorrhizal 0.1.2__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mycorrhizal/_version.py +1 -1
- mycorrhizal/common/__init__.py +15 -3
- mycorrhizal/common/cache.py +114 -0
- mycorrhizal/common/compilation.py +263 -0
- mycorrhizal/common/interface_detection.py +159 -0
- mycorrhizal/common/interfaces.py +3 -50
- mycorrhizal/common/mermaid.py +124 -0
- mycorrhizal/common/wrappers.py +1 -1
- mycorrhizal/hypha/core/builder.py +11 -1
- mycorrhizal/hypha/core/runtime.py +242 -107
- mycorrhizal/mycelium/__init__.py +174 -0
- mycorrhizal/mycelium/core.py +619 -0
- mycorrhizal/mycelium/exceptions.py +30 -0
- mycorrhizal/mycelium/hypha_bridge.py +1143 -0
- mycorrhizal/mycelium/instance.py +440 -0
- mycorrhizal/mycelium/pn_context.py +276 -0
- mycorrhizal/mycelium/runner.py +165 -0
- mycorrhizal/mycelium/spores_integration.py +655 -0
- mycorrhizal/mycelium/tree_builder.py +102 -0
- mycorrhizal/mycelium/tree_spec.py +197 -0
- mycorrhizal/rhizomorph/README.md +82 -33
- mycorrhizal/rhizomorph/core.py +287 -119
- mycorrhizal/septum/TRANSITION_REFERENCE.md +385 -0
- mycorrhizal/{enoki → septum}/core.py +326 -100
- mycorrhizal/{enoki → septum}/testing_utils.py +7 -7
- mycorrhizal/{enoki → septum}/util.py +44 -21
- mycorrhizal/spores/__init__.py +3 -3
- mycorrhizal/spores/core.py +149 -28
- mycorrhizal/spores/dsl/__init__.py +8 -8
- mycorrhizal/spores/dsl/hypha.py +3 -15
- mycorrhizal/spores/dsl/rhizomorph.py +3 -11
- mycorrhizal/spores/dsl/{enoki.py → septum.py} +26 -77
- mycorrhizal/spores/encoder/json.py +21 -12
- mycorrhizal/spores/extraction.py +14 -11
- mycorrhizal/spores/models.py +53 -20
- mycorrhizal-0.2.1.dist-info/METADATA +335 -0
- mycorrhizal-0.2.1.dist-info/RECORD +54 -0
- mycorrhizal-0.1.2.dist-info/METADATA +0 -198
- mycorrhizal-0.1.2.dist-info/RECORD +0 -39
- /mycorrhizal/{enoki → septum}/__init__.py +0 -0
- {mycorrhizal-0.1.2.dist-info → mycorrhizal-0.2.1.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1143 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hypha Bridge - Wrapper/decorator system for BT-in-PN integration.
|
|
4
|
+
|
|
5
|
+
This module extends Hypha's API to support BT-in-PN integration without modifying
|
|
6
|
+
the core Hypha modules (specs.py, builder.py, runtime.py).
|
|
7
|
+
|
|
8
|
+
Uses a global registry to store metadata about BT-enabled transitions,
|
|
9
|
+
then wraps the Runner to intercept transition firing and execute BTs.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from typing import Any, Callable, List, Optional, Dict
|
|
13
|
+
from functools import wraps
|
|
14
|
+
import asyncio
|
|
15
|
+
|
|
16
|
+
from ..hypha.core.specs import PlaceRef, TransitionRef
|
|
17
|
+
from ..rhizomorph.core import Status
|
|
18
|
+
from .tree_spec import PNIntegration
|
|
19
|
+
from .exceptions import MyceliumError
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
# Global registry for PN integrations.
# Key: (net_name, transition_name) -> PNIntegration
_pn_integration_registry: Dict[tuple[str, str], PNIntegration] = {}

# Global set to track which transitions are currently processing batches
# (to prevent duplicate processing).  Members are (id(net_runtime), id(transition))
# pairs, added and discarded by TransitionRuntimeWrapper's batch-mode path.
_batch_processing_transitions: set[tuple[int, int]] = set()
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def register_pn_integration(net_name: str, transition_name: str, integration: PNIntegration) -> None:
    """Record *integration* in the global registry under (net, transition)."""
    _pn_integration_registry[(net_name, transition_name)] = integration
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def get_pn_integration(net_name: str, transition_name: str) -> Optional[PNIntegration]:
    """Return the integration registered for (net_name, transition_name), or None."""
    return _pn_integration_registry.get((net_name, transition_name))
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def clear_pn_integration_registry() -> None:
    """Clear the PN integration registry (useful for testing).

    Rebinds the module-level dict rather than mutating it in place, so any
    caller that captured the old dict object keeps a now-stale reference.
    """
    global _pn_integration_registry
    _pn_integration_registry = {}
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class MyceliumNetBuilder:
    """
    Wrapper around Hypha's NetBuilder that adds BT-in-PN support.

    Exposes the same surface as NetBuilder; every call is forwarded to the
    wrapped builder, except transition(), which additionally accepts BT/FSM
    integration metadata and records it in the module-level registry.
    """

    def __init__(self, net_builder: Any, net_name: str):
        """
        Args:
            net_builder: The original Hypha NetBuilder being wrapped.
            net_name: Name of the net (used as part of the registry key).
        """
        self._builder = net_builder
        self._net_name = net_name

    # --- straight pass-throughs to the wrapped builder ---------------------

    def place(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.place(*args, **kwargs)

    def io_input_place(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.io_input_place(*args, **kwargs)

    def io_output_place(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.io_output_place(*args, **kwargs)

    def guard(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.guard(*args, **kwargs)

    def subnet(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.subnet(*args, **kwargs)

    def arc(self, *args, **kwargs):
        """Forward to the wrapped builder."""
        return self._builder.arc(*args, **kwargs)

    # --- transition decorator with optional BT/FSM metadata ----------------

    def transition(self, bt=None, fsm=None, bt_mode="token", outputs=None, **kwargs):
        """
        Decorator for transitions with optional BT or FSM integration.

        Args:
            bt: Optional BT tree to integrate into this transition.
            fsm: Optional FSM initial state to integrate into this transition.
            bt_mode: "token" (process one token) or "batch" (process all
                tokens) - BT only.
            outputs: List of output PlaceRef objects (required when bt or
                fsm is provided).
            **kwargs: Additional arguments passed to the wrapped
                builder.transition().

        Raises:
            ValueError: if both bt and fsm are given, if either is given
                without outputs, or if bt_mode is not a recognized mode.
        """
        # Exactly one integration flavor may be requested per transition.
        if bt is not None and fsm is not None:
            raise ValueError(
                f"Transition cannot have both BT and FSM integration. "
                f"Specify either 'bt' or 'fsm', not both."
            )

        if bt is not None:
            if not outputs:
                raise ValueError(
                    f"Transition with BT integration must specify 'outputs' parameter "
                    f"(list of output PlaceRef objects)"
                )
            if bt_mode not in ("token", "batch"):
                raise ValueError(
                    f"Invalid bt_mode: {bt_mode}. Must be 'token' or 'batch'"
                )

        if fsm is not None and not outputs:
            raise ValueError(
                f"Transition with FSM integration must specify 'outputs' parameter "
                f"(list of output PlaceRef objects)"
            )

        # All remaining kwargs belong to the plain Hypha decorator.
        base_decorator = self._builder.transition(**kwargs)

        def decorator(func: Callable) -> TransitionRef:
            # Let Hypha register the transition first, then attach metadata.
            ref = base_decorator(func)
            name = ref.local_name

            if bt is not None:
                meta = PNIntegration(
                    transition_name=name,
                    integration_type="bt",
                    mode=bt_mode,
                    bt_tree=bt,
                    output_places=outputs or [],
                )
                register_pn_integration(self._net_name, name, meta)
                # Also stash the metadata on the handler for runtime access.
                func._mycelium_bt_integration = meta  # type: ignore[attr-defined]

            if fsm is not None:
                meta = PNIntegration(
                    transition_name=name,
                    integration_type="fsm",
                    initial_state=fsm,
                    output_places=outputs or [],
                )
                register_pn_integration(self._net_name, name, meta)
                # Also stash the metadata on the handler for runtime access.
                func._mycelium_fsm_integration = meta  # type: ignore[attr-defined]

            return ref

        return decorator

    def __getattr__(self, name: str):
        """Forward any attribute not defined here to the wrapped builder."""
        return getattr(self._builder, name)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def create_net_wrapper(net_func: Callable, net_name: str) -> tuple[Any, MyceliumNetBuilder]:
    """
    Build a net through a Mycelium-wrapped NetBuilder.

    Invoked by the @pn_net decorator: constructs a plain Hypha NetBuilder,
    wraps it so transition() can carry BT/FSM metadata, runs the net
    definition function against the wrapper, and hands back both results.

    Args:
        net_func: The net definition function.
        net_name: Name of the net.

    Returns:
        Tuple of (original NetSpec, wrapped MyceliumNetBuilder).
    """
    from ..hypha.core.builder import NetBuilder

    inner = NetBuilder(net_name)
    wrapper = MyceliumNetBuilder(inner, net_name)

    # The net definition sees only the wrapper, so BT/FSM-enabled
    # transitions are registered transparently while building.
    net_func(wrapper)

    return inner.spec, wrapper
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
class TransitionRuntimeWrapper:
    """
    Wrapper for TransitionRuntime that adds BT execution support.

    Wraps the original transition handler to check for BT integration,
    execute the BT, and use its routing decisions.  Instances are callable
    as async generators, matching the shape of a Hypha transition handler.
    """

    def __init__(self, original_handler: Callable, pn_integration: Optional[PNIntegration], transition_ref=None):
        """
        Initialize the wrapper.

        Args:
            original_handler: The original transition handler function
            pn_integration: PN integration metadata (if any)
            transition_ref: Reference to the transition runtime for returning rejected tokens
        """
        self._original_handler = original_handler
        self._pn_integration = pn_integration
        self._transition_ref = transition_ref
        # NOTE(review): the two attributes below appear unused; batch
        # deduplication is done via the module-level
        # _batch_processing_transitions set instead — confirm and remove.
        self._batch_processing = False  # Flag to prevent duplicate batch processing
        self._batch_lock = asyncio.Lock()  # Lock for atomic batch processing

    async def __call__(self, consumed, bb, timebase, state=None):
        """
        Execute the transition with BT integration if configured.

        Args:
            consumed: List of tokens (flattened from all input places)
            bb: Blackboard
            timebase: Timebase
            state: Optional transition state

        Returns:
            Yields of output tokens (like a normal transition); with BT
            integration the BT routes tokens directly and nothing is yielded.
        """
        import logging
        logger = logging.getLogger(__name__)

        # NOTE(review): these two imports are not used directly in this
        # method (the BT paths delegate to helpers that re-import them).
        from .pn_context import PNContext
        from ..rhizomorph.core import Runner as BTRunner, Status

        # consumed is already a flattened list of tokens
        tokens = consumed if isinstance(consumed, list) else [consumed]

        logger.info(f"TransitionRuntimeWrapper called with {len(tokens)} tokens, pn_integration={self._pn_integration}, tokens={[t for t in tokens]}")

        # If no integration, or FSM integration (not yet implemented), execute original handler
        if self._pn_integration is None or self._pn_integration.integration_type == "fsm":
            # FSM integration: visualization only, execution is manual in transition handler
            if self._pn_integration and self._pn_integration.integration_type == "fsm":
                logger.info(f"FSM integration detected (visualization only), executing original handler")

            # Delegate to the original handler, forwarding the optional
            # state argument only when one was supplied.
            async def gen():
                if state is not None:
                    async for result in self._original_handler(consumed, bb, timebase, state):
                        yield result
                else:
                    async for result in self._original_handler(consumed, bb, timebase):
                        yield result

            async for result in gen():
                yield result
            return

        # BT integration: Execute BT and use its routing decisions
        integration = self._pn_integration
        bt_tree = integration.bt_tree
        mode = integration.mode
        output_places = integration.output_places

        # Get net runtime from blackboard (injected by PN runner)
        net_runtime = getattr(bb, '_mycelium_net_runtime', None)
        if net_runtime is None:
            raise RuntimeError(
                f"Transition has BT integration but no net runtime available on blackboard. "
                f"Did you use mycorrhizal.mycelium.Runner instead of mycorrhizal.hypha.core.Runner?"
            )

        # Determine which tokens to process
        if mode == "token":
            # Track token origins by scanning input places before processing.
            # Needed to properly return rejected tokens to their source.
            # Use id(token) as key because tokens (Pydantic models) aren't hashable.
            token_origins = {}
            if self._transition_ref and self._transition_ref.input_arcs:
                for place_parts, _ in self._transition_ref.input_arcs:
                    place = net_runtime.places.get(place_parts)
                    if place and place.tokens:
                        for token in place.tokens:
                            token_origins[id(token)] = (token, place_parts)

            logger.info(f"Processing {len(tokens)} tokens in token mode")
            for i, token in enumerate(tokens):
                logger.info(f"Processing token {i+1}/{len(tokens)}: {token}")
                await self._execute_bt_for_token(
                    bt_tree, token, bb, timebase, net_runtime, output_places, token_origins
                )

            # After processing tokens, check if more tokens remain in input places.
            # If so, set their token_added_event to wake the transition again.
            # Workaround for Hypha's event-driven runtime which clears the event
            # before firing.  NOTE(review): this scans ALL transitions' input
            # places, not just this transition's — confirm that is intended.
            for transition in net_runtime.transitions.values():
                for place_parts, _ in transition.input_arcs:
                    place = net_runtime.places.get(place_parts)
                    if place and len(place.tokens) > 0:
                        # Tokens remain, set event to wake transition again
                        place.token_added_event.set()
                        logger.info(f"[BRIDGE] Set token_added_event for {place_parts} with {len(place.tokens)} tokens remaining")
        else:  # batch mode
            # Batch mode limitation: Transitions fire immediately when tokens
            # become available, leading to multiple firings before all tokens
            # are accumulated.  Works best when all tokens are added to input
            # places before the net starts running.  For most use cases,
            # token mode (the default) is recommended.

            # Create a unique key for this transition (identity-based, so it
            # is unique per runtime instance and never clashes across nets).
            transition_key = (id(net_runtime), id(self._transition_ref)) if self._transition_ref else (id(net_runtime), id(self))

            # Check if we're already processing this batch
            if transition_key in _batch_processing_transitions:
                logger.info(f"Batch mode: already processing {transition_key}, skipping duplicate call")
                # Dead `yield` keeps this function an async generator even
                # on this early-return path.
                if False:
                    yield
                return

            # Mark this transition as processing
            _batch_processing_transitions.add(transition_key)
            logger.info(f"Batch mode: starting processing {transition_key}")

            try:
                all_tokens = list(tokens)  # Start with tokens from this firing

                # First, consume ALL tokens from input places
                if self._transition_ref and self._transition_ref.input_arcs:
                    for place_parts, _ in self._transition_ref.input_arcs:
                        place = net_runtime.places.get(place_parts)
                        if place and place.tokens:
                            # Consume all remaining tokens from this place
                            remaining = list(place.tokens)
                            place.remove_tokens(remaining)
                            all_tokens.extend(remaining)
                            logger.info(f"Batch mode: consumed {len(remaining)} more tokens from {place_parts}")

                # Remove duplicates while preserving order.
                # NOTE(review): this requires tokens to be hashable, yet the
                # token-mode path above keys by id() "because tokens aren't
                # hashable" — one of the two assumptions is wrong; verify.
                seen = set()
                unique_tokens = []
                for token in all_tokens:
                    if token not in seen:
                        seen.add(token)
                        unique_tokens.append(token)

                logger.info(f"Batch mode: processing {len(unique_tokens)} unique tokens")

                await self._execute_bt_for_batch(
                    bt_tree, unique_tokens, bb, timebase, net_runtime, output_places
                )
            finally:
                # Remove from processing set
                _batch_processing_transitions.discard(transition_key)
                logger.info(f"Batch mode: done processing {transition_key}")

        # Yield nothing - BT has already routed tokens directly to output places.
        # The dead `yield` below keeps this function an async generator.
        if False:
            yield

    async def _execute_bt_for_token(
        self, bt_tree, token, bb, timebase, net_runtime, output_places, token_origins
    ):
        """Execute BT for a single token.

        Runs the BT to completion (bounded by a tick limit), then applies
        the routing/rejection decisions the BT recorded on the PNContext.
        """
        import logging
        logger = logging.getLogger(__name__)

        from .pn_context import PNContext
        from ..rhizomorph.core import Runner as BTRunner, Status

        # Create PNContext with token origin tracking.
        # token_origins maps id(token) -> (token, place_parts)
        pn_ctx = PNContext(
            net_runtime=net_runtime,
            output_places=output_places,
            tokens=[token],
            timebase=timebase,
            token_origins=token_origins,
        )

        # Inject PNContext into blackboard.
        # Use setattr with extra='allow' for Pydantic models
        try:
            bb.pn_ctx = pn_ctx
        except Exception:
            # If setattr fails, store in dict for cleanup
            if not hasattr(bb, '_pn_ctx_dict'):
                bb._pn_ctx_dict = {}
            bb._pn_ctx_dict['pn_ctx'] = pn_ctx

        try:
            # Create BT runner (fresh runner for each execution to avoid state issues)
            bt_runner = BTRunner(bt_tree, bb=bb, tb=timebase)

            # Run BT to completion (tick limit guards against BTs that
            # never leave RUNNING).
            max_ticks = 100  # Safety limit
            ticks = 0
            result = Status.RUNNING

            while result == Status.RUNNING and ticks < max_ticks:
                result = await bt_runner.tick()
                ticks += 1

            # Execute routing decisions made by the BT
            routing_decisions = pn_ctx.get_routing_decisions()
            rejected_tokens = pn_ctx.get_rejected_tokens()
            deferred_tokens = pn_ctx.get_deferred_tokens()

            # NOTE(review): print() here while logger is used elsewhere —
            # probably leftover debug output; consider logger.debug.
            print(f"[BRIDGE] BT completed with {len(routing_decisions)} routing decisions")

            # Apply routing: add tokens to output places
            for routed_token, place_ref in routing_decisions:
                # Resolve place_ref to place runtime
                place_parts = tuple(place_ref.get_parts())  # Convert to tuple
                print(f"[BRIDGE] Routing token to place: {place_parts}")
                if place_parts in net_runtime.places:
                    place_runtime = net_runtime.places[place_parts]
                    place_runtime.add_token(routed_token)
                    # Set token_added_event immediately after adding token.
                    # Ensures dependent transitions wake up and process the token.
                    place_runtime.token_added_event.set()
                    print(f"[BRIDGE] Token added successfully, place now has {len(place_runtime.tokens)} tokens")
                else:
                    print(f"[BRIDGE] Place not found: {place_parts}")

            # Handle rejected tokens - return them to their input places
            if rejected_tokens:
                for rejected_token, reason in rejected_tokens:
                    # Get the token's origin from PNContext.
                    # token_origins maps id(token) -> (token, place_parts)
                    origin_parts = pn_ctx.get_token_origin(rejected_token)

                    # Fallback to first input place if origin not tracked
                    if origin_parts is None and self._transition_ref and self._transition_ref.input_arcs:
                        origin_parts = self._transition_ref.input_arcs[0][0]

                    if origin_parts:
                        input_place = net_runtime.places.get(origin_parts)
                        if input_place:
                            input_place.add_token(rejected_token)
                            logger.info(f"Returned rejected token to {origin_parts}: {reason}")
                        else:
                            logger.warning(f"Could not find origin place {origin_parts} for rejected token")
                    else:
                        logger.warning(f"No origin place found for rejected token (id={id(rejected_token)}): {reason}")

            # Deferred tokens are implicitly handled by not removing them from input
            # (they're already back in the input places)

            if not routing_decisions and not rejected_tokens and not deferred_tokens:
                # No routing decisions - might be an error; the token is
                # dropped because it was already consumed from its place.
                import logging
                logger = logging.getLogger(__name__)
                logger.warning(
                    f"BT completed without making routing decisions. "
                    f"Token will be lost."
                )

        finally:
            # Clean up PNContext
            if hasattr(bb, 'pn_ctx'):
                delattr(bb, 'pn_ctx')
            if hasattr(bb, '_pn_ctx_dict'):
                bb._pn_ctx_dict.pop('pn_ctx', None)

    async def _execute_bt_for_batch(
        self, bt_tree, tokens, bb, timebase, net_runtime, output_places
    ):
        """Execute BT for all tokens in batch mode.

        Same flow as _execute_bt_for_token, but the PNContext carries the
        whole token batch and rejected tokens are only logged, not returned
        to their input places (origins are not tracked in batch mode).
        """
        from .pn_context import PNContext
        from ..rhizomorph.core import Runner as BTRunner

        # Create PNContext with all tokens
        pn_ctx = PNContext(
            net_runtime=net_runtime,
            output_places=output_places,
            tokens=tokens,
            timebase=timebase,
        )

        # Inject PNContext into blackboard.
        # Use setattr with extra='allow' for Pydantic models
        try:
            bb.pn_ctx = pn_ctx
        except Exception:
            # If setattr fails, store in dict for cleanup
            if not hasattr(bb, '_pn_ctx_dict'):
                bb._pn_ctx_dict = {}
            bb._pn_ctx_dict['pn_ctx'] = pn_ctx

        try:
            # Create BT runner
            bt_runner = BTRunner(bt_tree, bb=bb, tb=timebase)

            # Run BT to completion (bounded tick count as a safety limit).
            # Status here resolves to the module-level import.
            max_ticks = 100
            ticks = 0
            result = Status.RUNNING

            while result == Status.RUNNING and ticks < max_ticks:
                result = await bt_runner.tick()
                ticks += 1

            # Execute routing decisions made by the BT
            routing_decisions = pn_ctx.get_routing_decisions()
            rejected_tokens = pn_ctx.get_rejected_tokens()
            deferred_tokens = pn_ctx.get_deferred_tokens()

            # NOTE(review): print() vs logger inconsistency, as in the
            # token-mode helper.
            print(f"[BRIDGE] BT completed with {len(routing_decisions)} routing decisions")

            # Apply routing: add tokens to output places
            for routed_token, place_ref in routing_decisions:
                # Resolve place_ref to place runtime
                place_parts = tuple(place_ref.get_parts())  # Convert to tuple
                print(f"[BRIDGE] Routing token to place: {place_parts}")
                if place_parts in net_runtime.places:
                    place_runtime = net_runtime.places[place_parts]
                    place_runtime.add_token(routed_token)
                    # Set token_added_event immediately after adding token.
                    # Ensures dependent transitions wake up and process the token.
                    place_runtime.token_added_event.set()
                    print(f"[BRIDGE] Token added successfully, place now has {len(place_runtime.tokens)} tokens")
                else:
                    print(f"[BRIDGE] Place not found: {place_parts}")

            # Handle rejected tokens (logged only — batch mode does not
            # track origins, so they cannot be returned to a source place).
            if rejected_tokens:
                import logging
                logger = logging.getLogger(__name__)
                for rejected_token, reason in rejected_tokens:
                    logger.warning(f"Token rejected: {reason}")

            if not routing_decisions and not rejected_tokens and not deferred_tokens:
                import logging
                logger = logging.getLogger(__name__)
                logger.warning(
                    f"BT completed without making routing decisions. "
                    f"Tokens will be lost."
                )

        finally:
            # Clean up PNContext
            if hasattr(bb, 'pn_ctx'):
                delattr(bb, 'pn_ctx')
            if hasattr(bb, '_pn_ctx_dict'):
                bb._pn_ctx_dict.pop('pn_ctx', None)
|
|
560
|
+
|
|
561
|
+
|
|
562
|
+
class PNRunner:
|
|
563
|
+
"""
|
|
564
|
+
Wrapper around Hypha's Runner that adds BT-in-PN and FSM-in-PN support.
|
|
565
|
+
|
|
566
|
+
Provides the same API as Runner but injects BT/FSM execution support
|
|
567
|
+
into the NetRuntime.
|
|
568
|
+
"""
|
|
569
|
+
|
|
570
|
+
def __init__(self, net_func: Any, blackboard: Any):
|
|
571
|
+
"""
|
|
572
|
+
Initialize the runner.
|
|
573
|
+
|
|
574
|
+
Args:
|
|
575
|
+
net_func: The @pn_net decorated function
|
|
576
|
+
blackboard: The blackboard for shared state
|
|
577
|
+
"""
|
|
578
|
+
from ..hypha.core import Runner
|
|
579
|
+
|
|
580
|
+
# Create the original runner
|
|
581
|
+
self._runner = Runner(net_func, blackboard)
|
|
582
|
+
self._blackboard = blackboard
|
|
583
|
+
self._timebase = None
|
|
584
|
+
|
|
585
|
+
# Store net spec for BT/FSM integration lookup
|
|
586
|
+
self._spec = self._runner.spec
|
|
587
|
+
self._net_name = self._spec.name
|
|
588
|
+
|
|
589
|
+
async def start(self, timebase: Any):
|
|
590
|
+
"""
|
|
591
|
+
Start the PN runner with BT-in-PN support.
|
|
592
|
+
|
|
593
|
+
Args:
|
|
594
|
+
timebase: The timebase for timing operations
|
|
595
|
+
"""
|
|
596
|
+
from .hypha_bridge import get_pn_integration, TransitionRuntimeWrapper
|
|
597
|
+
|
|
598
|
+
self._timebase = timebase
|
|
599
|
+
|
|
600
|
+
# Start the original runner
|
|
601
|
+
await self._runner.start(timebase)
|
|
602
|
+
|
|
603
|
+
# Get the runtime
|
|
604
|
+
runtime = self._runner.runtime
|
|
605
|
+
|
|
606
|
+
# Inject net runtime into blackboard for BT access
|
|
607
|
+
self._blackboard._mycelium_net_runtime = runtime
|
|
608
|
+
|
|
609
|
+
# Wrap transitions with BT integration support
|
|
610
|
+
for trans_fqn, trans_runtime in runtime.transitions.items():
|
|
611
|
+
# Extract transition name from FQN
|
|
612
|
+
# trans_fqn is a tuple like ('BTInPNDemo', 'route_by_priority')
|
|
613
|
+
trans_name = trans_fqn[-1] if trans_fqn else ""
|
|
614
|
+
|
|
615
|
+
# Check if this transition has BT integration
|
|
616
|
+
integration = get_pn_integration(self._net_name, trans_name)
|
|
617
|
+
|
|
618
|
+
if integration is not None:
|
|
619
|
+
# Wrap the transition handler (stored in spec.handler)
|
|
620
|
+
handler = trans_runtime.spec.handler
|
|
621
|
+
if handler is None:
|
|
622
|
+
continue
|
|
623
|
+
|
|
624
|
+
wrapped_handler = TransitionRuntimeWrapper(handler, integration, transition_ref=trans_runtime)
|
|
625
|
+
trans_runtime.spec.handler = wrapped_handler
|
|
626
|
+
import logging
|
|
627
|
+
logger = logging.getLogger(__name__)
|
|
628
|
+
logger.info(f"Wrapped transition '{trans_name}' (FQN: {trans_fqn}) with BT integration")
|
|
629
|
+
|
|
630
|
+
async def stop(self, timeout: float = 5.0):
|
|
631
|
+
"""Stop the runner."""
|
|
632
|
+
# Clean up blackboard
|
|
633
|
+
if hasattr(self._blackboard, '_mycelium_net_runtime'):
|
|
634
|
+
delattr(self._blackboard, '_mycelium_net_runtime')
|
|
635
|
+
|
|
636
|
+
await self._runner.stop(timeout)
|
|
637
|
+
|
|
638
|
+
@property
|
|
639
|
+
def runtime(self):
|
|
640
|
+
"""Get the underlying PN runtime."""
|
|
641
|
+
return self._runner.runtime
|
|
642
|
+
|
|
643
|
+
@property
|
|
644
|
+
def spec(self):
|
|
645
|
+
"""Get the net specification."""
|
|
646
|
+
return self._runner.spec
|
|
647
|
+
|
|
648
|
+
@property
|
|
649
|
+
def timebase(self):
|
|
650
|
+
"""Get the timebase."""
|
|
651
|
+
return self._runner.timebase
|
|
652
|
+
|
|
653
|
+
def add_place(self, *args, **kwargs):
|
|
654
|
+
"""Forward to original runner."""
|
|
655
|
+
return self._runner.add_place(*args, **kwargs)
|
|
656
|
+
|
|
657
|
+
def add_transition(self, *args, **kwargs):
|
|
658
|
+
"""Forward to original runner."""
|
|
659
|
+
return self._runner.add_transition(*args, **kwargs)
|
|
660
|
+
|
|
661
|
+
def add_arc(self, *args, **kwargs):
|
|
662
|
+
"""Forward to original runner."""
|
|
663
|
+
return self._runner.add_arc(*args, **kwargs)
|
|
664
|
+
|
|
665
|
+
def to_mermaid(self) -> str:
    """
    Generate Mermaid diagram of the net with BT/FSM integration information.

    Extends the standard NetSpec.to_mermaid() to include information about
    which transitions have BT or FSM integrations.

    For FSM integrations, embeds the full FSM state diagram as a subgraph
    within the transition node, showing all states and transitions.

    Returns:
        The complete Mermaid flowchart source as a single newline-joined string.
    """
    lines = ["graph TD"]

    # Recursive walker: emits places, transitions, and arcs for `spec`,
    # then descends into any subnets as nested Mermaid subgraphs.
    def add_subnet(spec, indent: str = " "):
        # NOTE(review): spec_fqn is computed but never used — consider removing.
        spec_fqn = spec.get_fqn()

        if spec.subnets:
            for subnet_name, subnet_spec in spec.subnets.items():
                subnet_fqn = subnet_spec.get_fqn()
                lines.append(f"{indent}subgraph {subnet_fqn}")
                add_subnet(subnet_spec, indent + " ")
                lines.append(f"{indent}end")

        # Places render as circles; IO places get a role prefix in the label.
        for place_name, place_spec in spec.places.items():
            place_fqn = spec.get_fqn(place_name)
            shape = "(("
            close = "))"
            prefix = "[INPUT]</br>" if place_spec.is_io_input else "[OUTPUT]</br>" if place_spec.is_io_output else ""
            lines.append(f'{indent}{place_fqn}{shape}"{prefix}{place_fqn}"{close}')

        for trans_name, trans_spec in spec.transitions.items():
            trans_fqn = spec.get_fqn(trans_name)

            # Check if this transition has BT or FSM integration
            integration = get_pn_integration(self._net_name, trans_name)

            if integration is not None:
                # Add integration label to transition
                if integration.integration_type == "bt":
                    # BT integration - embed the full BT tree diagram
                    self._add_bt_to_diagram(lines, integration, trans_fqn, indent)
                elif integration.integration_type == "fsm":
                    # FSM integration - embed the full FSM state diagram
                    self._add_fsm_to_diagram(lines, integration, trans_fqn, indent)
                # NOTE(review): an integration with an unrecognized type emits
                # no node at all here — confirm that is intentional.
            else:
                # Vanilla transition
                lines.append(f"{indent}{trans_fqn}[{trans_fqn}]")

        for arc in spec.arcs:
            source_fqn = arc.source.get_fqn()
            target_fqn = arc.target.get_fqn()

            # Check if source is a transition with FSM integration
            # If so, point from the subgraph instead of the transition node
            source_integration = None
            if hasattr(arc.source, 'local_name'):
                source_integration = get_pn_integration(self._net_name, arc.source.local_name)

            # Check if target is a transition with FSM integration
            # If so, point to the subgraph instead of the transition node
            target_integration = None
            if hasattr(arc.target, 'local_name'):
                target_integration = get_pn_integration(self._net_name, arc.target.local_name)

            # Integrated transitions are rendered as subgraphs whose IDs carry
            # a "_fsm"/"_bt" suffix, so arcs must address the suffixed ID.

            # If source has FSM integration, use subgraph ID
            if source_integration and source_integration.integration_type == "fsm":
                # Replace transition FQN with subgraph ID
                # e.g., "SimpleFSMNet.process_with_fsm" -> "SimpleFSMNet.process_with_fsm_fsm"
                source_fqn = f"{source_fqn}_fsm"

            # If source has BT integration, use subgraph ID
            if source_integration and source_integration.integration_type == "bt":
                # Replace transition FQN with subgraph ID
                # e.g., "JobQueueProcessor.route_by_priority" -> "JobQueueProcessor.route_by_priority_bt"
                source_fqn = f"{source_fqn}_bt"

            # If target has FSM integration, use subgraph ID
            if target_integration and target_integration.integration_type == "fsm":
                # Replace transition FQN with subgraph ID
                # e.g., "SimpleFSMNet.process_with_fsm" -> "SimpleFSMNet.process_with_fsm_fsm"
                target_fqn = f"{target_fqn}_fsm"

            # If target has BT integration, use subgraph ID
            if target_integration and target_integration.integration_type == "bt":
                # Replace transition FQN with subgraph ID
                # e.g., "JobQueueProcessor.route_by_priority" -> "JobQueueProcessor.route_by_priority_bt"
                target_fqn = f"{target_fqn}_bt"

            # Only annotate the edge when the weight is non-default (> 1).
            if arc.weight > 1:
                lines.append(f"{indent}{source_fqn} -->|weight={arc.weight}| {target_fqn}")
            else:
                lines.append(f"{indent}{source_fqn} --> {target_fqn}")

    add_subnet(self._spec)

    return "\n".join(lines)
|
|
760
|
+
|
|
761
|
+
def _add_fsm_to_diagram(self, lines: list, integration, trans_fqn: str, indent: str) -> None:
    """
    Add FSM state diagram as a subgraph embedded in the transition node.

    Generates a mini flowchart showing all FSM states and their transitions,
    embedded within the PN transition node using Mermaid's subgraph syntax.

    Args:
        lines: List of Mermaid diagram lines (modified in-place)
        integration: PNIntegration object with FSM metadata
        trans_fqn: Fully qualified name of the transition
        indent: Indentation string for diagram formatting
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from ..septum.core import StateSpec, LabeledTransition, Push, Pop, Again, Retry, Unhandled, Repeat, Restart

    initial_state = integration.initial_state

    # --- Pass 1: reachability walk from the initial state. ---
    # Collect all reachable states from the initial state
    # Store both state objects and state names
    visited_states = {}  # state_name -> state_object
    to_visit = [initial_state]

    while to_visit:
        state = to_visit.pop()
        # Non-StateSpec entries are keyed by their string form.
        state_name = state.name if isinstance(state, StateSpec) else str(state)

        if state_name in visited_states:
            continue

        visited_states[state_name] = state

        # Get transitions from this state
        if isinstance(state, StateSpec) and hasattr(state, 'get_transitions'):
            transitions = state.get_transitions()

            # Handle single transition
            if not isinstance(transitions, list):
                transitions = [transitions] if transitions else []

            for trans in transitions:
                # Extract target states from transitions
                if isinstance(trans, LabeledTransition):
                    # LabeledTransition has .transition attribute (the target)
                    target = trans.transition
                elif isinstance(trans, (Again, Retry, Unhandled, Repeat, Restart)):
                    # Self-transitions, no new state to visit
                    continue
                elif isinstance(trans, Pop):
                    # Pop doesn't add a specific state
                    continue
                elif isinstance(trans, Push):
                    # Add all push states
                    for push_state in trans.push_states:
                        to_visit.append(push_state)
                    continue
                elif isinstance(trans, StateSpec):
                    # Direct state reference
                    target = trans
                else:
                    # Unknown transition type, skip
                    continue

                # Add target state to visit list
                # NOTE(review): a LabeledTransition target may itself be a
                # Push/Pop/Again marker; those get queued here and are later
                # skipped by the StateSpec checks below — confirm intended.
                if target is not None and target != state:
                    to_visit.append(target)

    # --- Pass 2: emit the subgraph header and one node per state. ---
    # Create a subgraph for the FSM embedded in the transition
    subgraph_id = f"{trans_fqn}_fsm"
    lines.append(f'{indent}subgraph {subgraph_id}["{trans_fqn}</br>FSM: {integration.fsm_state_name}"]')

    # Add all FSM states as nodes within the subgraph
    # Use transition FQN as prefix to ensure unique IDs across all FSMs in the diagram
    # Sanitize trans_fqn to create valid Mermaid node IDs (replace dots with underscores)
    fsm_prefix = trans_fqn.replace(".", "_")
    state_ids = {}
    counter = 0
    for state_name in visited_states.keys():
        counter += 1
        # Create unique state ID using transition FQN prefix
        state_ids[state_name] = f"{fsm_prefix}_S{counter}"
        # Use simple state name (last component)
        simple_name = state_name.split(".")[-1]
        lines.append(f'{indent} {state_ids[state_name]}["{simple_name}"]')

    # Mark initial state with unique start node
    initial_name = initial_state.name if isinstance(initial_state, StateSpec) else str(initial_state)
    start_node_id = f"{fsm_prefix}_start"
    lines.append(f'{indent} {start_node_id}((start)) --> {state_ids[initial_name]}')

    # --- Pass 3: emit edges between states. ---
    # Add transitions between states
    for state_name, state_obj in visited_states.items():
        if not isinstance(state_obj, StateSpec):
            continue

        source_id = state_ids[state_name]

        # Get transitions
        transitions = state_obj.get_transitions()
        if not isinstance(transitions, list):
            transitions = [transitions] if transitions else []

        for trans in transitions:
            if isinstance(trans, LabeledTransition):
                # Labeled edge: render with the label text on the arrow.
                label = trans.label.name if hasattr(trans.label, 'name') else str(trans.label)
                target = trans.transition

                if isinstance(target, Again):
                    lines.append(f'{indent} {source_id} -->|"again"| {source_id}')
                elif isinstance(target, Retry):
                    lines.append(f'{indent} {source_id} -->|"retry"| {source_id}')
                elif isinstance(target, Unhandled):
                    lines.append(f'{indent} {source_id} -->|"unhandled"| {source_id}')
                elif isinstance(target, Repeat):
                    lines.append(f'{indent} {source_id} -->|"repeat"| {source_id}')
                elif isinstance(target, Restart):
                    lines.append(f'{indent} {source_id} -->|"restart"| {source_id}')
                elif isinstance(target, Pop):
                    pop_node_id = f"{fsm_prefix}_pop"
                    lines.append(f'{indent} {source_id} -->|"{label}"| {pop_node_id}((pop))')
                elif isinstance(target, Push):
                    # Push transitions to first pushed state
                    for push_state in target.push_states:
                        push_name = push_state.name if isinstance(push_state, StateSpec) else str(push_state)
                        if push_name in state_ids:
                            lines.append(f'{indent} {source_id} -->|"{label} → push"| {state_ids[push_name]}')
                elif isinstance(target, StateSpec):
                    target_name = target.name
                    if target_name in state_ids:
                        lines.append(f'{indent} {source_id} -->|"{label}"| {state_ids[target_name]}')

            # Unlabeled markers render as generic self-edges / push / pop.
            elif isinstance(trans, Again):
                lines.append(f'{indent} {source_id} -->|"again"| {source_id}')
            elif isinstance(trans, Retry):
                lines.append(f'{indent} {source_id} -->|"retry"| {source_id}')
            elif isinstance(trans, Unhandled):
                lines.append(f'{indent} {source_id} -->|"unhandled"| {source_id}')
            elif isinstance(trans, Repeat):
                lines.append(f'{indent} {source_id} -->|"repeat"| {source_id}')
            elif isinstance(trans, Restart):
                lines.append(f'{indent} {source_id} -->|"restart"| {source_id}')
            elif isinstance(trans, Pop):
                pop_node_id = f"{fsm_prefix}_pop"
                lines.append(f'{indent} {source_id} -->|"pop"| {pop_node_id}((pop))')
            elif isinstance(trans, Push):
                # Push transitions
                for push_state in trans.push_states:
                    push_name = push_state.name if isinstance(push_state, StateSpec) else str(push_state)
                    if push_name in state_ids:
                        lines.append(f'{indent} {source_id} -->|"push"| {state_ids[push_name]}')

    # --- Pass 4: emit a shared "pop" sink node if any pop transition exists. ---
    # Check if any pop transitions exist
    has_pop = False
    for state_obj in visited_states.values():
        if not isinstance(state_obj, StateSpec):
            continue
        state_transitions = state_obj.get_transitions()
        if not isinstance(state_transitions, list):
            state_transitions = [state_transitions] if state_transitions else []
        for trans in state_transitions:
            target = trans.transition if isinstance(trans, LabeledTransition) else trans
            if isinstance(target, Pop):
                has_pop = True
                break
        if has_pop:
            break

    if has_pop:
        pop_node_id = f"{fsm_prefix}_pop"
        lines.append(f'{indent} {pop_node_id}((pop))')

    lines.append(f'{indent}end')
|
|
932
|
+
|
|
933
|
+
def _add_bt_to_diagram(self, lines: list, integration, trans_fqn: str, indent: str) -> None:
    """
    Add BT tree diagram as a subgraph embedded in the transition node.

    Generates a mini flowchart showing all BT nodes and their structure,
    embedded within the PN transition node using Mermaid's subgraph syntax.

    Args:
        lines: List of Mermaid diagram lines (modified in-place)
        integration: PNIntegration object with BT metadata
        trans_fqn: Fully qualified name of the transition
        indent: Indentation string for diagram formatting
    """
    # Imported lazily to avoid a hard dependency at module import time.
    from ..rhizomorph.core import NodeSpecKind, _bt_expand_children

    bt_tree = integration.bt_tree

    # Get BT tree name: try the known attribute spellings in priority order.
    if hasattr(bt_tree, '_tree_name'):
        bt_name = bt_tree._tree_name
    elif hasattr(bt_tree, '_spec') and hasattr(bt_tree._spec, 'name'):
        bt_name = bt_tree._spec.name
    elif hasattr(bt_tree, '__name__'):
        bt_name = bt_tree.__name__
    else:
        bt_name = "BT"

    # Get the root node spec
    if hasattr(bt_tree, 'root'):
        root_spec = bt_tree.root
    elif hasattr(bt_tree, '_spec'):
        root_spec = bt_tree._spec
    else:
        # Fallback: just show the name without details
        label = f"{trans_fqn}</br>BT: {bt_name}"
        lines.append(f'{indent}{trans_fqn}["{label}"]')
        return

    # Create a subgraph for the BT embedded in the transition
    subgraph_id = f"{trans_fqn}_bt"
    lines.append(f'{indent}subgraph {subgraph_id}["{trans_fqn}</br>BT: {bt_name}"]')

    # Helper to ensure children are expanded (similar to _generate_mermaid)
    def ensure_children(spec):
        """Expand children of composite nodes using the factory function."""
        if hasattr(spec, 'children') and spec.children:
            # Already expanded
            return spec.children

        # NOTE: expansion mutates `spec.children` in place as a cache.
        match spec.kind:
            case NodeSpecKind.SEQUENCE | NodeSpecKind.SELECTOR | NodeSpecKind.PARALLEL:
                factory = spec.payload.get("factory")
                if factory:
                    spec.children = _bt_expand_children(factory)
                return spec.children if hasattr(spec, 'children') else []
            case NodeSpecKind.SUBTREE:
                subtree_root = spec.payload.get("root")
                if subtree_root:
                    spec.children = [subtree_root]
                return spec.children if hasattr(spec, 'children') else []
            case _:
                # Leaf kinds (ACTION/CONDITION/...) have no children.
                return []

    # Helper to add BT nodes
    # Use transition FQN as prefix to ensure unique IDs across all BTs in the diagram
    # Sanitize trans_fqn to create valid Mermaid node IDs (replace dots with underscores)
    bt_prefix = trans_fqn.replace(".", "_")
    node_counter = 0
    # NOTE(review): node_map is populated but never read — candidate for removal.
    node_map = {}

    def add_bt_nodes(node_spec, parent_id=None, edge_label=""):
        # Depth-first emission: node first, then the edge from its parent,
        # then its (lazily-expanded) children.
        nonlocal node_counter
        node_counter += 1
        # Create unique node ID using transition FQN prefix
        node_id = f"{bt_prefix}_BT{node_counter}"
        node_map[id(node_spec)] = node_id

        # Get node kind and name
        kind = node_spec.kind if hasattr(node_spec, 'kind') else None
        name = node_spec.name if hasattr(node_spec, 'name') else "node"

        # Format node based on kind (circle for actions, hexagon for
        # conditions, rectangle for composites and unknowns).
        if kind == NodeSpecKind.ACTION:
            shape = "((ACTION<br/>"
            close = "))"
        elif kind == NodeSpecKind.CONDITION:
            shape = "{{CONDITION<br/>"
            close = "}}"
        elif kind == NodeSpecKind.SEQUENCE:
            shape = "[SEQUENCE<br/>"
            close = "]"
        elif kind == NodeSpecKind.SELECTOR:
            shape = "[SELECTOR<br/>"
            close = "]"
        elif kind == NodeSpecKind.PARALLEL:
            shape = "[PARALLEL<br/>"
            close = "]"
        else:
            shape = "["
            close = "]"

        lines.append(f'{indent} {node_id}{shape}{name}{close}')

        # Add edge from parent if this is not the root
        # NOTE(review): recursion below never passes edge_label, so the
        # labeled branch is currently dead for internal calls.
        if parent_id:
            if edge_label:
                lines.append(f'{indent} {parent_id} -->|{edge_label}| {node_id}')
            else:
                lines.append(f'{indent} {parent_id} --> {node_id}')

        # Expand and recursively add children
        children = ensure_children(node_spec)
        for child in children:
            add_bt_nodes(child, node_id)

    # Start adding nodes from the root
    if root_spec:
        add_bt_nodes(root_spec)

    lines.append(f'{indent}end')
|
|
1053
|
+
|
|
1054
|
+
|
|
1055
|
+
def create_pn_net_decorator():
    """
    Build the ``@pn_net`` decorator with BT-in-PN and FSM-in-PN support.

    The returned decorator wraps Hypha's ``@pn.net`` mechanism: the decorated
    net-definition function is run through a wrapping builder so that its
    transitions may additionally declare BT or FSM integrations.
    """
    # Importing for its side effect of loading Hypha's builder machinery.
    from ..hypha.core.builder import pn as hypha_pn

    def pn_net(func: Callable) -> Callable:
        """
        Decorator to define a Petri net with BT-in-PN and FSM-in-PN support.

        A vanilla net uses plain ``@builder.transition()`` handlers wired up
        with ``builder.place(...)`` and ``builder.arc(...)``. To attach a
        behavior tree, pass ``bt=MyBT`` (optionally ``bt_mode``/``outputs``)
        to ``builder.transition``; to attach a state machine, pass
        ``fsm=MyFSMState`` together with the output places the FSM routes to.
        The decorated function receives the builder and returns nothing.
        """
        net_name = func.__name__

        # Run the user's definition through the wrapping builder; only the
        # resulting spec is needed here (the builder itself is discarded).
        net_spec, _builder = create_net_wrapper(func, net_name)

        # Hypha-compatible net function: calling it simply yields the spec.
        def net_func():
            return net_spec

        # Mirror Hypha's convention of attaching the spec and copying the
        # identifying dunders from the original function.
        net_func._spec = net_spec  # type: ignore[attr-defined]
        net_func.__name__ = net_name
        for dunder in ("__qualname__", "__module__", "__doc__"):
            setattr(net_func, dunder, getattr(func, dunder))

        return net_func

    return pn_net
|
|
1129
|
+
|
|
1130
|
+
|
|
1131
|
+
# Create the singleton pn_net decorator used throughout this package.
pn_net = create_pn_net_decorator()
|
|
1133
|
+
|
|
1134
|
+
|
|
1135
|
+
# Create a pn namespace for backwards compatibility
|
|
1136
|
+
class _PN:
    """Decorator namespace that mirrors the ``hypha.core.pn`` API surface."""

    # staticmethod keeps the decorator callable unbound via `pn.net`.
    net = staticmethod(pn_net)
|
|
1140
|
+
|
|
1141
|
+
|
|
1142
|
+
# Create singleton instance so callers can write `@pn.net` as with Hypha.
pn = _PN()
|