soe-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- soe/builtin_tools/__init__.py +39 -0
- soe/builtin_tools/soe_add_signal.py +82 -0
- soe/builtin_tools/soe_call_tool.py +111 -0
- soe/builtin_tools/soe_copy_context.py +80 -0
- soe/builtin_tools/soe_explore_docs.py +290 -0
- soe/builtin_tools/soe_get_available_tools.py +42 -0
- soe/builtin_tools/soe_get_context.py +50 -0
- soe/builtin_tools/soe_get_workflows.py +63 -0
- soe/builtin_tools/soe_inject_node.py +86 -0
- soe/builtin_tools/soe_inject_workflow.py +105 -0
- soe/builtin_tools/soe_list_contexts.py +73 -0
- soe/builtin_tools/soe_remove_node.py +72 -0
- soe/builtin_tools/soe_remove_workflow.py +62 -0
- soe/builtin_tools/soe_update_context.py +54 -0
- soe/docs/_config.yml +10 -0
- soe/docs/advanced_patterns/guide_fanout_and_aggregations.md +318 -0
- soe/docs/advanced_patterns/guide_inheritance.md +435 -0
- soe/docs/advanced_patterns/hybrid_intelligence.md +237 -0
- soe/docs/advanced_patterns/index.md +49 -0
- soe/docs/advanced_patterns/operational.md +781 -0
- soe/docs/advanced_patterns/self_evolving_workflows.md +385 -0
- soe/docs/advanced_patterns/swarm_intelligence.md +211 -0
- soe/docs/builtins/context.md +164 -0
- soe/docs/builtins/explore_docs.md +135 -0
- soe/docs/builtins/tools.md +164 -0
- soe/docs/builtins/workflows.md +199 -0
- soe/docs/guide_00_getting_started.md +341 -0
- soe/docs/guide_01_tool.md +206 -0
- soe/docs/guide_02_llm.md +143 -0
- soe/docs/guide_03_router.md +146 -0
- soe/docs/guide_04_patterns.md +475 -0
- soe/docs/guide_05_agent.md +159 -0
- soe/docs/guide_06_schema.md +397 -0
- soe/docs/guide_07_identity.md +540 -0
- soe/docs/guide_08_child.md +612 -0
- soe/docs/guide_09_ecosystem.md +690 -0
- soe/docs/guide_10_infrastructure.md +427 -0
- soe/docs/guide_11_builtins.md +118 -0
- soe/docs/index.md +104 -0
- soe/docs/primitives/backends.md +281 -0
- soe/docs/primitives/context.md +256 -0
- soe/docs/primitives/node_reference.md +259 -0
- soe/docs/primitives/primitives.md +331 -0
- soe/docs/primitives/signals.md +865 -0
- soe/docs_index.py +1 -1
- soe/lib/__init__.py +0 -0
- soe/lib/child_context.py +46 -0
- soe/lib/context_fields.py +51 -0
- soe/lib/inheritance.py +172 -0
- soe/lib/jinja_render.py +113 -0
- soe/lib/operational.py +51 -0
- soe/lib/parent_sync.py +71 -0
- soe/lib/register_event.py +75 -0
- soe/lib/schema_validation.py +134 -0
- soe/lib/yaml_parser.py +14 -0
- soe/local_backends/__init__.py +18 -0
- soe/local_backends/factory.py +124 -0
- soe/local_backends/in_memory/context.py +38 -0
- soe/local_backends/in_memory/conversation_history.py +60 -0
- soe/local_backends/in_memory/identity.py +52 -0
- soe/local_backends/in_memory/schema.py +40 -0
- soe/local_backends/in_memory/telemetry.py +38 -0
- soe/local_backends/in_memory/workflow.py +33 -0
- soe/local_backends/storage/context.py +57 -0
- soe/local_backends/storage/conversation_history.py +82 -0
- soe/local_backends/storage/identity.py +118 -0
- soe/local_backends/storage/schema.py +96 -0
- soe/local_backends/storage/telemetry.py +72 -0
- soe/local_backends/storage/workflow.py +56 -0
- soe/nodes/__init__.py +13 -0
- soe/nodes/agent/__init__.py +10 -0
- soe/nodes/agent/factory.py +134 -0
- soe/nodes/agent/lib/loop_handlers.py +150 -0
- soe/nodes/agent/lib/loop_state.py +157 -0
- soe/nodes/agent/lib/prompts.py +65 -0
- soe/nodes/agent/lib/tools.py +35 -0
- soe/nodes/agent/stages/__init__.py +12 -0
- soe/nodes/agent/stages/parameter.py +37 -0
- soe/nodes/agent/stages/response.py +54 -0
- soe/nodes/agent/stages/router.py +37 -0
- soe/nodes/agent/state.py +111 -0
- soe/nodes/agent/types.py +66 -0
- soe/nodes/agent/validation/__init__.py +11 -0
- soe/nodes/agent/validation/config.py +95 -0
- soe/nodes/agent/validation/operational.py +24 -0
- soe/nodes/child/__init__.py +3 -0
- soe/nodes/child/factory.py +61 -0
- soe/nodes/child/state.py +59 -0
- soe/nodes/child/validation/__init__.py +11 -0
- soe/nodes/child/validation/config.py +126 -0
- soe/nodes/child/validation/operational.py +28 -0
- soe/nodes/lib/conditions.py +71 -0
- soe/nodes/lib/context.py +24 -0
- soe/nodes/lib/conversation_history.py +77 -0
- soe/nodes/lib/identity.py +64 -0
- soe/nodes/lib/llm_resolver.py +142 -0
- soe/nodes/lib/output.py +68 -0
- soe/nodes/lib/response_builder.py +91 -0
- soe/nodes/lib/signal_emission.py +79 -0
- soe/nodes/lib/signals.py +54 -0
- soe/nodes/lib/tools.py +100 -0
- soe/nodes/llm/__init__.py +7 -0
- soe/nodes/llm/factory.py +103 -0
- soe/nodes/llm/state.py +76 -0
- soe/nodes/llm/types.py +12 -0
- soe/nodes/llm/validation/__init__.py +11 -0
- soe/nodes/llm/validation/config.py +89 -0
- soe/nodes/llm/validation/operational.py +23 -0
- soe/nodes/router/__init__.py +3 -0
- soe/nodes/router/factory.py +37 -0
- soe/nodes/router/state.py +32 -0
- soe/nodes/router/validation/__init__.py +11 -0
- soe/nodes/router/validation/config.py +58 -0
- soe/nodes/router/validation/operational.py +16 -0
- soe/nodes/tool/factory.py +66 -0
- soe/nodes/tool/lib/__init__.py +11 -0
- soe/nodes/tool/lib/conditions.py +35 -0
- soe/nodes/tool/lib/failure.py +28 -0
- soe/nodes/tool/lib/parameters.py +67 -0
- soe/nodes/tool/state.py +66 -0
- soe/nodes/tool/types.py +27 -0
- soe/nodes/tool/validation/__init__.py +15 -0
- soe/nodes/tool/validation/config.py +132 -0
- soe/nodes/tool/validation/operational.py +16 -0
- soe/validation/__init__.py +18 -0
- soe/validation/config.py +195 -0
- soe/validation/jinja.py +54 -0
- soe/validation/operational.py +110 -0
- {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/METADATA +5 -5
- soe_ai-0.1.3.dist-info/RECORD +137 -0
- {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/WHEEL +1 -1
- soe_ai-0.1.1.dist-info/RECORD +0 -10
- {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/licenses/LICENSE +0 -0
- {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/top_level.txt +0 -0
soe/nodes/llm/factory.py
ADDED
@@ -0,0 +1,103 @@
+"""
+LLM node factory
+
+Simple node that calls LLM directly without agent loop, tools, or routing.
+Supports conversation history via identity and schema validation via Pydantic.
+Prompts use Jinja templates - variables are auto-extracted from {{ context.field }}.
+"""
+
+from typing import Dict, Any, Callable
+from ..lib.llm_resolver import resolve_llm_call
+from ..lib.signal_emission import emit_completion_signals, handle_llm_failure
+from ..lib.response_builder import (
+    build_response_model,
+    extract_output_from_response,
+    extract_signal_from_response,
+)
+from ...types import CallLlm, BroadcastSignalsCaller, Backends, LlmNodeCaller, EventTypes
+from ...lib.register_event import register_event
+from ..lib.context import save_output_to_context
+from ..lib.conversation_history import save_conversation_turn
+from ...lib.jinja_render import render_prompt
+from ...validation.operational import validate_operational
+from .validation import validate_node_config
+from .state import get_operational_state
+from .types import LlmNodeInput
+
+
+def create_llm_node_caller(
+    backends: Backends,
+    call_llm: CallLlm,
+    broadcast_signals_caller: BroadcastSignalsCaller,
+) -> LlmNodeCaller:
+    """Create LLM node caller with pre-loaded dependencies."""
+
+    def execute_llm_node(id: str, node_config: Dict[str, Any]) -> None:
+        validate_operational(id, backends)
+        validate_node_config(node_config)
+
+        state = get_operational_state(id, node_config, backends)
+
+        register_event(backends, id, EventTypes.LLM_CALL, {"identity": state.identity})
+
+        rendered_prompt, warnings = render_prompt(state.prompt, state.context)
+
+        if warnings:
+            register_event(backends, id, EventTypes.CONTEXT_WARNING, {"warnings": warnings})
+
+        input_data = LlmNodeInput(
+            prompt=rendered_prompt,
+            context=state.context_str,
+            conversation_history=state.history_str,
+        )
+
+        response_model = build_response_model(
+            output_field=state.output_field,
+            output_schema=state.output_model,
+            signal_options=state.signal_options,
+        )
+
+        try:
+            raw_response = resolve_llm_call(
+                call_llm=call_llm,
+                input_data=input_data,
+                config=node_config,
+                response_model=response_model,
+                max_retries=state.max_retries,
+            )
+
+            output_value = extract_output_from_response(raw_response, state.output_field)
+            save_output_to_context(id, state.output_field, output_value, backends)
+
+            if state.output_field:
+                if state.output_field in state.context:
+                    state.context[state.output_field].append(output_value)
+                else:
+                    state.context[state.output_field] = [output_value]
+
+            save_conversation_turn(
+                state.history_key, state.conversation_history,
+                rendered_prompt, str(output_value), backends
+            )
+
+            selected_signal = extract_signal_from_response(raw_response)
+
+            emit_completion_signals(
+                selected_signal=selected_signal,
+                node_config=node_config,
+                operational_state=state,
+                broadcast_signals_caller=broadcast_signals_caller,
+                execution_id=id,
+            )
+
+        except Exception as e:
+            handle_llm_failure(
+                failure_signal=state.llm_failure_signal,
+                error_message=str(e),
+                node_type="llm",
+                execution_id=id,
+                backends=backends,
+                broadcast_signals_caller=broadcast_signals_caller,
+            )
+
+    return execute_llm_node

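For orientation, here is a minimal sketch of how the caller produced by create_llm_node_caller might be driven. The config keys (event_triggers, prompt, output_field, identity, retries, llm_failure_signal, event_emissions) come from the validation module shown further below; the backends, call_llm, and broadcast_signals objects are placeholders for dependencies the orchestrator normally constructs (e.g. via soe/local_backends/factory.py), so treat this as an illustration rather than the package's documented API.

# Hypothetical usage sketch - backends, call_llm and broadcast_signals are
# placeholders for objects supplied by the surrounding orchestration code.
node_config = {
    "event_triggers": ["START"],                                  # signals that activate this node
    "prompt": "Summarize the report: {{ context.report_text }}",  # Jinja template over context fields
    "output_field": "summary",                                    # context field that stores the response
    "identity": "summarizer",                                     # persists conversation history across runs
    "retries": 2,
    "llm_failure_signal": "SUMMARY_FAILED",
    "event_emissions": [{"signal_name": "SUMMARY_READY"}],
}

execute_llm_node = create_llm_node_caller(backends, call_llm, broadcast_signals)
execute_llm_node("execution-123", node_config)   # renders the prompt, calls the LLM,
                                                 # saves the output, emits completion signals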
soe/nodes/llm/state.py
ADDED
@@ -0,0 +1,76 @@
+"""LLM node state retrieval."""
+
+import json
+from typing import Dict, Any, List, Optional
+from pydantic import BaseModel, ConfigDict
+from ...types import Backends
+from ..lib.conversation_history import get_conversation_history, format_conversation_history
+from ...lib.jinja_render import get_context_for_prompt
+from ..lib.output import get_output_model, get_signal_options
+
+
+class LlmOperationalState(BaseModel):
+    """All data needed for LLM node execution."""
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    context: Dict[str, Any]
+    main_execution_id: str
+    prompt: str
+    identity: Optional[str]
+    output_field: Optional[str]
+    event_emissions: List[Dict[str, Any]]
+    max_retries: int
+    llm_failure_signal: Optional[str]
+    current_workflow_name: str
+    history_key: Optional[str]
+    conversation_history: List[Dict[str, Any]]
+    context_data: Dict[str, Any]
+    context_str: str
+    history_str: str
+    output_model: Optional[Any]
+    signal_options: Optional[List[Dict[str, str]]]
+
+
+def get_operational_state(
+    execution_id: str,
+    node_config: Dict[str, Any],
+    backends: Backends,
+) -> LlmOperationalState:
+    """Retrieve all state needed for LLM node execution."""
+    context = backends.context.get_context(execution_id)
+    operational = context["__operational__"]
+    identity = node_config.get("identity")
+    prompt = node_config["prompt"]
+    output_field = node_config.get("output_field")
+    event_emissions = node_config.get("event_emissions", [])
+    current_workflow_name = backends.workflow.get_current_workflow_name(execution_id)
+
+    history_key, conversation_history = get_conversation_history(
+        execution_id, identity, backends
+    )
+
+    context_data, _ = get_context_for_prompt(context, prompt)
+    context_str = json.dumps(context_data, indent=2) if context_data else ""
+    history_str = format_conversation_history(conversation_history)
+    main_execution_id = operational["main_execution_id"]
+    output_model = get_output_model(backends, main_execution_id, output_field)
+    signal_options = get_signal_options(event_emissions)
+
+    return LlmOperationalState(
+        context=context,
+        main_execution_id=main_execution_id,
+        prompt=prompt,
+        identity=identity,
+        output_field=output_field,
+        event_emissions=event_emissions,
+        max_retries=node_config.get("retries", 3),
+        llm_failure_signal=node_config.get("llm_failure_signal"),
+        current_workflow_name=current_workflow_name,
+        history_key=history_key,
+        conversation_history=conversation_history,
+        context_data=context_data,
+        context_str=context_str,
+        history_str=history_str,
+        output_model=output_model,
+        signal_options=signal_options,
+    )

soe/nodes/llm/types.py
ADDED
soe/nodes/llm/validation/__init__.py
ADDED
@@ -0,0 +1,11 @@
+"""
+LLM node validation.
+
+- config.py: Config validation at orchestration start
+- operational.py: Runtime validation before execution (fail-fast)
+"""
+
+from .config import validate_node_config
+from .operational import validate_llm_node_runtime
+
+__all__ = ["validate_node_config", "validate_llm_node_runtime"]

soe/nodes/llm/validation/config.py
ADDED
@@ -0,0 +1,89 @@
+"""
+LLM node configuration validation.
+
+Called once at orchestration start, not during node execution.
+"""
+
+from typing import Dict, Any
+from ....types import WorkflowValidationError
+
+
+def validate_node_config(node_config: Dict[str, Any]) -> None:
+    """
+    Validate LLM node configuration exhaustively.
+    Called once at orchestration start, not during node execution.
+
+    Raises:
+        WorkflowValidationError: If configuration is invalid
+    """
+    event_triggers = node_config.get("event_triggers")
+    if not event_triggers:
+        raise WorkflowValidationError(
+            "'event_triggers' is required - specify which signals activate this LLM node"
+        )
+    if not isinstance(event_triggers, list):
+        raise WorkflowValidationError(
+            "'event_triggers' must be a list, e.g., [\"START\", \"PROCESS\"]"
+        )
+
+    if not node_config.get("prompt"):
+        raise WorkflowValidationError(
+            "'prompt' is required - provide the prompt template for the LLM"
+        )
+
+    if node_config.get("input_fields") is not None:
+        raise WorkflowValidationError(
+            "'input_fields' is no longer supported for LLM nodes. "
+            "Use Jinja syntax in prompts instead: {{ context.field_name }}"
+        )
+
+    output_field = node_config.get("output_field")
+    if output_field is not None:
+        if not isinstance(output_field, str):
+            raise WorkflowValidationError(
+                "'output_field' must be a string - the context field name to store the LLM response"
+            )
+        if output_field == "__operational__":
+            raise WorkflowValidationError(
+                "'output_field' cannot be '__operational__' - this is a reserved system field"
+            )
+
+    retries = node_config.get("retries")
+    if retries is not None:
+        if not isinstance(retries, int) or retries < 0:
+            raise WorkflowValidationError(
+                "'retries' must be a positive integer (default is 3)"
+            )
+
+    event_emissions = node_config.get("event_emissions")
+    if event_emissions is not None:
+        if not isinstance(event_emissions, list):
+            raise WorkflowValidationError(
+                "'event_emissions' must be a list of signal definitions"
+            )
+        for i, emission in enumerate(event_emissions):
+            if not isinstance(emission, dict):
+                raise WorkflowValidationError(
+                    f"Each event_emission must be an object with 'signal_name', got invalid item at position {i + 1}"
+                )
+            if not emission.get("signal_name"):
+                raise WorkflowValidationError(
+                    f"Event emission at position {i + 1} is missing 'signal_name'"
+                )
+            condition = emission.get("condition")
+            if condition is not None and not isinstance(condition, str):
+                raise WorkflowValidationError(
+                    f"Event emission at position {i + 1} has invalid 'condition' - must be a jinja string"
+                )
+
+    identity = node_config.get("identity")
+    if identity is not None and not isinstance(identity, str):
+        raise WorkflowValidationError(
+            "'identity' must be a string - used to persist conversation history across executions"
+        )
+
+    llm_failure_signal = node_config.get("llm_failure_signal")
+    if llm_failure_signal is not None and not isinstance(llm_failure_signal, str):
+        raise WorkflowValidationError(
+            "'llm_failure_signal' must be a string - the signal to emit when LLM call fails"
+        )

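As a quick sketch of what this validator accepts and rejects, using only keys named in the code above and assuming the wheel's top-level package imports as soe:

from soe.nodes.llm.validation import validate_node_config   # re-exported by the validation __init__
from soe.types import WorkflowValidationError               # '....types' resolves here from the validation module

valid_config = {
    "event_triggers": ["START"],
    "prompt": "Answer the question: {{ context.question }}",
    "output_field": "answer",
    "retries": 0,                                    # 0 passes: the check only rejects negatives
    "event_emissions": [{"signal_name": "ANSWERED"}],
}
validate_node_config(valid_config)                   # no exception raised

try:
    validate_node_config({"event_triggers": "START", "prompt": "hi"})   # not a list
except WorkflowValidationError as err:
    print(err)   # 'event_triggers' must be a list, e.g., ["START", "PROCESS"]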
soe/nodes/llm/validation/operational.py
ADDED
@@ -0,0 +1,23 @@
+"""LLM node operational validation.
+
+Calls shared operational validation + LLM-specific backend validation.
+"""
+
+from typing import Dict, Any
+from ....types import Backends
+from ....validation.operational import validate_operational, OperationalValidationError
+
+
+def validate_llm_node_runtime(
+    execution_id: str,
+    backends: Backends,
+) -> Dict[str, Any]:
+    """Validate runtime state for LLM node."""
+    context = validate_operational(execution_id, backends)
+
+    try:
+        backends.workflow.get_current_workflow_name(execution_id)
+    except Exception as e:
+        raise OperationalValidationError(f"Cannot access workflow backend: {e}")
+
+    return context

soe/nodes/router/factory.py
ADDED
@@ -0,0 +1,37 @@
+"""
+Router node factory
+"""
+
+from typing import Dict, Any
+
+from ..lib.conditions import evaluate_conditions
+from ...lib.context_fields import get_field
+from ...validation.operational import validate_operational
+from ...lib.register_event import register_event
+from ...types import BroadcastSignalsCaller, RouterNodeCaller, EventTypes
+from .validation import validate_node_config
+from .state import get_operational_state
+
+
+def create_router_node_caller(
+    backends, broadcast_signals_caller: BroadcastSignalsCaller
+) -> RouterNodeCaller:
+    """Create router node caller with pre-loaded dependencies."""
+
+    def execute_router_node(id: str, node_config: Dict[str, Any]) -> None:
+        validate_operational(id, backends)
+        validate_node_config(node_config)
+
+        state = get_operational_state(id, node_config, backends)
+
+        register_event(backends, id, EventTypes.NODE_EXECUTION, {"node_type": "router"})
+
+        unwrapped = {k: get_field(state.context, k) for k in state.context if not k.startswith("__")}
+        for k, v in state.context.items():
+            if k.startswith("__"):
+                unwrapped[k] = v
+        signals = evaluate_conditions(state.event_emissions, {"context": unwrapped}, state.context)
+        if signals:
+            broadcast_signals_caller(id, signals)
+
+    return execute_router_node

soe/nodes/router/state.py
ADDED
@@ -0,0 +1,32 @@
+"""
+Router node state retrieval.
+"""
+
+from typing import Dict, Any, List
+from pydantic import BaseModel, ConfigDict
+from ...types import Backends
+
+
+class RouterOperationalState(BaseModel):
+    """All data needed for router node execution."""
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    context: Dict[str, Any]
+    main_execution_id: str
+    event_emissions: List[Dict[str, Any]]
+
+
+def get_operational_state(
+    execution_id: str,
+    node_config: Dict[str, Any],
+    backends: Backends,
+) -> RouterOperationalState:
+    """Retrieve all state needed for router node execution."""
+    context = backends.context.get_context(execution_id)
+    operational = context["__operational__"]
+
+    return RouterOperationalState(
+        context=context,
+        main_execution_id=operational["main_execution_id"],
+        event_emissions=node_config["event_emissions"],
+    )

soe/nodes/router/validation/__init__.py
ADDED
@@ -0,0 +1,11 @@
+"""
+Router node validation.
+
+- config.py: Config validation at orchestration start
+- operational.py: Runtime validation before execution (fail-fast)
+"""
+
+from .config import validate_node_config
+from .operational import validate_router_node_runtime
+
+__all__ = ["validate_node_config", "validate_router_node_runtime"]

soe/nodes/router/validation/config.py
ADDED
@@ -0,0 +1,58 @@
+"""
+Router node configuration validation.
+
+Called once at orchestration start, not during node execution.
+"""
+
+from typing import Dict, Any
+from ....types import WorkflowValidationError
+from ....validation.jinja import validate_jinja_syntax
+
+
+def validate_node_config(node_config: Dict[str, Any]) -> None:
+    """
+    Validate router node configuration exhaustively.
+    Called once at orchestration start, not during node execution.
+
+    Raises:
+        WorkflowValidationError: If configuration is invalid
+    """
+    event_triggers = node_config.get("event_triggers")
+    if not event_triggers:
+        raise WorkflowValidationError(
+            "'event_triggers' is required - specify which signals activate this router"
+        )
+    if not isinstance(event_triggers, list):
+        raise WorkflowValidationError(
+            "'event_triggers' must be a list, e.g., [\"START\", \"RETRY\"]"
+        )
+
+    event_emissions = node_config.get("event_emissions")
+    if not event_emissions:
+        raise WorkflowValidationError(
+            "'event_emissions' is required - specify which signals to emit based on conditions"
+        )
+    if not isinstance(event_emissions, list):
+        raise WorkflowValidationError(
+            "'event_emissions' must be a list of signal definitions"
+        )
+
+    for i, emission in enumerate(event_emissions):
+        if not isinstance(emission, dict):
+            raise WorkflowValidationError(
+                f"Each event_emission must be an object with 'signal_name', got invalid item at position {i + 1}"
+            )
+        if not emission.get("signal_name"):
+            raise WorkflowValidationError(
+                f"Event emission at position {i + 1} is missing 'signal_name'"
+            )
+        condition = emission.get("condition")
+        if condition is not None and not isinstance(condition, str):
+            raise WorkflowValidationError(
+                f"Event emission at position {i + 1} has invalid 'condition' - must be a jinja string"
+            )
+        if condition:
+            validate_jinja_syntax(
+                condition,
+                f"Event emission '{emission.get('signal_name')}' condition"
+            )

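For illustration, a router configuration that this validator would accept might look as follows. The exact expression form expected inside 'condition' is decided by evaluate_conditions in soe/nodes/lib/conditions.py, which is not part of this diff, so the Jinja snippets here are assumptions rather than documented syntax.

from soe.nodes.router.validation import validate_node_config   # router variant, assuming top-level package soe

router_config = {
    "event_triggers": ["REVIEW_DONE"],
    "event_emissions": [
        # each emission needs a signal_name; 'condition' is an optional Jinja string
        {"signal_name": "PUBLISH", "condition": "{{ context.review_score >= 8 }}"},
        {"signal_name": "REWRITE", "condition": "{{ context.review_score < 8 }}"},
        {"signal_name": "REVIEW_LOGGED"},                      # unconditional emission
    ],
}
validate_node_config(router_config)   # raises WorkflowValidationError on bad shape or bad Jinja syntax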
soe/nodes/router/validation/operational.py
ADDED
@@ -0,0 +1,16 @@
+"""Router node operational validation.
+
+Calls shared operational validation. Router has no additional backend requirements.
+"""
+
+from typing import Dict, Any
+from ....types import Backends
+from ....validation.operational import validate_operational, OperationalValidationError
+
+
+def validate_router_node_runtime(
+    execution_id: str,
+    backends: Backends,
+) -> Dict[str, Any]:
+    """Validate runtime state for Router node. Delegates to shared validation."""
+    return validate_operational(execution_id, backends)

soe/nodes/tool/factory.py
ADDED
@@ -0,0 +1,66 @@
+"""
+Tool node factory
+"""
+
+from typing import Dict, Any
+
+from .validation import validate_tool_node_config
+from .validation.operational import validate_tool_node_runtime
+from .state import get_operational_state
+from .lib.failure import handle_tool_failure
+from .lib.conditions import evaluate_tool_conditions
+from .types import ToolsRegistry
+from ...lib.register_event import register_event
+from ..lib.context import save_output_to_context
+from ...types import Backends, BroadcastSignalsCaller, ToolNodeCaller, EventTypes
+
+
+def create_tool_node_caller(
+    backends: Backends,
+    tools_registry: ToolsRegistry,
+    broadcast_signals_caller: BroadcastSignalsCaller,
+) -> ToolNodeCaller:
+    """Create tool node caller with pre-loaded dependencies."""
+
+    def execute_tool_node(id: str, node_config: Dict[str, Any]) -> None:
+        validate_tool_node_config(node_config, tools_registry)
+        validate_tool_node_runtime(id, backends)
+
+        state = get_operational_state(id, node_config, backends, tools_registry)
+
+        register_event(
+            backends, id, EventTypes.TOOL_CALL,
+            {"tool_name": state.tool_name, "max_retries": state.max_retries}
+        )
+
+        last_error = None
+        for attempt in range(state.max_retries + 1):
+            try:
+                if state.process_accumulated and isinstance(state.parameters, list):
+                    result = state.tool_function(state.parameters)
+                else:
+                    result = state.tool_function(**state.parameters)
+                save_output_to_context(id, state.output_field, result, backends)
+
+                signals = evaluate_tool_conditions(
+                    state.event_emissions, result, state.context, id, backends
+                )
+                if signals:
+                    broadcast_signals_caller(id, signals)
+                return
+
+            except Exception as tool_error:
+                last_error = tool_error
+                if attempt < state.max_retries:
+                    register_event(
+                        backends, id, EventTypes.NODE_ERROR,
+                        {"tool_name": state.tool_name, "retry_attempt": attempt + 1, "error": str(tool_error)}
+                    )
+                    continue
+
+        handle_tool_failure(
+            state.tool_name, state.failure_signal, state.output_field,
+            str(last_error), backends, broadcast_signals_caller, id
+        )
+
+    return execute_tool_node

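The factory looks up state.tool_function via the injected tools_registry, so a registry is presumably a mapping from tool names to plain callables; the ToolsRegistry alias is defined in soe/nodes/tool/types.py, which this extract does not show, so the shape below is an assumption. The backends and broadcast_signals objects are again placeholders for orchestrator-supplied dependencies.

# Assumed registry shape: tool name -> callable. The node calls
# tool_function(**parameters), or tool_function(parameters) when
# process_accumulated is set and the parameters are a list.
def word_count(text: str) -> int:
    return len(text.split())

tools_registry = {"word_count": word_count}

execute_tool_node = create_tool_node_caller(backends, tools_registry, broadcast_signals)
# On success the result is saved to the configured output_field and any
# event_emissions whose conditions pass are broadcast; after max_retries
# failed attempts, handle_tool_failure stores the error message and emits
# the configured failure signal instead.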
soe/nodes/tool/lib/conditions.py
ADDED
@@ -0,0 +1,35 @@
+"""
+Tool-specific condition evaluation with error logging.
+"""
+
+from typing import Dict, List, Any
+
+from ...lib.conditions import evaluate_conditions
+from ....lib.context_fields import get_field
+from ....lib.register_event import register_event
+from ....types import Backends, EventTypes
+
+
+def evaluate_tool_conditions(
+    event_emissions: List[Dict[str, Any]],
+    result: Any,
+    context: Dict[str, Any],
+    execution_id: str,
+    backends: Backends,
+) -> List[str]:
+    """Evaluate jinja conditions against tool result and context with error logging."""
+    if not event_emissions:
+        return []
+
+    try:
+        unwrapped = {k: get_field(context, k) for k in context if not k.startswith("__")}
+        for k, v in context.items():
+            if k.startswith("__"):
+                unwrapped[k] = v
+        return evaluate_conditions(event_emissions, {"result": result, "context": unwrapped}, context)
+    except Exception as e:
+        register_event(
+            backends, execution_id, EventTypes.NODE_ERROR,
+            {"error": f"Condition evaluation failed: {e}"}
+        )
+        return []

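Tool-node conditions are evaluated against both the tool's return value ('result') and the unwrapped context, so an emission can branch on either. How the Jinja expressions are interpreted is up to evaluate_conditions in soe/nodes/lib/conditions.py (not shown in this diff), so the emission list below is only a sketch; current_context and backends are placeholders.

# Illustrative event_emissions for a tool node: conditions may reference the
# tool's return value as `result` and context fields as `context.<field>`.
event_emissions = [
    {"signal_name": "TOO_LONG", "condition": "{{ result > 500 }}"},
    {"signal_name": "OK", "condition": "{{ result <= 500 and context.language == 'en' }}"},
]
signals = evaluate_tool_conditions(event_emissions, 420, current_context, "execution-123", backends)
# -> e.g. ["OK"]; on any evaluation error the function logs a NODE_ERROR event
#    and returns [] rather than raising.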
soe/nodes/tool/lib/failure.py
ADDED
@@ -0,0 +1,28 @@
+"""Tool failure handling utilities."""
+
+from typing import Optional
+
+from ...lib.context import save_output_to_context
+from ....lib.register_event import register_event
+from ....types import Backends, BroadcastSignalsCaller, EventTypes
+
+
+def handle_tool_failure(
+    tool_name: str,
+    failure_signal: Optional[str],
+    output_field: Optional[str],
+    error_message: str,
+    backends: Backends,
+    broadcast_signals_caller: BroadcastSignalsCaller,
+    execution_id: str,
+) -> None:
+    """Handle tool execution failure by saving error and optionally emitting failure signal."""
+    save_output_to_context(execution_id, output_field, error_message, backends)
+
+    register_event(
+        backends, execution_id, EventTypes.NODE_ERROR,
+        {"tool_name": tool_name, "error": error_message}
+    )
+
+    if failure_signal:
+        broadcast_signals_caller(execution_id, [failure_signal])