soe-ai 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134) hide show
  1. soe/builtin_tools/__init__.py +39 -0
  2. soe/builtin_tools/soe_add_signal.py +82 -0
  3. soe/builtin_tools/soe_call_tool.py +111 -0
  4. soe/builtin_tools/soe_copy_context.py +80 -0
  5. soe/builtin_tools/soe_explore_docs.py +290 -0
  6. soe/builtin_tools/soe_get_available_tools.py +42 -0
  7. soe/builtin_tools/soe_get_context.py +50 -0
  8. soe/builtin_tools/soe_get_workflows.py +63 -0
  9. soe/builtin_tools/soe_inject_node.py +86 -0
  10. soe/builtin_tools/soe_inject_workflow.py +105 -0
  11. soe/builtin_tools/soe_list_contexts.py +73 -0
  12. soe/builtin_tools/soe_remove_node.py +72 -0
  13. soe/builtin_tools/soe_remove_workflow.py +62 -0
  14. soe/builtin_tools/soe_update_context.py +54 -0
  15. soe/docs/_config.yml +10 -0
  16. soe/docs/advanced_patterns/guide_fanout_and_aggregations.md +318 -0
  17. soe/docs/advanced_patterns/guide_inheritance.md +435 -0
  18. soe/docs/advanced_patterns/hybrid_intelligence.md +237 -0
  19. soe/docs/advanced_patterns/index.md +49 -0
  20. soe/docs/advanced_patterns/operational.md +781 -0
  21. soe/docs/advanced_patterns/self_evolving_workflows.md +385 -0
  22. soe/docs/advanced_patterns/swarm_intelligence.md +211 -0
  23. soe/docs/builtins/context.md +164 -0
  24. soe/docs/builtins/explore_docs.md +135 -0
  25. soe/docs/builtins/tools.md +164 -0
  26. soe/docs/builtins/workflows.md +199 -0
  27. soe/docs/guide_00_getting_started.md +341 -0
  28. soe/docs/guide_01_tool.md +206 -0
  29. soe/docs/guide_02_llm.md +143 -0
  30. soe/docs/guide_03_router.md +146 -0
  31. soe/docs/guide_04_patterns.md +475 -0
  32. soe/docs/guide_05_agent.md +159 -0
  33. soe/docs/guide_06_schema.md +397 -0
  34. soe/docs/guide_07_identity.md +540 -0
  35. soe/docs/guide_08_child.md +612 -0
  36. soe/docs/guide_09_ecosystem.md +690 -0
  37. soe/docs/guide_10_infrastructure.md +427 -0
  38. soe/docs/guide_11_builtins.md +118 -0
  39. soe/docs/index.md +104 -0
  40. soe/docs/primitives/backends.md +281 -0
  41. soe/docs/primitives/context.md +256 -0
  42. soe/docs/primitives/node_reference.md +259 -0
  43. soe/docs/primitives/primitives.md +331 -0
  44. soe/docs/primitives/signals.md +865 -0
  45. soe/docs_index.py +1 -1
  46. soe/lib/__init__.py +0 -0
  47. soe/lib/child_context.py +46 -0
  48. soe/lib/context_fields.py +51 -0
  49. soe/lib/inheritance.py +172 -0
  50. soe/lib/jinja_render.py +113 -0
  51. soe/lib/operational.py +51 -0
  52. soe/lib/parent_sync.py +71 -0
  53. soe/lib/register_event.py +75 -0
  54. soe/lib/schema_validation.py +134 -0
  55. soe/lib/yaml_parser.py +14 -0
  56. soe/local_backends/__init__.py +18 -0
  57. soe/local_backends/factory.py +124 -0
  58. soe/local_backends/in_memory/context.py +38 -0
  59. soe/local_backends/in_memory/conversation_history.py +60 -0
  60. soe/local_backends/in_memory/identity.py +52 -0
  61. soe/local_backends/in_memory/schema.py +40 -0
  62. soe/local_backends/in_memory/telemetry.py +38 -0
  63. soe/local_backends/in_memory/workflow.py +33 -0
  64. soe/local_backends/storage/context.py +57 -0
  65. soe/local_backends/storage/conversation_history.py +82 -0
  66. soe/local_backends/storage/identity.py +118 -0
  67. soe/local_backends/storage/schema.py +96 -0
  68. soe/local_backends/storage/telemetry.py +72 -0
  69. soe/local_backends/storage/workflow.py +56 -0
  70. soe/nodes/__init__.py +13 -0
  71. soe/nodes/agent/__init__.py +10 -0
  72. soe/nodes/agent/factory.py +134 -0
  73. soe/nodes/agent/lib/loop_handlers.py +150 -0
  74. soe/nodes/agent/lib/loop_state.py +157 -0
  75. soe/nodes/agent/lib/prompts.py +65 -0
  76. soe/nodes/agent/lib/tools.py +35 -0
  77. soe/nodes/agent/stages/__init__.py +12 -0
  78. soe/nodes/agent/stages/parameter.py +37 -0
  79. soe/nodes/agent/stages/response.py +54 -0
  80. soe/nodes/agent/stages/router.py +37 -0
  81. soe/nodes/agent/state.py +111 -0
  82. soe/nodes/agent/types.py +66 -0
  83. soe/nodes/agent/validation/__init__.py +11 -0
  84. soe/nodes/agent/validation/config.py +95 -0
  85. soe/nodes/agent/validation/operational.py +24 -0
  86. soe/nodes/child/__init__.py +3 -0
  87. soe/nodes/child/factory.py +61 -0
  88. soe/nodes/child/state.py +59 -0
  89. soe/nodes/child/validation/__init__.py +11 -0
  90. soe/nodes/child/validation/config.py +126 -0
  91. soe/nodes/child/validation/operational.py +28 -0
  92. soe/nodes/lib/conditions.py +71 -0
  93. soe/nodes/lib/context.py +24 -0
  94. soe/nodes/lib/conversation_history.py +77 -0
  95. soe/nodes/lib/identity.py +64 -0
  96. soe/nodes/lib/llm_resolver.py +142 -0
  97. soe/nodes/lib/output.py +68 -0
  98. soe/nodes/lib/response_builder.py +91 -0
  99. soe/nodes/lib/signal_emission.py +79 -0
  100. soe/nodes/lib/signals.py +54 -0
  101. soe/nodes/lib/tools.py +100 -0
  102. soe/nodes/llm/__init__.py +7 -0
  103. soe/nodes/llm/factory.py +103 -0
  104. soe/nodes/llm/state.py +76 -0
  105. soe/nodes/llm/types.py +12 -0
  106. soe/nodes/llm/validation/__init__.py +11 -0
  107. soe/nodes/llm/validation/config.py +89 -0
  108. soe/nodes/llm/validation/operational.py +23 -0
  109. soe/nodes/router/__init__.py +3 -0
  110. soe/nodes/router/factory.py +37 -0
  111. soe/nodes/router/state.py +32 -0
  112. soe/nodes/router/validation/__init__.py +11 -0
  113. soe/nodes/router/validation/config.py +58 -0
  114. soe/nodes/router/validation/operational.py +16 -0
  115. soe/nodes/tool/factory.py +66 -0
  116. soe/nodes/tool/lib/__init__.py +11 -0
  117. soe/nodes/tool/lib/conditions.py +35 -0
  118. soe/nodes/tool/lib/failure.py +28 -0
  119. soe/nodes/tool/lib/parameters.py +67 -0
  120. soe/nodes/tool/state.py +66 -0
  121. soe/nodes/tool/types.py +27 -0
  122. soe/nodes/tool/validation/__init__.py +15 -0
  123. soe/nodes/tool/validation/config.py +132 -0
  124. soe/nodes/tool/validation/operational.py +16 -0
  125. soe/validation/__init__.py +18 -0
  126. soe/validation/config.py +195 -0
  127. soe/validation/jinja.py +54 -0
  128. soe/validation/operational.py +110 -0
  129. {soe_ai-0.1.1.dist-info → soe_ai-0.1.2.dist-info}/METADATA +4 -4
  130. soe_ai-0.1.2.dist-info/RECORD +137 -0
  131. {soe_ai-0.1.1.dist-info → soe_ai-0.1.2.dist-info}/WHEEL +1 -1
  132. soe_ai-0.1.1.dist-info/RECORD +0 -10
  133. {soe_ai-0.1.1.dist-info → soe_ai-0.1.2.dist-info}/licenses/LICENSE +0 -0
  134. {soe_ai-0.1.1.dist-info → soe_ai-0.1.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,64 @@
1
+ """
2
+ Shared identity utilities for LLM and Agent nodes.
3
+
4
+ This module handles identity retrieval and system prompt generation
5
+ for nodes that support identity-based persona/role configuration.
6
+
7
+ Identities are stored per execution (main_execution_id) and define the initial
8
+ system prompt that will be used when conversation history starts.
9
+
10
+ Identity format is simple: identity_name -> system_prompt (string)
11
+ Example:
12
+ assistant: "You are a helpful assistant."
13
+ coding_expert: "You are an expert programmer."
14
+ """
15
+
16
+ from typing import Dict, Any, Optional
17
+ from ...types import Backends
18
+
19
+
20
def get_system_prompt_from_identity(
    identity: Optional[str],
    main_execution_id: str,
    backends: Backends,
) -> Optional[str]:
    """
    Resolve the system prompt configured for an identity.

    Looks up ``identity`` in the identity backend, scoped by the main
    execution ID. Prefers the backend's direct ``get_identity`` accessor
    when present, falling back to fetching the full identity map.

    Args:
        identity: The identity key (e.g., "assistant", "coding_assistant")
        main_execution_id: Main execution ID for identity lookup
        backends: Backend services

    Returns:
        System prompt string, or None when the identity cannot be resolved.
    """
    identity_backend = backends.identity
    if not identity or not identity_backend:
        return None

    # Some backends expose a single-key accessor; prefer it when available.
    if hasattr(identity_backend, 'get_identity'):
        return identity_backend.get_identity(main_execution_id, identity)

    # Fallback: pull the whole identity map and index into it.
    identity_map = identity_backend.get_identities(main_execution_id)
    return identity_map.get(identity) if identity_map else None
50
+
51
+
52
def format_system_prompt_for_history(system_prompt: Optional[str]) -> str:
    """
    Render a system prompt as a conversation-history entry.

    Args:
        system_prompt: The raw system prompt, possibly None or empty.

    Returns:
        "[system]: <prompt>" when a prompt is present, otherwise "".
    """
    return f"[system]: {system_prompt}" if system_prompt else ""
@@ -0,0 +1,142 @@
1
+ """
2
+ Shared LLM Resolver and Parser.
3
+
4
+ Handles the orchestration of LLM calls, including:
5
+ 1. Parsing text responses into Pydantic models
6
+ 2. Removing thinking tags
7
+ 3. Extracting JSON from markdown
8
+ 4. Retrying on validation errors
9
+
10
+ Used by LLM and Agent nodes.
11
+ """
12
+
13
+ import json
14
+ import re
15
+ from typing import Type, Dict, Any, TypeVar
16
+ from pydantic import BaseModel, ValidationError
17
+ from ...types import CallLlm
18
+
19
+ T = TypeVar("T", bound=BaseModel)
20
+
21
+
22
def resolve_llm_call(
    call_llm: CallLlm,
    input_data: BaseModel,
    config: Dict[str, Any],
    response_model: Type[T],
    max_retries: int = 3,
) -> T:
    """
    Execute the LLM call/parse/retry loop.

    1. Serialize ``input_data`` to JSON for the prompt body.
    2. Append JSON-schema format instructions for ``response_model``.
    3. Call the LLM, then parse and validate the response text.
    4. On a parse/validation failure, re-prompt with the failed response
       and error details, up to ``max_retries`` extra attempts.

    Args:
        call_llm: Callable that sends a prompt string and returns raw text.
        input_data: Pydantic model serialized into the prompt.
        config: Provider configuration forwarded to ``call_llm``.
        response_model: Pydantic model the response must validate against.
        max_retries: Additional attempts allowed after the first failure.

    Returns:
        A validated ``response_model`` instance.

    Raises:
        ValueError: If ``input_data`` cannot be serialized.
        Exception: When all attempts fail validation (chained to the last
            parse/validation error). Errors raised by ``call_llm`` itself
            propagate unchanged.
    """
    try:
        prompt_base = input_data.model_dump_json()
    except Exception as e:
        raise ValueError(f"Failed to serialize input model: {e}") from e

    instructions = _get_format_instructions(response_model)
    current_prompt = f"{prompt_base}\n\n{instructions}"

    last_error = None

    for attempt in range(max_retries + 1):
        # Transport/provider errors are not retried here; only parse and
        # validation failures feed the retry loop. (The original wrapped
        # this call in a no-op ``except Exception: raise e``.)
        response_text = call_llm(current_prompt, config)

        try:
            return _parse_response(response_text, response_model)
        except (ValidationError, ValueError) as e:
            last_error = e
            if attempt == max_retries:
                break

            # Feed the failed response plus validation feedback back into
            # the prompt so the model can self-correct on the next try.
            error_msg = _format_validation_error(e)
            current_prompt += f"\n\nPrevious response: {response_text}{error_msg}"

    raise Exception(
        f"Max retries ({max_retries}) exceeded. Last error: {last_error}"
    ) from last_error
64
+
65
+
66
def _get_format_instructions(model: Type[BaseModel]) -> str:
    """Build prompt instructions requiring schema-conformant JSON output."""
    schema_json = json.dumps(model.model_json_schema())
    return "\n".join([
        "Respond ONLY with a valid JSON object matching this schema:",
        schema_json,
        "Do not return the schema itself. Return a JSON instance of the schema.",
    ])
74
+
75
+
76
def _format_validation_error(error: Exception) -> str:
    """Render a parse/validation error as corrective feedback for the LLM."""
    if not isinstance(error, ValidationError):
        # Non-pydantic failure: typically raw JSON that would not parse.
        return f"\n\nJSON parse error: {error}. Output valid JSON."

    # One bullet per failing field, dotted path plus pydantic's message.
    field_errors = [
        f"  - {'.'.join(str(loc) for loc in err['loc'])}: {err['msg']}"
        for err in error.errors()
    ]
    return (
        "\n\nValidation failed. Fix these fields:\n"
        + "\n".join(field_errors)
        + "\n\nRespond with valid JSON."
    )
89
+
90
+
91
def _parse_response(text: str, model: Type[T]) -> T:
    """
    Validate raw LLM text as an instance of ``model``.

    Strips <think>...</think> reasoning blocks first, then isolates the
    JSON payload before handing it to Pydantic for validation.
    """
    without_thinking = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
    return model.model_validate_json(_extract_json(without_thinking))
99
+
100
+
101
+ def _extract_json(text: str) -> str:
102
+ """Extract JSON from text, handling nested objects and arrays."""
103
+ text = text.strip()
104
+
105
+ match = re.search(r"```(?:json)?\s*([\[\{].*?[\]\}])\s*```", text, re.DOTALL)
106
+ if match:
107
+ return match.group(1)
108
+
109
+ for i, char in enumerate(text):
110
+ if char in "{[":
111
+ return _extract_balanced(text, i)
112
+ return text
113
+
114
+
115
+ def _extract_balanced(text: str, start: int) -> str:
116
+ """Extract balanced JSON from start position."""
117
+ open_char = text[start]
118
+ close_char = "}" if open_char == "{" else "]"
119
+ depth = 0
120
+ in_string = False
121
+ escape = False
122
+
123
+ for i in range(start, len(text)):
124
+ c = text[i]
125
+ if escape:
126
+ escape = False
127
+ continue
128
+ if c == "\\":
129
+ escape = True
130
+ continue
131
+ if c == '"':
132
+ in_string = not in_string
133
+ continue
134
+ if in_string:
135
+ continue
136
+ if c == open_char:
137
+ depth += 1
138
+ elif c == close_char:
139
+ depth -= 1
140
+ if depth == 0:
141
+ return text[start:i + 1]
142
+ return text[start:]
@@ -0,0 +1,68 @@
1
+ """
2
+ Shared LLM output utilities for LLM and Agent nodes.
3
+ """
4
+
5
+ from typing import Dict, Any, List, Optional, Type
6
+ from pydantic import BaseModel
7
+
8
+ from .signals import has_jinja_conditions
9
+ from ...lib.schema_validation import schema_to_root_model
10
+ from ...lib.register_event import register_event
11
+ from ...types import EventTypes
12
+
13
+
14
def needs_llm_signal_selection(event_emissions: List[Dict[str, Any]]) -> bool:
    """
    Decide whether the LLM must pick which signal to emit.

    Selection applies only when more than one named signal is configured
    and none of the emissions carry jinja-template conditions (templates
    are evaluated mechanically instead of being chosen by the LLM).
    """
    if not event_emissions or has_jinja_conditions(event_emissions):
        return False

    named = [e for e in event_emissions if e.get("signal_name")]
    return len(named) > 1
23
+
24
+
25
def get_signal_options(event_emissions: List[Dict[str, Any]]) -> Optional[List[Dict[str, str]]]:
    """
    Build {name, description} options when LLM signal selection applies.

    The 'condition' field doubles as the human-readable description when
    it is plain text; jinja conditions disable selection entirely.

    Returns:
        One option dict per named signal, or None when no selection is
        needed (single signal, jinja conditions, or no emissions).
    """
    if not needs_llm_signal_selection(event_emissions):
        return None

    options: List[Dict[str, str]] = []
    for emission in event_emissions:
        name = emission.get("signal_name")
        if name:
            options.append({"name": name, "description": emission.get("condition", "")})
    return options
43
+
44
+
45
def get_output_model(
    backends,
    main_execution_id: str,
    output_field: Optional[str]
) -> Optional[Type[BaseModel]]:
    """
    Build a RootModel for validating the node's flat output value.

    Looks up ``output_field`` in the execution's context schema. When the
    field is absent, a CONTEXT_WARNING event is recorded and None is
    returned so the node proceeds without validation.
    """
    if not output_field or not backends.context_schema:
        return None

    schema = backends.context_schema.get_context_schema(main_execution_id)
    if not schema or output_field not in schema:
        # Surface the mismatch as telemetry rather than failing the node.
        register_event(
            backends,
            main_execution_id,
            EventTypes.CONTEXT_WARNING,
            {
                "message": f"Output field '{output_field}' not found in context schema",
                "output_field": output_field,
            },
        )
        return None

    return schema_to_root_model(schema.get(output_field), f"{output_field.title()}Root")
@@ -0,0 +1,91 @@
1
"""
Dynamic Pydantic response model builder.
"""

# The original shipped with duplicated import lines
# (`from typing import ...` and `from pydantic import RootModel` each
# appeared twice); they are consolidated here.
from typing import Type, Any, Optional, List, Dict, Literal

from pydantic import BaseModel, Field, RootModel, create_model

11
def build_response_model(
    output_field: Optional[str] = None,
    output_schema: Optional[Type[BaseModel]] = None,
    signal_options: Optional[List[Dict[str, str]]] = None,
) -> Type[BaseModel]:
    """
    Dynamically assemble the Pydantic model an LLM response must satisfy.

    Combines (a) the output value — typed from a RootModel schema when
    one is supplied, untyped for a bare output field, or a default
    "output" string — with (b) an optional "selected_signal" Literal
    field when multiple signals are offered.
    """
    is_root_model = (
        output_schema is not None
        and isinstance(output_schema, type)
        and issubclass(output_schema, RootModel)
    )
    needs_selection = bool(signal_options) and len(signal_options) > 1

    root_schema = None
    if is_root_model:
        # A bare RootModel is usable directly when neither a named output
        # field nor signal selection has to be folded into the response.
        if not output_field and not needs_selection:
            return output_schema
        root_schema = output_schema

    fields: Dict[str, Any] = {}

    if output_field:
        if root_schema is not None:
            value_type = root_schema.model_fields["root"].annotation
            value_field = Field(..., description=f"The {output_field} value matching the expected schema")
        else:
            value_type = Any
            value_field = Field(..., description=f"The {output_field} value")
        fields[output_field] = (value_type, value_field)
    else:
        fields["output"] = (
            str,
            Field(..., description="The final output/result")
        )

    if needs_selection:
        names = tuple(s["name"] for s in signal_options)
        # Each option becomes one "- name[: description]" bullet.
        option_lines = [
            f"- {s['name']}: {s['description']}" if s.get("description") else f"- {s['name']}"
            for s in signal_options
        ]
        fields["selected_signal"] = (
            Literal[names],
            Field(..., description="Select the most appropriate signal:\n" + "\n".join(option_lines)),
        )

    model_name = f"{output_field.title()}Response" if output_field else "DynamicResponse"
    return create_model(model_name, **fields)
66
+
67
+
68
def extract_output_from_response(
    response: BaseModel,
    output_field: Optional[str],
) -> Any:
    """
    Pull the output value out of a dynamic response model.

    RootModel responses unwrap their root (dumping nested models to plain
    data); otherwise the named output field — or the default "output"
    key — is read from the dumped dict.
    """
    if isinstance(response, RootModel):
        root_value = response.root
        return root_value.model_dump() if isinstance(root_value, BaseModel) else root_value

    dumped = response.model_dump()
    if output_field and output_field in dumped:
        return dumped[output_field]
    return dumped.get("output")
82
+
83
+
84
def extract_signal_from_response(response: BaseModel) -> Optional[str]:
    """Read the LLM-selected signal, if the response model carries one."""
    dumped = response.model_dump()
    # RootModel dumps may not be dicts; only dicts can carry the field.
    if not isinstance(dumped, dict):
        return None
    return dumped.get("selected_signal")
@@ -0,0 +1,79 @@
1
+ """
2
+ Shared signal emission utilities for LLM-based nodes.
3
+
4
+ Provides common logic for emitting signals after node completion,
5
+ handling both LLM-selected signals and jinja-conditioned emissions.
6
+ """
7
+
8
+ from typing import Dict, List, Any, Optional, Protocol
9
+
10
+ from ...types import BroadcastSignalsCaller, Backends, EventTypes
11
+ from ...lib.register_event import register_event
12
+ from .signals import has_jinja_conditions, handle_signal_emission
13
+
14
+
15
class OperationalState(Protocol):
    """Protocol for operational state objects that can emit signals."""
    # Execution context consulted when evaluating jinja emission conditions.
    context: Dict[str, Any]
    # Configured emissions; entries may carry "signal_name" and "condition".
    event_emissions: List[Dict[str, Any]]
19
+
20
+
21
def handle_llm_failure(
    failure_signal: Optional[str],
    error_message: str,
    node_type: str,
    execution_id: str,
    backends: Backends,
    broadcast_signals_caller: BroadcastSignalsCaller,
) -> None:
    """
    React to an LLM/Agent node failure.

    Always records a NODE_ERROR event. Then either broadcasts the
    configured failure signal or, when none is configured, raises so the
    error surfaces to the caller.

    Raises:
        RuntimeError: When no ``failure_signal`` is configured.
    """
    register_event(
        backends, execution_id, EventTypes.NODE_ERROR,
        {"node_type": node_type, "error": error_message}
    )

    if not failure_signal:
        raise RuntimeError(error_message)

    broadcast_signals_caller(execution_id, [failure_signal])
39
+
40
+
41
def emit_completion_signals(
    selected_signal: Optional[str],
    node_config: Dict[str, Any],
    operational_state: OperationalState,
    broadcast_signals_caller: BroadcastSignalsCaller,
    execution_id: str,
) -> None:
    """
    Emit signals after successful node completion.

    Priority order:
    1. An LLM-selected signal wins outright.
    2. Jinja-conditioned emissions are evaluated against the context.
    3. A single unconditional signal is emitted as-is.
    4. Multiple unconditional signals with no selection is an error
       (should be prevented upstream).

    The 'condition' field is dual-purpose: plain text acts as a selection
    description for the LLM, while a {{ }} template is evaluated to gate
    the emission.
    """
    if selected_signal:
        broadcast_signals_caller(execution_id, [selected_signal])
        return

    emissions = operational_state.event_emissions
    if not emissions:
        return

    if has_jinja_conditions(emissions):
        # Template-gated emissions: delegate evaluation to the shared handler.
        handle_signal_emission(
            [], node_config, operational_state.context,
            broadcast_signals_caller, execution_id
        )
        return

    named = [
        e.get("signal_name") for e in emissions
        if e.get("signal_name")
    ]
    if len(named) > 1:
        raise RuntimeError(
            f"Multiple signals defined but no selection made: {named}"
        )
    if named:
        broadcast_signals_caller(execution_id, named)
@@ -0,0 +1,54 @@
1
+ """
2
+ Signal handling utilities for LLM-based nodes.
3
+ """
4
+
5
+ import re
6
+ from typing import Dict, List, Any, Callable
7
+
8
+ from .conditions import evaluate_conditions
9
+ from ...lib.context_fields import get_field
10
+
11
+
12
def has_jinja_conditions(event_emissions: List[Dict[str, Any]]) -> bool:
    """Return True when any emission's condition embeds a {{ }} jinja template."""
    for emission in event_emissions:
        condition = emission.get("condition")
        if condition and re.search(r"\{\{.*\}\}", condition) is not None:
            return True
    return False
18
+
19
+
20
def _evaluate_emission_conditions(
    emitted_signals: List[str], node_config: Dict[str, Any], context: Dict[str, Any]
) -> List[str]:
    """
    Evaluate jinja conditions and filter signals against allowed emissions.

    Without jinja conditions, the result is simply ``emitted_signals``
    intersected with the configured signal names. With jinja conditions,
    context fields are unwrapped (dunder-prefixed internals pass through
    untouched) and evaluation is delegated to ``evaluate_conditions``.
    """
    event_emissions = node_config.get("event_emissions", [])

    # Reuse the module's shared detector instead of duplicating the
    # regex scan inline (the original re-implemented it here).
    if not has_jinja_conditions(event_emissions):
        allowed = {e.get("signal_name") for e in event_emissions if e.get("signal_name")}
        return [s for s in emitted_signals if s in allowed]

    # "__"-prefixed keys are internal and passed through as-is; all other
    # fields are unwrapped via get_field before template evaluation.
    unwrapped = {
        k: (v if k.startswith("__") else get_field(context, k))
        for k, v in context.items()
    }
    return evaluate_conditions(event_emissions, {"context": unwrapped}, context)
40
+
41
+
42
def handle_signal_emission(
    emitted_signals: List[str],
    node_config: Dict[str, Any],
    context: Dict[str, Any],
    broadcast_signals_caller: Callable[[str, List[str]], None],
    execution_id: str,
) -> None:
    """Evaluate signal conditions and emit via broadcast_signals_caller."""
    to_emit = _evaluate_emission_conditions(emitted_signals, node_config, context)
    if not to_emit:
        # Nothing survived condition filtering; stay silent.
        return
    broadcast_signals_caller(execution_id, to_emit)
soe/nodes/lib/tools.py ADDED
@@ -0,0 +1,100 @@
1
+ """
2
+ Shared tool utilities for nodes that work with callable tools.
3
+
4
+ Used by Agent node and Tool node for introspection and registry lookup.
5
+ """
6
+
7
+ import inspect
8
+ from typing import Dict, Any, Callable, Type, Optional, Union
9
+ from pydantic import BaseModel, create_model
10
+
11
+ from ...types import Backends
12
+ from ...builtin_tools import get_builtin_tool_factory
13
+
14
+
15
# Default retry budget for tool execution: fail fast unless a registry
# entry explicitly opts into retries via its "max_retries" key.
DEFAULT_MAX_RETRIES = 0
16
+
17
+
18
def get_tool_signature(tool_func: Callable) -> str:
    """
    Describe a tool for inclusion in an LLM prompt.

    Produces "name(param: type, ...)" on the first line followed by the
    function's docstring (or a placeholder) indented underneath.
    """
    signature = inspect.signature(tool_func)
    rendered_params = ", ".join(
        f"{name}: {p.annotation if p.annotation != inspect.Parameter.empty else 'Any'}"
        for name, p in signature.parameters.items()
    )
    doc = inspect.getdoc(tool_func) or "No description"
    return f"{tool_func.__name__}({rendered_params})\n  {doc}"
33
+
34
+
35
def create_tool_schema(tool_func: Callable) -> Type[BaseModel]:
    """
    Derive a Pydantic validation model from a tool's signature.

    Unannotated parameters become ``Any``; parameters without defaults
    become required fields (Ellipsis sentinel).
    """
    fields: Dict[str, Any] = {}

    for name, param in inspect.signature(tool_func).parameters.items():
        annotation = Any if param.annotation == inspect.Parameter.empty else param.annotation
        required = param.default == inspect.Parameter.empty
        fields[name] = (annotation, ... if required else param.default)

    return create_model(f"{tool_func.__name__}Schema", **fields)
54
+
55
+
56
def _normalize_registry_entry(
    entry: Union[Callable, Dict[str, Any]],
) -> tuple[Callable, int, Optional[str], bool]:
    """
    Expand a registry entry into its configuration tuple.

    Bare callables get default configuration; dict entries must carry a
    "function" key and may override the remaining settings.

    Returns:
        Tuple of (function, max_retries, failure_signal, process_accumulated).
    """
    if callable(entry):
        return entry, DEFAULT_MAX_RETRIES, None, False

    function = entry["function"]
    retries = entry.get("max_retries", DEFAULT_MAX_RETRIES)
    failure_signal = entry.get("failure_signal")
    process_accumulated = entry.get("process_accumulated", False)
    return function, retries, failure_signal, process_accumulated
69
+
70
+
71
def get_tool_from_registry(
    tool_name: str,
    tools_registry: Dict[str, Any],
    execution_id: str,
    backends: Backends,
) -> tuple[Callable, int, Optional[str], bool]:
    """
    Resolve a tool by name from the registry, falling back to builtins.

    Builtin tools are instantiated via their factory on first use and
    cached back into ``tools_registry`` so later lookups skip the factory.

    Returns:
        Tuple of (tool_function, max_retries, failure_signal,
        process_accumulated).

    Raises:
        ValueError: When the name matches neither registry nor builtins.
    """
    registered = tools_registry.get(tool_name)
    if registered is not None:
        return _normalize_registry_entry(registered)

    factory = get_builtin_tool_factory(tool_name)
    if not factory:
        raise ValueError(f"Tool '{tool_name}' not found in registry or builtins")

    tool_function = factory(
        execution_id=execution_id,
        backends=backends,
        tools_registry=tools_registry,
    )
    # Cache the instantiated builtin in dict form for future lookups.
    tools_registry[tool_name] = {"function": tool_function, "max_retries": DEFAULT_MAX_RETRIES}
    return tool_function, DEFAULT_MAX_RETRIES, None, False
@@ -0,0 +1,7 @@
1
"""
LLM node - Simple direct LLM call with conversation history support
"""

from .factory import create_llm_node_caller

# Public API: the node-caller factory is the only exported entry point.
__all__ = ["create_llm_node_caller"]