loom_agent-0.3.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. loom/__init__.py +1 -0
  2. loom/adapters/converters.py +77 -0
  3. loom/adapters/registry.py +43 -0
  4. loom/api/factory.py +77 -0
  5. loom/api/main.py +201 -0
  6. loom/builtin/__init__.py +3 -0
  7. loom/builtin/memory/__init__.py +3 -0
  8. loom/builtin/memory/metabolic.py +96 -0
  9. loom/builtin/memory/pso.py +41 -0
  10. loom/builtin/memory/sanitizers.py +39 -0
  11. loom/builtin/memory/validators.py +55 -0
  12. loom/config/tool.py +63 -0
  13. loom/infra/__init__.py +0 -0
  14. loom/infra/llm.py +44 -0
  15. loom/infra/logging.py +42 -0
  16. loom/infra/store.py +39 -0
  17. loom/infra/transport/memory.py +112 -0
  18. loom/infra/transport/nats.py +170 -0
  19. loom/infra/transport/redis.py +161 -0
  20. loom/interfaces/llm.py +45 -0
  21. loom/interfaces/memory.py +50 -0
  22. loom/interfaces/store.py +29 -0
  23. loom/interfaces/transport.py +35 -0
  24. loom/kernel/__init__.py +0 -0
  25. loom/kernel/base_interceptor.py +97 -0
  26. loom/kernel/bus.py +85 -0
  27. loom/kernel/dispatcher.py +58 -0
  28. loom/kernel/interceptors/__init__.py +14 -0
  29. loom/kernel/interceptors/adaptive.py +567 -0
  30. loom/kernel/interceptors/budget.py +60 -0
  31. loom/kernel/interceptors/depth.py +45 -0
  32. loom/kernel/interceptors/hitl.py +51 -0
  33. loom/kernel/interceptors/studio.py +129 -0
  34. loom/kernel/interceptors/timeout.py +27 -0
  35. loom/kernel/state.py +71 -0
  36. loom/memory/hierarchical.py +124 -0
  37. loom/node/__init__.py +0 -0
  38. loom/node/agent.py +252 -0
  39. loom/node/base.py +121 -0
  40. loom/node/crew.py +105 -0
  41. loom/node/router.py +77 -0
  42. loom/node/tool.py +50 -0
  43. loom/protocol/__init__.py +0 -0
  44. loom/protocol/cloudevents.py +73 -0
  45. loom/protocol/interfaces.py +164 -0
  46. loom/protocol/mcp.py +97 -0
  47. loom/protocol/memory_operations.py +51 -0
  48. loom/protocol/patch.py +93 -0
  49. loom_agent-0.3.3.dist-info/LICENSE +204 -0
  50. loom_agent-0.3.3.dist-info/METADATA +139 -0
  51. loom_agent-0.3.3.dist-info/RECORD +52 -0
  52. loom_agent-0.3.3.dist-info/WHEEL +4 -0
loom/kernel/interceptors/hitl.py ADDED
@@ -0,0 +1,51 @@
+ """
+ Human-in-the-Loop Interceptor
+ """
+
+ import asyncio
+ from typing import List, Optional
+ from loom.kernel.base_interceptor import Interceptor
+ from loom.protocol.cloudevents import CloudEvent
+
+ class HITLInterceptor(Interceptor):
+     """
+     Pauses execution for human approval on sensitive events.
+     """
+
+     def __init__(self, patterns: List[str]):
+         """
+         Args:
+             patterns: List of substrings matched against the event type or subject,
+                       e.g. ["tool.execute/delete_file", "payment"]
+         """
+         self.patterns = patterns
+
+     async def pre_invoke(self, event: CloudEvent) -> Optional[CloudEvent]:
+         # Simple substring match against "type/subject"
+         identifier = f"{event.type}/{event.subject or ''}"
+
+         should_pause = any(p in identifier for p in self.patterns)
+
+         if should_pause:
+             print(f"\n[HITL] ⚠️ STOP! Event requires approval: {identifier}")
+             print(f"   Data: {str(event.data)[:200]}")
+
+             # This suspends processing of the current event until a human responds.
+             # In a real async web app, this would await an external signal
+             # (webhook/API call). For this CLI SDK, input() is run via
+             # asyncio.to_thread, so the prompt blocks this coroutine without
+             # blocking the event loop itself.
+
+             approval = await asyncio.to_thread(input, "   Approve? (y/N): ")
+
+             if approval.lower().strip() != "y":
+                 print("   ❌ Denied.")
+                 return None  # Drop event
+
+             print("   ✅ Approved.")
+
+         return event
+
+     async def post_invoke(self, event: CloudEvent) -> None:
+         pass
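
A minimal sketch of wiring this interceptor, assuming the Dispatcher accepts an interceptors list (that constructor signature is an assumption; it is not shown in this excerpt):

    from loom.kernel.interceptors.hitl import HITLInterceptor

    # Pause for approval on any file-deleting tool call or payment-related event.
    hitl = HITLInterceptor(patterns=["tool.execute/delete_file", "payment"])
    # dispatcher = Dispatcher(interceptors=[hitl])  # hypothetical wiring
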
loom/kernel/interceptors/studio.py ADDED
@@ -0,0 +1,129 @@
+
+ import asyncio
+ import json
+ import os
+ import time
+ from typing import Optional, List, Dict, Any
+ import logging
+
+ try:
+     import websockets
+     from websockets.client import WebSocketClientProtocol
+ except ImportError:
+     websockets = None
+     WebSocketClientProtocol = Any
+
+ from loom.kernel.base_interceptor import Interceptor
+ from loom.protocol.cloudevents import CloudEvent
+
+ logger = logging.getLogger(__name__)
+
+ class StudioInterceptor(Interceptor):
+     """
+     Studio Interceptor: captures all events and forwards them to the Studio server.
+
+     Features:
+     - Async non-blocking: uses asyncio.create_task to avoid blocking the main flow.
+     - Optional: controlled by the LOOM_STUDIO_ENABLED environment variable.
+     - Batching: buffers events and sends them in batches to reduce network overhead.
+     """
+
+     def __init__(self, studio_url: str = "ws://localhost:8765", enabled: bool = False):
+         self.studio_url = studio_url
+         self.ws: Optional[WebSocketClientProtocol] = None
+         self.event_buffer: List[CloudEvent] = []
+         self.buffer_size = 10
+         # Priority: argument > environment variable
+         self.enabled = enabled or os.getenv("LOOM_STUDIO_ENABLED", "false").lower() == "true"
+         self._loop = None
+
+         if self.enabled and not websockets:
+             logger.warning("LOOM_STUDIO_ENABLED is true but websockets is not installed. Disabling Studio.")
+             self.enabled = False
+
+         if self.enabled:
+             asyncio.create_task(self._ensure_connection())
+
+     async def _ensure_connection(self):
+         """Ensure the WebSocket connection is established."""
+         if not self.enabled:
+             return
+
+         if self.ws:
+             try:
+                 # Basic check whether the connection is still open
+                 if self.ws.state == 1:  # State.OPEN
+                     return
+             except Exception:
+                 pass
+             self.ws = None
+
+         try:
+             # Append /ws/ingest to the base URL if not present
+             url = self.studio_url
+             if not url.endswith("/ws/ingest"):
+                 url = f"{url.rstrip('/')}/ws/ingest"
+
+             # A debounce/lock could go here; for now just log
+             logger.debug(f"Connecting to Studio at {url}")
+             self.ws = await websockets.connect(url)
+             logger.info(f"Connected to Loom Studio at {url}")
+         except Exception as e:
+             # Fail quietly so agent operation is not disrupted, but log it
+             logger.debug(f"Failed to connect to Studio: {e}")
+             self.ws = None
+
+     async def pre_invoke(self, event: CloudEvent) -> Optional[CloudEvent]:
+         """Capture event (pre-phase). Nothing is sent here; events are sent only in post_invoke to avoid duplicates."""
+         return event
+
+     async def post_invoke(self, event: CloudEvent) -> None:
+         """Capture event (post-phase). Events are sent only in this phase to avoid duplicates."""
+         if self.enabled:
+             enriched_event_data = event.model_dump(mode='json')
+             if "extensions" not in enriched_event_data:
+                 enriched_event_data["extensions"] = {}
+
+             # Mark the post phase (only post is sent today; the marker is kept for future extension)
+             enriched_event_data["extensions"]["studio_phase"] = "post"
+             enriched_event_data["extensions"]["studio_timestamp"] = time.time()
+
+             asyncio.create_task(self._send_event_data(enriched_event_data))
+
+     async def _send_event_data(self, event_data: Dict[str, Any]):
+         """Buffer and send event data."""
+         try:
+             self.event_buffer.append(event_data)
+
+             if len(self.event_buffer) >= self.buffer_size:
+                 await self._flush_buffer()
+         except Exception as e:
+             logger.error(f"Error in StudioInterceptor: {e}")
+
+     async def _flush_buffer(self):
+         """Flush buffered events to the server."""
+         if not self.event_buffer:
+             return
+
+         # Snapshot and clear immediately to avoid duplicates/races
+         current_batch = list(self.event_buffer)
+         self.event_buffer = []
+
+         await self._ensure_connection()
+
+         if self.ws:
+             try:
+                 batch = {
+                     "type": "event_batch",
+                     "events": current_batch
+                 }
+                 await self.ws.send(json.dumps(batch))
+             except Exception as e:
+                 logger.debug(f"Failed to send batch to Studio: {e}")
+                 # Re-queue? Dropped for now to avoid complexity.
+         else:
+             logger.debug(f"No Studio connection; dropping batch of {len(current_batch)} events")
loom/kernel/interceptors/timeout.py ADDED
@@ -0,0 +1,27 @@
+ """
+ Timeout Interceptor.
+ """
+
+ from typing import Optional
+ from loom.protocol.cloudevents import CloudEvent
+ from loom.kernel.base_interceptor import Interceptor
+
+ class TimeoutInterceptor(Interceptor):
+     """
+     Enforces a timeout on event processing by injecting a deadline constraint.
+     The Dispatcher or Transport is responsible for respecting this constraint.
+     """
+     def __init__(self, default_timeout_sec: float = 30.0):
+         self.default_timeout_sec = default_timeout_sec
+
+     async def pre_invoke(self, event: CloudEvent) -> Optional[CloudEvent]:
+         # Inject a timeout into extensions if one is not already set
+         extensions = event.extensions or {}
+         if "timeout" not in extensions:
+             extensions["timeout"] = self.default_timeout_sec
+             event.extensions = extensions
+
+         return event
+
+     async def post_invoke(self, event: CloudEvent) -> None:
+         pass
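
A sketch of how a downstream consumer might honor the injected deadline. The Dispatcher's actual enforcement code is not part of this diff, so this is illustrative only:

    import asyncio

    async def run_with_deadline(handler, event):
        # "timeout" is the extension key set by TimeoutInterceptor above
        timeout = (event.extensions or {}).get("timeout", 30.0)
        return await asyncio.wait_for(handler(event), timeout=timeout)
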
loom/kernel/state.py ADDED
@@ -0,0 +1,71 @@
+ """
+ State Management (Kernel)
+ """
+
+ from typing import Any, Dict, List, Optional
+ import copy
+
+ from loom.protocol.cloudevents import CloudEvent
+ from loom.protocol.patch import StatePatch, apply_patch as apply_dict_patch
+
+ class StateStore:
+     """
+     Manages the application state tree.
+     Updates state by applying 'state.patch' events.
+     """
+
+     def __init__(self):
+         self._root: Dict[str, Any] = {}
+
+     def apply_event(self, event: CloudEvent) -> None:
+         """
+         Update state if the event contains patches.
+         Expected event.type = "state.patch"
+         Expected event.data = {"patches": [...]}
+         """
+         if event.type != "state.patch":
+             return
+
+         patches_data = event.data.get("patches", [])
+         if not patches_data:
+             return
+
+         for p_data in patches_data:
+             try:
+                 patch = StatePatch(**p_data)
+                 # Apply strictly to root
+                 apply_dict_patch(self._root, patch)
+             except Exception as e:
+                 # In a real system, we might want to dead-letter queue this
+                 print(f"Failed to apply patch: {e}")
+
+     def get_snapshot(self, path: str = "/") -> Any:
+         """
+         Get a deep copy of the state at a specific path.
+         """
+         if path == "/":
+             return copy.deepcopy(self._root)
+
+         tokens = [t for t in path.split('/') if t]
+         current = self._root
+
+         for token in tokens:
+             if isinstance(current, dict):
+                 current = current.get(token)
+             elif isinstance(current, list):
+                 try:
+                     idx = int(token)
+                     if 0 <= idx < len(current):
+                         current = current[idx]
+                     else:
+                         current = None
+                 except ValueError:
+                     current = None
+             else:
+                 # Path descends into a scalar; nothing further to resolve
+                 current = None
+
+             if current is None:
+                 return None
+
+         return copy.deepcopy(current)
+
+     def clear(self):
+         self._root = {}
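
A minimal sketch of feeding a patch event through the store. The StatePatch field names ("op", "path", "value") are assumptions; patch.py ships in this release but its schema is not shown in this excerpt:

    store = StateStore()
    event = CloudEvent.create(
        source="node://example",
        type="state.patch",
        data={"patches": [{"op": "set", "path": "/agents/0/status", "value": "running"}]},
    )
    store.apply_event(event)
    print(store.get_snapshot("/agents/0/status"))  # -> "running" (given the assumed schema)
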
loom/memory/hierarchical.py ADDED
@@ -0,0 +1,124 @@
+ """
+ Hierarchical Memory Implementation
+ """
+
+ import time
+ from typing import List, Dict, Any, Optional
+
+ from loom.interfaces.memory import MemoryInterface, MemoryEntry
+
+ class HierarchicalMemory(MemoryInterface):
+     """
+     A simplified 4-tier memory system.
+
+     Tiers:
+     1. Ephemeral: tool outputs (no separate storage in this MVP, just tagged).
+     2. Working: the N most recent items.
+     3. Session: full conversation history.
+     4. Long-term: (stub) vector interactions.
+     """
+
+     def __init__(self, session_limit: int = 100, working_limit: int = 5):
+         self.session_limit = session_limit
+         self.working_limit = working_limit
+
+         self._session: List[MemoryEntry] = []
+         # Working memory is treated as a sliding "recent window" over the
+         # session buffer rather than a separate store (the legacy design
+         # promoted entries between tiers).
+
+     async def add(self, role: str, content: str, metadata: Optional[Dict[str, Any]] = None) -> None:
+         """Add to session memory."""
+         metadata = metadata or {}
+         tier = metadata.get("tier", "session")
+
+         entry = MemoryEntry(
+             role=role,
+             content=content,
+             timestamp=time.time(),
+             metadata=metadata,
+             tier=tier
+         )
+
+         self._session.append(entry)
+
+         # Enforce limits
+         if len(self._session) > self.session_limit:
+             self._session.pop(0)  # Simple FIFO
+
+     async def get_context(self, task: str = "") -> str:
+         """
+         Construct a context string for the Agent.
+
+         Currently returns the session history line by line; the long-term
+         tier is a stub and contributes nothing yet.
+
+         Format:
+         User: ...
+         Assistant: ...
+         """
+         # Simple sliding-window view of the session, suitable for an LLM
+         # context window; returns the full session while it fits.
+         history_str = []
+         for entry in self._session:
+             history_str.append(f"{entry.role.capitalize()}: {entry.content}")
+
+         return "\n".join(history_str)
+
+     async def get_recent(self, limit: int = 10) -> List[Dict[str, Any]]:
+         """Get recent raw messages for the LLM API."""
+         messages = []
+         for entry in self._session[-limit:]:
+             msg = {"role": entry.role, "content": entry.content}
+
+             # Include tool_calls for assistant messages
+             if entry.role == "assistant" and "tool_calls" in entry.metadata:
+                 msg["tool_calls"] = entry.metadata["tool_calls"]
+
+             # Include tool_call_id (and tool name) for tool messages
+             if entry.role == "tool" and "tool_call_id" in entry.metadata:
+                 msg["tool_call_id"] = entry.metadata["tool_call_id"]
+                 if "tool_name" in entry.metadata:
+                     msg["name"] = entry.metadata["tool_name"]
+
+             messages.append(msg)
+         return messages
+
+     async def clear(self) -> None:
+         self._session.clear()
+
+     def should_reflect(self, threshold: int = 20) -> bool:
+         """Check whether memory needs reflection (session memory exceeds threshold)."""
+         return len(self._session) > threshold
+
+     def get_reflection_candidates(self, count: int = 10) -> List[MemoryEntry]:
+         """Get the oldest 'count' records for summarization."""
+         return self._session[:count]
+
+     async def consolidate(self, summary: str, remove_count: int) -> None:
+         """
+         Consolidate memory:
+         1. Remove the oldest 'remove_count' entries.
+         2. Insert a 'Summary' entry at the beginning (logically oldest).
+         """
+         # Remove old entries
+         del self._session[:remove_count]
+
+         # Create summary entry
+         summary_entry = MemoryEntry(
+             role="system",
+             content=f"[Memory Reflection] Summary of previous conversation:\n{summary}",
+             timestamp=time.time(),
+             metadata={"type": "reflection_summary"},
+             tier="long-term"
+         )
+
+         # Insert at start
+         self._session.insert(0, summary_entry)
+
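
A short sketch of the reflection lifecycle these hooks support, using only the methods defined above (the summary string stands in for the LLM call that AgentNode normally performs):

    import asyncio

    async def demo():
        mem = HierarchicalMemory(session_limit=100)
        for i in range(25):
            await mem.add("user", f"message {i}")

        if mem.should_reflect(threshold=20):
            oldest = mem.get_reflection_candidates(count=10)
            summary = f"{len(oldest)} early messages exchanged"  # normally produced by an LLM
            await mem.consolidate(summary, remove_count=10)

        print(await mem.get_context())  # summary entry followed by the remaining history

    asyncio.run(demo())
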
loom/node/__init__.py ADDED
File without changes
loom/node/agent.py ADDED
@@ -0,0 +1,252 @@
+ """
+ Agent Node (Fractal System)
+ """
+
+ from typing import Any, List, Optional
+ from dataclasses import dataclass
+
+ from loom.protocol.cloudevents import CloudEvent
+ from loom.protocol.interfaces import ReflectiveMemoryStrategy
+ from loom.node.base import Node
+ from loom.node.tool import ToolNode
+ from loom.kernel.dispatcher import Dispatcher
+
+ from loom.interfaces.llm import LLMProvider
+ from loom.infra.llm import MockLLMProvider
+ from loom.interfaces.memory import MemoryInterface
+ from loom.memory.hierarchical import HierarchicalMemory
+
+
+ @dataclass
+ class ReflectionConfig:
+     """
+     Configuration for Memory Reflection (Human Factors Engineering).
+
+     Framework DETECTS when reflection is needed.
+     Developer CONFIGURES how reflection should behave.
+     System EXECUTES the reflection according to config.
+     """
+     threshold: int = 20
+     """Number of entries before reflection is triggered"""
+
+     candidate_count: int = 10
+     """Number of memory entries to include in reflection"""
+
+     remove_count: int = 10
+     """Number of entries to remove after consolidation"""
+
+     prompt_template: str = "Summarize the following conversation segment into a concise knowledge entry:\n\n{history}"
+     """Template for the reflection prompt. {history} will be replaced with the actual history."""
+
+     enabled: bool = True
+     """Whether reflection is enabled"""
+
+
+ class AgentNode(Node):
+     """
+     A Node that acts as an Intelligent Agent (MCP Client).
+
+     FIXED: Now accepts ReflectionConfig for configurable memory reflection,
+     following the Human Factors Engineering principle (the developer controls strategy).
+     """
+
+     def __init__(
+         self,
+         node_id: str,
+         dispatcher: Dispatcher,
+         role: str = "Assistant",
+         system_prompt: str = "You are a helpful assistant.",
+         tools: Optional[List[ToolNode]] = None,
+         provider: Optional[LLMProvider] = None,
+         memory: Optional[MemoryInterface] = None,
+         enable_auto_reflection: bool = False,
+         reflection_config: Optional[ReflectionConfig] = None
+     ):
+         super().__init__(node_id, dispatcher)
+         self.role = role
+         self.system_prompt = system_prompt
+         self.known_tools = {t.tool_def.name: t for t in tools} if tools else {}
+         # Replaced the internal message list with the Memory interface
+         self.memory = memory or HierarchicalMemory()
+         self.provider = provider or MockLLMProvider()
+         self.enable_auto_reflection = enable_auto_reflection
+         # FIXED: Configurable reflection parameters (Human Factors Engineering)
+         self.reflection_config = reflection_config or ReflectionConfig()
+
+     async def _perform_reflection(self) -> None:
+         """
+         Check for and perform metabolic memory reflection.
+
+         FIXED: Now uses developer-configured parameters instead of hardcoded values.
+         Framework DETECTS, Developer CONFIGURES, System EXECUTES.
+
+         FIXED: Uses a runtime-checkable Protocol check rather than a
+         concrete-class check, for better abstraction.
+         """
+         # 0. Check if reflection is enabled
+         if not self.reflection_config.enabled:
+             return
+
+         # 1. Check if memory supports reflection (Protocol-First)
+         if not isinstance(self.memory, ReflectiveMemoryStrategy):
+             # Memory doesn't support reflection; skip silently
+             return
+
+         # 2. Check if memory needs reflection (Framework DETECTS)
+         if not self.memory.should_reflect(threshold=self.reflection_config.threshold):
+             return
+
+         # 3. Get candidates (Developer CONFIGURED count)
+         candidates = self.memory.get_reflection_candidates(
+             count=self.reflection_config.candidate_count
+         )
+
+         # 4. Summarize with the LLM (Developer CONFIGURED prompt)
+         history_text = "\n".join([f"{e.role}: {e.content}" for e in candidates])
+         prompt = self.reflection_config.prompt_template.format(history=history_text)
+
+         try:
+             # Use a separate call (does not affect the main context)
+             response = await self.provider.chat([{"role": "user", "content": prompt}])
+             summary = response.content
+
+             # 5. Consolidate (Developer CONFIGURED remove_count)
+             await self.memory.consolidate(
+                 summary,
+                 remove_count=self.reflection_config.remove_count
+             )
+
+             # 6. Emit event
+             await self.dispatcher.dispatch(CloudEvent.create(
+                 source=self.source_uri,
+                 type="agent.reflection",
+                 data={"summary": summary},
+             ))
+         except Exception as e:
+             # Reflection shouldn't crash the agent
+             # FIXED: Emit an event instead of just printing
+             error_event = CloudEvent.create(
+                 source=self.source_uri,
+                 type="agent.reflection.failed",
+                 data={"error": str(e)}
+             )
+             await self.dispatcher.dispatch(error_event)
+
+     async def process(self, event: CloudEvent) -> Any:
+         """
+         Agent loop with memory:
+         1. Receive task -> add to memory
+         2. Get context from memory
+         3. Think (LLM)
+         4. Tool call -> add result to memory
+         5. Final response
+         """
+         # Hook: auto reflection
+         if self.enable_auto_reflection:
+             await self._perform_reflection()
+
+         return await self._execute_loop(event)
+
+     async def _execute_loop(self, event: CloudEvent) -> Any:
+         """
+         Execute the ReAct loop.
+         """
+         task = event.data.get("task", "") or event.data.get("content", "")
+         max_iterations = event.data.get("max_iterations", 5)
+
+         # 1. Perceive (add to memory)
+         await self.memory.add("user", task)
+
+         iterations = 0
+         final_response = ""
+
+         while iterations < max_iterations:
+             iterations += 1
+
+             # 2. Recall (get context)
+             history = await self.memory.get_recent(limit=20)
+             messages = [{"role": "system", "content": self.system_prompt}] + history
+
+             # 3. Think
+             mcp_tools = [t.tool_def.model_dump() for t in self.known_tools.values()]
+
+             # Check for adaptive control overrides (from interceptors);
+             # extensions may be None, so guard the lookup
+             llm_config = (event.extensions or {}).get("llm_config_override")
+
+             response = await self.provider.chat(messages, tools=mcp_tools, config=llm_config)
+             final_text = response.content
+
+             # 4. Act (tool usage or final answer)
+             if response.tool_calls:
+                 # Record the "thought" / call intent.
+                 # ALWAYS store the assistant message with tool_calls (even if content is empty)
+                 await self.memory.add("assistant", final_text or "", metadata={
+                     "tool_calls": response.tool_calls
+                 })
+
+                 # Execute tools (parallel execution possible; sequential here)
+                 for tc in response.tool_calls:
+                     tc_name = tc.get("name")
+                     tc_args = tc.get("arguments")
+
+                     # Emit thought event
+                     await self.dispatcher.dispatch(CloudEvent.create(
+                         source=self.source_uri,
+                         type="agent.thought",
+                         data={"thought": f"Calling {tc_name}", "tool_call": tc},
+                         traceparent=event.traceparent
+                     ))
+
+                     target_tool = self.known_tools.get(tc_name)
+
+                     if target_tool:
+                         # FIXED: Use self.call() to invoke through the event bus.
+                         # This ensures:
+                         # - Tool calls are visible in Studio
+                         # - Interceptors can control tool execution
+                         # - Distributed tool nodes are supported
+                         # - Fractal uniformity is maintained
+                         try:
+                             tool_result = await self.call(
+                                 target_node=target_tool.source_uri,
+                                 data={"arguments": tc_args}
+                             )
+
+                             # Extract result content
+                             if isinstance(tool_result, dict):
+                                 result_content = tool_result.get("result", str(tool_result))
+                             else:
+                                 result_content = str(tool_result)
+
+                             # Add result to memory (observation)
+                             await self.memory.add("tool", str(result_content), metadata={
+                                 "tool_name": tc_name,
+                                 "tool_call_id": tc.get("id")
+                             })
+                         except Exception as e:
+                             # Tool call failed through the event bus
+                             err_msg = f"Tool {tc_name} failed: {str(e)}"
+                             await self.memory.add("system", err_msg)
+                     else:
+                         err_msg = f"Tool {tc_name} not found."
+                         await self.memory.add("system", err_msg)
+
+                 # Loop continues to reflect on tool results
+                 continue
+
+             else:
+                 # Final answer
+                 await self.memory.add("assistant", final_text)
+                 final_response = final_text
+                 break
+
+         if not final_response and iterations >= max_iterations:
+             final_response = "Error: Maximum iterations reached without a final answer."
+             await self.memory.add("system", final_response)
+
+         # Hook: check reflection after new memories are added
+         if self.enable_auto_reflection:
+             await self._perform_reflection()
+
+         return {"response": final_response, "iterations": iterations}
+
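
A minimal sketch of constructing the agent with a custom reflection policy. MockLLMProvider is the package's own default fallback, while the bare Dispatcher() call is hypothetical since its constructor arguments are not shown in this excerpt:

    from loom.kernel.dispatcher import Dispatcher
    from loom.node.agent import AgentNode, ReflectionConfig

    config = ReflectionConfig(threshold=30, candidate_count=15, remove_count=15)
    agent = AgentNode(
        node_id="researcher",
        dispatcher=Dispatcher(),  # hypothetical: real wiring may need a bus/transport
        system_prompt="You are a research assistant.",
        enable_auto_reflection=True,
        reflection_config=config,
    )
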