loom-agent 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of loom-agent has been flagged as potentially problematic; consult the registry's advisory page for details.
- loom/builtin/tools/calculator.py +4 -0
- loom/builtin/tools/document_search.py +5 -0
- loom/builtin/tools/glob.py +4 -0
- loom/builtin/tools/grep.py +4 -0
- loom/builtin/tools/http_request.py +5 -0
- loom/builtin/tools/python_repl.py +5 -0
- loom/builtin/tools/read_file.py +4 -0
- loom/builtin/tools/task.py +105 -0
- loom/builtin/tools/web_search.py +4 -0
- loom/builtin/tools/write_file.py +4 -0
- loom/components/agent.py +121 -5
- loom/core/agent_executor.py +777 -321
- loom/core/compression_manager.py +17 -10
- loom/core/context_assembly.py +437 -0
- loom/core/events.py +660 -0
- loom/core/execution_context.py +119 -0
- loom/core/tool_orchestrator.py +383 -0
- loom/core/turn_state.py +188 -0
- loom/core/types.py +15 -4
- loom/core/unified_coordination.py +389 -0
- loom/interfaces/event_producer.py +172 -0
- loom/interfaces/tool.py +22 -1
- loom/security/__init__.py +13 -0
- loom/security/models.py +85 -0
- loom/security/path_validator.py +128 -0
- loom/security/validator.py +346 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.1_agent_events.md +121 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.2_streaming_api.md +521 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.3_context_assembler.md +606 -0
- loom/tasks/PHASE_2_CORE_FEATURES/task_2.1_tool_orchestrator.md +743 -0
- loom/tasks/PHASE_2_CORE_FEATURES/task_2.2_security_validator.md +676 -0
- loom/tasks/README.md +109 -0
- loom/tasks/__init__.py +11 -0
- loom/tasks/sql_placeholder.py +100 -0
- loom_agent-0.0.3.dist-info/METADATA +292 -0
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/RECORD +38 -19
- loom_agent-0.0.1.dist-info/METADATA +0 -457
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/WHEEL +0 -0
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Execution Context for tt Recursive Control Loop
|
|
3
|
+
|
|
4
|
+
Provides shared runtime configuration that persists across recursive calls.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import Optional, Dict, Any
|
|
13
|
+
from uuid import uuid4
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class ExecutionContext:
    """Shared runtime configuration for the tt recursive control loop.

    One ExecutionContext instance is threaded through every recursive
    tt() call (together with the message list and TurnState), so the
    whole recursion chain observes a single working directory,
    correlation id, and cancellation token.

    Design principles:
        - Stable configuration: working_dir and correlation_id do not
          change between recursive calls.
        - Shared cancellation: one cancel_token is observed by all calls.
        - Extensible: arbitrary extra data goes into ``metadata``.

    Attributes:
        working_dir: Base directory for file operations.
        correlation_id: Unique identifier used for request tracing.
        cancel_token: Optional shared cancellation event.
        git_context: Git repository context (reserved for a future feature).
        project_context: Project-specific context (reserved for a future feature).
        metadata: Arbitrary additional runtime data.

    Example:
        ```python
        context = ExecutionContext(
            working_dir=Path.cwd(),
            correlation_id="req-12345"
        )

        # All recursive tt calls share this context
        async for event in executor.tt(messages, turn_state, context):
            ...
        ```
    """

    working_dir: Path
    correlation_id: str
    cancel_token: Optional[asyncio.Event] = None
    git_context: Optional[Dict[str, Any]] = None
    project_context: Optional[Dict[str, Any]] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    @staticmethod
    def create(
        working_dir: Optional[Path] = None,
        correlation_id: Optional[str] = None,
        cancel_token: Optional[asyncio.Event] = None,
        **metadata
    ) -> ExecutionContext:
        """Build an execution context, substituting defaults for omitted values.

        Args:
            working_dir: Working directory; falls back to the process cwd.
            correlation_id: Request id; falls back to a freshly generated UUID.
            cancel_token: Optional cancellation event.
            **metadata: Extra metadata stored on the context.

        Returns:
            ExecutionContext: The newly built context.
        """
        resolved_dir = working_dir or Path.cwd()
        resolved_id = correlation_id or str(uuid4())
        return ExecutionContext(
            working_dir=resolved_dir,
            correlation_id=resolved_id,
            cancel_token=cancel_token,
            metadata=metadata
        )

    def is_cancelled(self) -> bool:
        """Report whether cancellation has been requested.

        Returns:
            bool: True when a cancel_token exists and has been set.
        """
        token = self.cancel_token
        if token is None:
            return False
        return token.is_set()

    def with_metadata(self, **kwargs) -> ExecutionContext:
        """Derive a new context whose metadata is merged with ``kwargs``.

        Args:
            **kwargs: Metadata entries to add or override.

        Returns:
            ExecutionContext: A copy sharing every other field (including
            the cancel_token, so cancellation stays coordinated).
        """
        merged = dict(self.metadata)
        merged.update(kwargs)

        return ExecutionContext(
            working_dir=self.working_dir,
            correlation_id=self.correlation_id,
            cancel_token=self.cancel_token,
            git_context=self.git_context,
            project_context=self.project_context,
            metadata=merged
        )

    def __repr__(self) -> str:
        """Human-readable representation."""
        return (
            f"ExecutionContext(cwd={self.working_dir}, "
            f"correlation_id={self.correlation_id[:8]}...)"
        )
|
|
@@ -0,0 +1,383 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Tool Orchestrator Module
|
|
3
|
+
|
|
4
|
+
Intelligent tool execution orchestration with parallel/sequential execution
|
|
5
|
+
based on tool safety characteristics.
|
|
6
|
+
|
|
7
|
+
This module prevents race conditions by distinguishing between read-only
|
|
8
|
+
and write tools, executing them appropriately.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
from enum import Enum
|
|
15
|
+
from typing import AsyncGenerator, Dict, List, Optional, Tuple
|
|
16
|
+
|
|
17
|
+
from loom.core.types import ToolCall, ToolResult
|
|
18
|
+
from loom.core.events import AgentEvent, AgentEventType, ToolCall as EventToolCall, ToolResult as EventToolResult
|
|
19
|
+
from loom.interfaces.tool import BaseTool
|
|
20
|
+
from loom.core.permissions import PermissionManager, PermissionAction
|
|
21
|
+
from loom.core.errors import PermissionDeniedError
|
|
22
|
+
|
|
23
|
+
# 🆕 Loom 2.0 - Security validation
|
|
24
|
+
try:
|
|
25
|
+
from loom.security import SecurityValidator
|
|
26
|
+
except ImportError:
|
|
27
|
+
SecurityValidator = None # type: ignore
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class ToolCategory(str, Enum):
    """Safety classification for tool execution scheduling.

    The orchestrator uses these categories to decide how tool calls may
    be scheduled relative to one another:

    Members:
        READ_ONLY: No side effects; safe to run in parallel.
        WRITE: Has side effects; must run sequentially.
        NETWORK: May require rate limiting (future enhancement).
        DESTRUCTIVE: Requires extra validation (future enhancement).
    """

    READ_ONLY = "read_only"
    WRITE = "write"
    NETWORK = "network"
    DESTRUCTIVE = "destructive"
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class ToolOrchestrator:
    """
    Intelligent tool execution orchestrator.

    Features:
    - Categorize tools by safety (read-only vs write)
    - Execute read-only tools in parallel (up to max_parallel)
    - Execute write tools sequentially
    - Yield AgentEvent for observability
    - Integration with permission system

    Example:
        ```python
        orchestrator = ToolOrchestrator(
            tools={"Read": ReadTool(), "Edit": EditTool()},
            permission_manager=pm,
            max_parallel=5
        )

        tool_calls = [
            ToolCall(name="Read", arguments={"path": "a.txt"}),
            ToolCall(name="Read", arguments={"path": "b.txt"}),
            ToolCall(name="Edit", arguments={"path": "c.txt"})
        ]

        async for event in orchestrator.execute_batch(tool_calls):
            if event.type == AgentEventType.TOOL_RESULT:
                print(event.tool_result.content)
        ```

    Attributes:
        tools: Dictionary of available tools
        permission_manager: Optional permission manager
        max_parallel: Maximum number of parallel executions
    """

    def __init__(
        self,
        tools: Dict[str, BaseTool],
        permission_manager: Optional[PermissionManager] = None,
        security_validator: Optional["SecurityValidator"] = None,  # 🆕 Loom 2.0
        max_parallel: int = 5
    ):
        """
        Initialize the orchestrator.

        Args:
            tools: Dictionary mapping tool names to tool instances
            permission_manager: Optional permission manager for access control
            security_validator: Optional security validator (🆕 Loom 2.0)
            max_parallel: Maximum number of tools to execute in parallel (default: 5)
        """
        self.tools = tools
        self.permission_manager = permission_manager
        self.security_validator = security_validator  # 🆕 Loom 2.0
        self.max_parallel = max_parallel

    async def execute_batch(
        self,
        tool_calls: List[ToolCall]
    ) -> AsyncGenerator[AgentEvent, None]:
        """
        Execute a batch of tool calls with intelligent orchestration.

        Strategy:
        1. Categorize tools (read-only vs write)
        2. Execute read-only in parallel (up to max_parallel)
        3. Execute write tools sequentially
        4. Yield AgentEvent for each execution phase

        Args:
            tool_calls: List of tool calls to execute

        Yields:
            AgentEvent: Execution progress events
        """
        if not tool_calls:
            return

        # Emit batch start event
        yield AgentEvent(
            type=AgentEventType.TOOL_CALLS_START,
            metadata={
                "total_tools": len(tool_calls),
                "max_parallel": self.max_parallel
            }
        )

        # Categorize tools
        read_only_calls, write_calls = self.categorize_tools(tool_calls)

        # Execute read-only in parallel
        if read_only_calls:
            yield AgentEvent(
                type=AgentEventType.PHASE_START,
                metadata={
                    "phase": "parallel_read_only",
                    "count": len(read_only_calls),
                    "tool_names": [tc.name for tc in read_only_calls]
                }
            )

            async for event in self.execute_parallel(read_only_calls):
                yield event

            yield AgentEvent(
                type=AgentEventType.PHASE_END,
                metadata={
                    "phase": "parallel_read_only",
                    "count": len(read_only_calls)
                }
            )

        # Execute write tools sequentially
        if write_calls:
            yield AgentEvent(
                type=AgentEventType.PHASE_START,
                metadata={
                    "phase": "sequential_write",
                    "count": len(write_calls),
                    "tool_names": [tc.name for tc in write_calls]
                }
            )

            async for event in self.execute_sequential(write_calls):
                yield event

            yield AgentEvent(
                type=AgentEventType.PHASE_END,
                metadata={
                    "phase": "sequential_write",
                    "count": len(write_calls)
                }
            )

    def categorize_tools(
        self,
        tool_calls: List[ToolCall]
    ) -> Tuple[List[ToolCall], List[ToolCall]]:
        """
        Categorize tool calls into read-only and write.

        Args:
            tool_calls: List of tool calls to categorize

        Returns:
            Tuple of (read_only_calls, write_calls)
        """
        read_only_calls: List[ToolCall] = []
        write_calls: List[ToolCall] = []

        for tc in tool_calls:
            tool = self.tools.get(tc.name)
            if tool and getattr(tool, "is_read_only", False):
                read_only_calls.append(tc)
            else:
                # Default to write (safer); also covers unknown tools
                write_calls.append(tc)

        return read_only_calls, write_calls

    async def execute_parallel(
        self,
        tool_calls: List[ToolCall]
    ) -> AsyncGenerator[AgentEvent, None]:
        """
        Execute read-only tools concurrently with concurrency limiting.

        BUG FIX: the previous implementation created the per-tool async
        generators and then drained them one after another, so despite
        the name nothing ever ran in parallel. Each tool now runs in its
        own asyncio task (bounded by a Semaphore of max_parallel) and
        forwards its events through a shared queue, so events are yielded
        as they are produced while tools genuinely overlap.

        Args:
            tool_calls: List of read-only tool calls

        Yields:
            AgentEvent: Execution events (start, result, error)
        """
        if not tool_calls:
            return

        # Bound concurrent executions to max_parallel
        semaphore = asyncio.Semaphore(self.max_parallel)
        # Single queue through which all tasks forward their events
        queue: asyncio.Queue = asyncio.Queue()
        # Sentinel marking "one task finished" (unique object, never a real event)
        done_marker = object()

        async def run_one(tc: ToolCall) -> None:
            """Drain execute_one under the semaphore, forwarding events."""
            try:
                async with semaphore:
                    async for event in self.execute_one(tc):
                        await queue.put(event)
            finally:
                # Always signal completion, even if execute_one raised,
                # so the consumer loop below cannot hang.
                await queue.put(done_marker)

        tasks = [asyncio.create_task(run_one(tc)) for tc in tool_calls]

        remaining = len(tasks)
        try:
            while remaining:
                item = await queue.get()
                if item is done_marker:
                    remaining -= 1
                else:
                    yield item
        finally:
            # If the consumer is abandoned early, don't leak running tasks.
            for task in tasks:
                task.cancel()
            await asyncio.gather(*tasks, return_exceptions=True)

    async def execute_sequential(
        self,
        tool_calls: List[ToolCall]
    ) -> AsyncGenerator[AgentEvent, None]:
        """
        Execute write tools sequentially (one after another).

        Args:
            tool_calls: List of write tool calls

        Yields:
            AgentEvent: Execution events (start, result, error)
        """
        for tc in tool_calls:
            async for event in self.execute_one(tc):
                yield event

    async def execute_one(
        self,
        tool_call: ToolCall
    ) -> AsyncGenerator[AgentEvent, None]:
        """
        Execute a single tool call.

        Phases:
        1. Permission check (security validator first, permission manager fallback)
        2. Tool execution
        3. Result formatting

        Any exception (including permission denials) is converted into a
        TOOL_ERROR event rather than propagated.

        Args:
            tool_call: Tool call to execute

        Yields:
            AgentEvent: Execution events
        """
        # Create event tool call
        event_tool_call = EventToolCall(
            id=tool_call.id,
            name=tool_call.name,
            arguments=tool_call.arguments
        )

        # Emit start event
        yield AgentEvent(
            type=AgentEventType.TOOL_EXECUTION_START,
            tool_call=event_tool_call
        )

        try:
            # Single lookup shared by validation and execution phases
            tool = self.tools.get(tool_call.name)

            # Phase 1: Security validation (🆕 Loom 2.0 - Multi-layer security)
            if self.security_validator:
                # NOTE: when the tool is unknown, validation is skipped and
                # the "not found" error below is raised instead (original behavior).
                if tool:
                    decision = await self.security_validator.validate(
                        tool_call=tool_call,
                        tool=tool,
                        context={}
                    )

                    if not decision.allow:
                        raise PermissionDeniedError(
                            f"Security validation failed: {decision.reason}"
                        )

            # Fallback: Permission check (backward compatibility)
            elif self.permission_manager:
                action = self.permission_manager.check(tool_call.name, tool_call.arguments)
                if action == PermissionAction.DENY:
                    raise PermissionDeniedError(f"Tool {tool_call.name} not allowed")
                if action == PermissionAction.ASK:
                    # TODO: Implement user confirmation flow
                    # For now, treat as DENY
                    raise PermissionDeniedError(f"Tool {tool_call.name} requires confirmation")

            # Phase 2: Tool execution
            if not tool:
                raise ValueError(f"Tool {tool_call.name} not found")

            result_content = await tool.run(**tool_call.arguments)

            # Phase 3: Create result
            result = EventToolResult(
                tool_call_id=tool_call.id,
                tool_name=tool_call.name,
                content=str(result_content) if result_content is not None else "",
                is_error=False
            )

            # Emit result event
            yield AgentEvent.tool_result(result)

        except Exception as e:
            # Convert any failure into an error result event
            result = EventToolResult(
                tool_call_id=tool_call.id,
                tool_name=tool_call.name,
                content=str(e),
                is_error=True
            )

            yield AgentEvent(
                type=AgentEventType.TOOL_ERROR,
                tool_result=result,
                error=e
            )

    def get_orchestration_summary(self, tool_calls: List[ToolCall]) -> Dict:
        """
        Get a summary of how tools will be orchestrated.

        Useful for debugging and understanding execution plans.

        Args:
            tool_calls: List of tool calls to analyze

        Returns:
            Dictionary with orchestration details
        """
        read_only, write = self.categorize_tools(tool_calls)

        return {
            "total_tools": len(tool_calls),
            "read_only_count": len(read_only),
            "write_count": len(write),
            "read_only_tools": [tc.name for tc in read_only],
            "write_tools": [tc.name for tc in write],
            "execution_mode": {
                "read_only": "parallel" if read_only else "none",
                "write": "sequential" if write else "none"
            },
            "max_parallel": self.max_parallel,
            "estimated_phases": (1 if read_only else 0) + (1 if write else 0)
        }
|
loom/core/turn_state.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Turn State Management for tt Recursive Execution
|
|
3
|
+
|
|
4
|
+
Provides immutable state tracking for the tt (tail-recursive) control loop.
|
|
5
|
+
Inspired by Claude Code's recursive conversation management.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from typing import Dict, Any, Optional
|
|
12
|
+
from uuid import uuid4
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass(frozen=True)
class TurnState:
    """Immutable per-turn state for the tt recursive control loop.

    Design principles:
        - Immutable: frozen=True guards against accidental mutation.
        - Serializable: every field round-trips through to_dict/from_dict.
        - Traceable: turn_id and parent_turn_id record the turn lineage.

    Each recursive call to tt() derives a fresh TurnState via next_turn(),
    so the chain of turns forms an explicit parent/child lineage.

    Attributes:
        turn_counter: Current recursion depth (0-based).
        turn_id: Unique identifier for this turn (UUID).
        max_iterations: Maximum recursion depth allowed.
        compacted: Whether conversation history was compacted this turn.
        parent_turn_id: ID of the parent turn (None for the initial turn).
        metadata: Additional turn-specific data.

    Example:
        ```python
        # Initial turn
        state = TurnState.initial(max_iterations=10)
        print(state.turn_counter)  # 0
        print(state.is_initial)    # True

        # Next turn
        next_state = state.next_turn(compacted=False)
        print(next_state.turn_counter)    # 1
        print(next_state.parent_turn_id)  # <original turn_id>
        ```
    """

    turn_counter: int
    turn_id: str
    max_iterations: int = 10
    compacted: bool = False
    parent_turn_id: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    @staticmethod
    def initial(max_iterations: int = 10, **metadata) -> TurnState:
        """Create the state for the first turn of a new conversation.

        Args:
            max_iterations: Maximum recursion depth.
            **metadata: Additional metadata to store.

        Returns:
            TurnState: Initial state with turn_counter=0 and no parent.
        """
        return TurnState(
            turn_counter=0,
            turn_id=str(uuid4()),
            max_iterations=max_iterations,
            compacted=False,
            parent_turn_id=None,
            metadata=metadata
        )

    def next_turn(self, compacted: bool = False, **metadata_updates) -> TurnState:
        """Derive the state for the following turn (immutable update).

        This is the heart of the tail recursion: the counter advances, a
        fresh turn_id is minted, and this turn becomes the parent.

        Args:
            compacted: Whether history was compacted in the next turn.
            **metadata_updates: Updates merged into the existing metadata.

        Returns:
            TurnState: New state with turn_counter + 1.

        Example:
            ```python
            state0 = TurnState.initial()
            state1 = state0.next_turn()  # turn_counter=1
            state2 = state1.next_turn()  # turn_counter=2
            ```
        """
        merged = dict(self.metadata)
        merged.update(metadata_updates)

        return TurnState(
            turn_counter=self.turn_counter + 1,
            turn_id=str(uuid4()),           # fresh unique ID for the new turn
            max_iterations=self.max_iterations,
            compacted=compacted,
            parent_turn_id=self.turn_id,    # lineage link back to this turn
            metadata=merged
        )

    def with_metadata(self, **kwargs) -> TurnState:
        """Derive a state with updated metadata, without advancing the turn.

        Args:
            **kwargs: Metadata entries to add or override.

        Returns:
            TurnState: New state sharing the same turn_counter and turn_id.
        """
        merged = dict(self.metadata)
        merged.update(kwargs)

        return TurnState(
            turn_counter=self.turn_counter,
            turn_id=self.turn_id,
            max_iterations=self.max_iterations,
            compacted=self.compacted,
            parent_turn_id=self.parent_turn_id,
            metadata=merged
        )

    @property
    def is_initial(self) -> bool:
        """Check if this is the initial turn."""
        return self.turn_counter == 0

    @property
    def is_final(self) -> bool:
        """Check if this turn has reached maximum depth."""
        return self.turn_counter >= self.max_iterations

    @property
    def remaining_iterations(self) -> int:
        """Get remaining recursion depth (never negative)."""
        return max(0, self.max_iterations - self.turn_counter)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict for saving/restoration.

        Returns:
            Dict: JSON-serializable representation.
        """
        return dict(
            turn_counter=self.turn_counter,
            turn_id=self.turn_id,
            max_iterations=self.max_iterations,
            compacted=self.compacted,
            parent_turn_id=self.parent_turn_id,
            metadata=self.metadata
        )

    @staticmethod
    def from_dict(data: Dict[str, Any]) -> TurnState:
        """Deserialize from a dict produced by to_dict().

        Args:
            data: Dictionary from to_dict().

        Returns:
            TurnState: Restored state.
        """
        return TurnState(
            turn_counter=data["turn_counter"],
            turn_id=data["turn_id"],
            max_iterations=data.get("max_iterations", 10),
            compacted=data.get("compacted", False),
            parent_turn_id=data.get("parent_turn_id"),
            metadata=data.get("metadata", {})
        )

    def __repr__(self) -> str:
        """Human-readable representation."""
        return (
            f"TurnState(counter={self.turn_counter}/{self.max_iterations}, "
            f"id={self.turn_id[:8]}..., "
            f"compacted={self.compacted})"
        )
|