aloop 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent/__init__.py +0 -0
- agent/agent.py +182 -0
- agent/base.py +406 -0
- agent/context.py +126 -0
- agent/prompts/__init__.py +1 -0
- agent/todo.py +149 -0
- agent/tool_executor.py +54 -0
- agent/verification.py +135 -0
- aloop-0.1.1.dist-info/METADATA +252 -0
- aloop-0.1.1.dist-info/RECORD +66 -0
- aloop-0.1.1.dist-info/WHEEL +5 -0
- aloop-0.1.1.dist-info/entry_points.txt +2 -0
- aloop-0.1.1.dist-info/licenses/LICENSE +21 -0
- aloop-0.1.1.dist-info/top_level.txt +9 -0
- cli.py +19 -0
- config.py +146 -0
- interactive.py +865 -0
- llm/__init__.py +51 -0
- llm/base.py +26 -0
- llm/compat.py +226 -0
- llm/content_utils.py +309 -0
- llm/litellm_adapter.py +450 -0
- llm/message_types.py +245 -0
- llm/model_manager.py +265 -0
- llm/retry.py +95 -0
- main.py +246 -0
- memory/__init__.py +20 -0
- memory/compressor.py +554 -0
- memory/manager.py +538 -0
- memory/serialization.py +82 -0
- memory/short_term.py +88 -0
- memory/store/__init__.py +6 -0
- memory/store/memory_store.py +100 -0
- memory/store/yaml_file_memory_store.py +414 -0
- memory/token_tracker.py +203 -0
- memory/types.py +51 -0
- tools/__init__.py +6 -0
- tools/advanced_file_ops.py +557 -0
- tools/base.py +51 -0
- tools/calculator.py +50 -0
- tools/code_navigator.py +975 -0
- tools/explore.py +254 -0
- tools/file_ops.py +150 -0
- tools/git_tools.py +791 -0
- tools/notify.py +69 -0
- tools/parallel_execute.py +420 -0
- tools/session_manager.py +205 -0
- tools/shell.py +147 -0
- tools/shell_background.py +470 -0
- tools/smart_edit.py +491 -0
- tools/todo.py +130 -0
- tools/web_fetch.py +673 -0
- tools/web_search.py +61 -0
- utils/__init__.py +15 -0
- utils/logger.py +105 -0
- utils/model_pricing.py +49 -0
- utils/runtime.py +75 -0
- utils/terminal_ui.py +422 -0
- utils/tui/__init__.py +39 -0
- utils/tui/command_registry.py +49 -0
- utils/tui/components.py +306 -0
- utils/tui/input_handler.py +393 -0
- utils/tui/model_ui.py +204 -0
- utils/tui/progress.py +292 -0
- utils/tui/status_bar.py +178 -0
- utils/tui/theme.py +165 -0
tools/notify.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
"""Email notification tool using Resend API."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from config import Config
|
|
8
|
+
from tools.base import BaseTool
|
|
9
|
+
|
|
10
|
+
# Resend "send email" REST endpoint that NotifyTool.execute POSTs to.
RESEND_API_URL = "https://api.resend.com/emails"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class NotifyTool(BaseTool):
    """Send email notifications via Resend.

    Posts a single plain-text email to the Resend REST API, using the
    API key and sender address read from Config.
    """

    @property
    def name(self) -> str:
        return "notify"

    @property
    def description(self) -> str:
        return "Send an email notification via Resend."

    @property
    def parameters(self) -> Dict[str, Any]:
        # JSON-schema-style parameter descriptions consumed by the tool layer.
        def _text_param(description: str) -> Dict[str, Any]:
            return {"type": "string", "description": description}

        return {
            "recipient": _text_param("Recipient email address."),
            "subject": _text_param("Email subject line."),
            "body": _text_param("Email body (plain text)."),
        }

    async def execute(self, recipient: str, subject: str, body: str) -> str:
        """Send the email; returns a human-readable success or error string."""
        if not recipient:
            return "Error: recipient email address is required."

        api_key = Config.RESEND_API_KEY
        if not api_key:
            return "Error: RESEND_API_KEY is not configured in .aloop/config."

        from_addr = Config.NOTIFY_EMAIL_FROM
        if not from_addr:
            return "Error: NOTIFY_EMAIL_FROM is not configured in .aloop/config."

        payload = {
            "from": from_addr,
            "to": [recipient],
            "subject": subject,
            "text": body,
        }
        try:
            async with httpx.AsyncClient() as client:
                resp = await client.post(
                    RESEND_API_URL,
                    headers={"Authorization": f"Bearer {api_key}"},
                    json=payload,
                )
                if resp.status_code == 200:
                    return f"Email sent successfully to {recipient}."
                return f"Error sending email: {resp.status_code} {resp.text}"
        except Exception as e:
            return f"Error sending email: {e}"
|
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
"""Parallel execution tool for dependency-aware task execution."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Set
|
|
5
|
+
|
|
6
|
+
from llm import LLMMessage
|
|
7
|
+
|
|
8
|
+
from .base import BaseTool
|
|
9
|
+
|
|
10
|
+
if TYPE_CHECKING:
|
|
11
|
+
from agent.base import BaseAgent
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Allowed tools for subtask execution
# Note: explore_context is allowed (one level of nesting), but parallel_execute is NOT
# Membership is matched against both Anthropic-style ("name") and OpenAI-style
# ("function.name") schemas in ParallelExecutionTool._get_subtask_tools.
ALLOWED_SUBTASK_TOOLS = {
    "glob_files",
    "grep_content",
    "read_file",
    "write_file",
    "edit_file",
    "smart_edit",
    "search_files",
    "code_navigator",
    "shell",
    "calculate",
    "web_search",
    "web_fetch",
    "explore_context",  # Allow one level of nesting
    # "parallel_execute" - NOT included to prevent recursion
}
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class ParallelExecutionTool(BaseTool):
    """Tool for executing tasks with dependencies in parallel.

    This tool enables the main agent to execute multiple tasks concurrently
    while respecting dependency relationships between them.

    Key features:
    - Dependency-aware execution ordering
    - Parallel execution of independent tasks
    - Cycle detection to prevent deadlocks
    - One level of nesting (subtasks can use explore_context but not parallel_execute)
    """

    # Configuration
    MAX_PARALLEL_TASKS = 4  # cap on how many ready tasks run concurrently per batch
    MAX_RESULT_CHARS = 2000  # per-task truncation limit applied in _format_results

    def __init__(self, agent: "BaseAgent"):
        """Initialize parallel execution tool.

        Args:
            agent: The parent agent instance that will execute tasks
        """
        self.agent = agent

    @property
    def name(self) -> str:
        return "parallel_execute"

    @property
    def description(self) -> str:
        # This text is presented to the LLM; it documents when (not) to pick this tool.
        return """Execute multiple tasks with dependencies in parallel.

Use this tool when you need to:
- Execute 3+ independent or semi-dependent tasks concurrently
- Perform operations that can be parallelized for efficiency
- Execute a structured plan with dependency relationships

DO NOT use this for:
- Simple sequential tasks (execute them directly)
- Tasks with complex interdependencies (use regular sequential execution)
- Single tasks (use regular tools directly)

Input parameters:
- tasks (required): Array of task descriptions (strings)
- dependencies (optional): Object mapping task index to array of dependency indices
  Example: {"2": ["0", "1"]} means task 2 depends on tasks 0 and 1

The tool executes tasks in batches based on dependency order.
Tasks with no unmet dependencies run in parallel."""

    @property
    def parameters(self) -> Dict[str, Any]:
        # Both task indices and dependency indices are STRING-encoded integers
        # (JSON object keys must be strings); they are int()-converted internally.
        return {
            "tasks": {
                "type": "array",
                "description": "List of task descriptions to execute",
                "items": {"type": "string"},
            },
            "dependencies": {
                "type": "object",
                "description": "Map of task index to array of dependency indices",
                "additionalProperties": {
                    "type": "array",
                    "items": {"type": "string"},
                },
                "default": {},
            },
        }

    def to_anthropic_schema(self) -> Dict[str, Any]:
        """Convert to Anthropic tool schema format."""
        return {
            "name": self.name,
            "description": self.description,
            "input_schema": {
                "type": "object",
                "properties": self.parameters,
                "required": ["tasks"],
            },
        }

    async def execute(self, tasks: List[str], dependencies: Dict[str, List[str]] | None = None) -> str:
        """Execute tasks with dependency awareness.

        Args:
            tasks: List of task descriptions
            dependencies: Map of task index to dependency indices

        Returns:
            Combined results from all tasks
        """
        if not tasks:
            return "Error: No tasks provided"

        dependencies = dependencies or {}

        # Validate dependencies
        validation_error = self._validate_dependencies(tasks, dependencies)
        if validation_error:
            return validation_error

        # Get allowed tools for subtasks
        subtask_tools = self._get_subtask_tools()

        # Execute tasks in dependency order
        results = await self._execute_with_dependencies(tasks, dependencies, subtask_tools)

        # Format and return results
        return self._format_results(tasks, results)

    def _validate_dependencies(
        self, tasks: List[str], dependencies: Dict[str, List[str]]
    ) -> str | None:
        """Validate dependency graph for cycles and invalid references.

        Args:
            tasks: List of task descriptions
            dependencies: Dependency mapping

        Returns:
            Error message if invalid, None if valid
        """
        task_count = len(tasks)

        def validate_index(index_str: str, max_val: int) -> int | None:
            """Validate and convert string index to int. Returns None if invalid."""
            try:
                idx = int(index_str)
                return idx if 0 <= idx < max_val else None
            except ValueError:
                return None

        # Check for invalid task indices
        for task_idx, deps in dependencies.items():
            if validate_index(task_idx, task_count) is None:
                return f"Error: Invalid task index {task_idx}"

            for dep in deps:
                if validate_index(dep, task_count) is None:
                    return f"Error: Invalid dependency index {dep}"

        # Check for cycles using DFS
        # (must run AFTER index validation; _has_cycle assumes valid int keys)
        if self._has_cycle(task_count, dependencies):
            return "Error: Circular dependency detected in tasks"

        return None

    def _has_cycle(self, task_count: int, dependencies: Dict[str, List[str]]) -> bool:
        """Detect cycles in dependency graph using DFS.

        Args:
            task_count: Number of tasks
            dependencies: Dependency mapping

        Returns:
            True if cycle exists
        """
        # Build adjacency list; edges point dependency -> dependent
        graph: Dict[int, List[int]] = {i: [] for i in range(task_count)}
        for task_idx, deps in dependencies.items():
            idx = int(task_idx)
            for dep in deps:
                graph[int(dep)].append(idx)

        # DFS cycle detection (white = unvisited, gray = on stack, black = done)
        WHITE, GRAY, BLACK = 0, 1, 2
        colors = [WHITE] * task_count

        # NOTE(review): recursive DFS — fine for the small task counts this tool
        # is meant for, but would hit the recursion limit on very deep chains.
        def dfs(node: int) -> bool:
            colors[node] = GRAY
            for neighbor in graph[node]:
                if colors[neighbor] == GRAY:
                    return True
                if colors[neighbor] == WHITE and dfs(neighbor):
                    return True
            colors[node] = BLACK
            return False

        return any(colors[i] == WHITE and dfs(i) for i in range(task_count))

    def _get_subtask_tools(self) -> List[Dict[str, Any]]:
        """Get filtered tools for subtask execution.

        Returns:
            List of tool schemas allowed for subtasks
        """
        all_tools = self.agent.tool_executor.get_tool_schemas()
        # Schemas may be Anthropic-style (top-level "name") or OpenAI-style
        # (nested under "function"); accept either.
        return [
            t
            for t in all_tools
            if t.get("name") in ALLOWED_SUBTASK_TOOLS
            or t.get("function", {}).get("name") in ALLOWED_SUBTASK_TOOLS
        ]

    async def _execute_with_dependencies(
        self,
        tasks: List[str],
        dependencies: Dict[str, List[str]],
        tools: List[Dict[str, Any]],
    ) -> Dict[int, str]:
        """Execute tasks respecting dependency order.

        Args:
            tasks: List of task descriptions
            dependencies: Dependency mapping
            tools: Available tools for subtasks

        Returns:
            Dict mapping task index to result
        """
        results: Dict[int, str] = {}
        completed: Set[int] = set()
        task_count = len(tasks)

        # Convert dependencies to int keys
        deps: Dict[int, Set[int]] = {}
        for task_idx, dep_list in dependencies.items():
            deps[int(task_idx)] = {int(d) for d in dep_list}

        while len(completed) < task_count:
            # Find tasks ready to execute (no unmet dependencies)
            ready = []
            for i in range(task_count):
                if i not in completed:
                    task_deps = deps.get(i, set())
                    if task_deps.issubset(completed):
                        ready.append(i)

            if not ready:
                # No progress possible - should not happen after cycle check
                break

            # Limit batch size
            batch = ready[: self.MAX_PARALLEL_TASKS]

            # Execute batch in parallel
            batch_results = await self._execute_batch(batch, tasks, tools, results)

            # Update results and completed set.
            # A task that raised still yields a "Task failed: ..." result and
            # counts as completed, so its dependents run with that failure
            # visible in their context rather than deadlocking the loop.
            for idx, result in batch_results.items():
                results[idx] = result
                completed.add(idx)

        return results

    async def _execute_batch(
        self,
        batch: List[int],
        tasks: List[str],
        tools: List[Dict[str, Any]],
        previous_results: Dict[int, str],
    ) -> Dict[int, str]:
        """Execute a batch of tasks in parallel.

        Args:
            batch: List of task indices to execute
            tasks: Full task list
            tools: Available tools
            previous_results: Results from completed tasks

        Returns:
            Dict mapping task index to result
        """

        async def execute_single(idx: int) -> tuple[int, str]:
            task_desc = tasks[idx]
            try:
                result = await self._execute_single_task(idx, task_desc, tools, previous_results)
                return idx, result
            except asyncio.CancelledError:
                # Must propagate so TaskGroup cancellation works correctly.
                raise
            except Exception as e:
                return idx, f"Task failed: {str(e)}"

        # Since execute_single catches all exceptions internally (except CancelledError),
        # any ExceptionGroup raised here indicates cancellation which should propagate
        # (asyncio.TaskGroup requires Python 3.11+)
        results = {}
        async with asyncio.TaskGroup() as tg:
            task_list = [tg.create_task(execute_single(idx)) for idx in batch]

        for task in task_list:
            idx, result = task.result()
            results[idx] = result

        return results

    async def _execute_single_task(
        self,
        idx: int,
        task_desc: str,
        tools: List[Dict[str, Any]],
        previous_results: Dict[int, str],
    ) -> str:
        """Execute a single task using isolated mini-loop.

        Args:
            idx: Task index
            task_desc: Task description
            tools: Available tools
            previous_results: Results from completed tasks

        Returns:
            Task result string
        """
        # Build context from previous results
        context = self._build_task_context(previous_results)

        # Build task prompt
        prompt = f"""<role>
You are executing a subtask as part of a larger parallel execution.
Focus on completing this specific task efficiently.
</role>

<task>
Task #{idx}: {task_desc}
</task>

{context}

<instructions>
1. Execute the task using available tools
2. Focus ONLY on completing this specific task
3. Provide a clear summary of what was accomplished
4. Do NOT try to execute other tasks
</instructions>

Execute the task now:"""

        messages = [LLMMessage(role="user", content=prompt)]

        # Run in isolated context: the subtask neither reads nor writes the
        # parent agent's memory.
        result = await self.agent._react_loop(
            messages=messages,
            tools=tools,
            use_memory=False,
            save_to_memory=False,
        )

        return result

    def _build_task_context(self, previous_results: Dict[int, str]) -> str:
        """Build context string from previous task results.

        Args:
            previous_results: Results from completed tasks

        Returns:
            Context string (empty when there are no prior results)
        """
        if not previous_results:
            return ""

        parts = ["<previous_results>"]
        for idx, result in sorted(previous_results.items()):
            # Truncate long results
            truncated = result
            if len(result) > 500:
                truncated = result[:500] + "... [truncated]"
            parts.append(f"Task #{idx}:\n{truncated}\n")
        parts.append("</previous_results>")

        return "\n".join(parts)

    def _format_results(self, tasks: List[str], results: Dict[int, str]) -> str:
        """Format all task results into a combined summary.

        Args:
            tasks: Original task list
            results: Dict mapping task index to result

        Returns:
            Formatted combined results
        """
        parts = ["# Parallel Execution Results\n"]

        for idx, task_desc in enumerate(tasks):
            result = results.get(idx, "Not executed")

            # Truncate long results
            if len(result) > self.MAX_RESULT_CHARS:
                result = result[: self.MAX_RESULT_CHARS] + "... [truncated]"

            status = "Completed" if idx in results else "Failed"
            parts.append(f"## Task {idx}: {task_desc[:100]}\n**Status:** {status}\n{result}\n")

        return "\n".join(parts)
|
tools/session_manager.py
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""CLI tool for managing memory sessions.
|
|
3
|
+
|
|
4
|
+
Usage:
|
|
5
|
+
python tools/session_manager.py list
|
|
6
|
+
python tools/session_manager.py show <session_id>
|
|
7
|
+
python tools/session_manager.py delete <session_id>
|
|
8
|
+
python tools/session_manager.py stats <session_id>
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import argparse
|
|
12
|
+
import asyncio
|
|
13
|
+
import sys
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
# Add parent directory to path
# so that this file can be run directly as a script while still importing
# the project's top-level packages (memory, utils).
sys.path.insert(0, str(Path(__file__).parent.parent))
|
|
19
|
+
|
|
20
|
+
from memory.store import YamlFileMemoryStore
|
|
21
|
+
from utils.runtime import get_sessions_dir
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def format_timestamp(ts: str) -> str:
    """Render an ISO-8601 timestamp as ``YYYY-MM-DD HH:MM:SS``.

    Returns the input unchanged when it cannot be parsed.
    """
    try:
        parsed = datetime.fromisoformat(ts)
    except ValueError:
        return ts
    return parsed.strftime("%Y-%m-%d %H:%M:%S")
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
async def list_sessions(store: YamlFileMemoryStore, limit: int = 50):
    """Print a one-line-per-session table of stored sessions."""
    sessions = await store.list_sessions(limit=limit)
    if not sessions:
        print("No sessions found.")
        return

    divider = "=" * 100
    print(f"\nš Sessions (showing {len(sessions)}):")
    print(divider)
    print(f"{'Session ID':<38} {'Updated':<20} {'Messages':<10} {'Preview':<30}")
    print("-" * 100)

    for entry in sessions:
        # Fall back to created_at when a session was never updated.
        updated = format_timestamp(entry.get("updated_at", entry.get("created_at", "")))
        preview = entry.get("preview", "")[:30]
        print(f"{entry['id']:<38} {updated:<20} {entry['message_count']:<10} {preview:<30}")

    print(divider)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
async def show_session(store: YamlFileMemoryStore, session_id: str, show_messages: bool = False):
    """Print a session's summary statistics and, optionally, its messages."""
    session_data = await store.load_session(session_id)
    if not session_data:
        print(f"ā Session {session_id} not found")
        return

    stats = session_data["stats"]
    messages = session_data["messages"]
    rule = "=" * 100

    print(f"\nš Session: {session_id}")
    print(rule)

    # Summary statistics
    print("\nš Statistics:")
    print(f" Created: {format_timestamp(stats['created_at'])}")
    print(f" System Messages: {len(session_data['system_messages'])}")
    print(f" Messages: {len(messages)}")

    # Full message dump, only when explicitly requested
    if show_messages and messages:
        print(f"\nš¬ Messages ({len(messages)}):")
        for i, msg in enumerate(messages, 1):
            text = str(msg.content)
            if len(text) > 100:
                text = text[:100] + "..."
            print(f"\n Message {i} [{msg.role}]:")
            print(f" {text}")

    print(rule)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
async def show_stats(store: YamlFileMemoryStore, session_id: str):
    """Print timing, message-count, and token statistics for one session."""
    stats = await store.get_session_stats(session_id)
    if not stats:
        print(f"ā Session {session_id} not found")
        return

    rule = "=" * 80
    print(f"\nš Session Statistics: {session_id}")
    print(rule)

    print("\nā° Timing:")
    print(f" Created: {format_timestamp(stats['created_at'])}")
    updated = stats.get("updated_at")
    if updated:
        print(f" Updated: {format_timestamp(updated)}")

    system_count = stats["system_message_count"]
    message_count = stats["message_count"]
    print("\nšØ Messages:")
    print(f" System Messages: {system_count}")
    print(f" Regular Messages: {message_count}")
    print(f" Total Messages: {system_count + message_count}")

    print("\nš« Tokens:")
    print(f" Message Tokens: {stats['total_message_tokens']:,}")

    print(rule)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
async def delete_session(store: YamlFileMemoryStore, session_id: str, confirm: bool = False):
    """Delete a session, asking for interactive confirmation unless confirm=True."""
    if not confirm:
        answer = input(f"Are you sure you want to delete session {session_id}? (yes/no): ")
        if answer.lower() not in ("yes", "y"):
            print("Cancelled.")
            return

    if await store.delete_session(session_id):
        print(f"ā Session {session_id} deleted")
    else:
        print(f"ā Session {session_id} not found")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
async def main():
    """CLI entry point: parse arguments and dispatch to the matching subcommand."""
    parser = argparse.ArgumentParser(
        description="Manage memory sessions",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  List all sessions:
    python tools/session_manager.py list

  Show session details:
    python tools/session_manager.py show <session_id>

  Show session with messages:
    python tools/session_manager.py show <session_id> --messages

  Show session statistics:
    python tools/session_manager.py stats <session_id>

  Delete a session:
    python tools/session_manager.py delete <session_id>
""",
    )

    parser.add_argument(
        "--sessions-dir",
        type=str,
        default=None,
        help="Path to sessions directory (default: .aloop/sessions/)",
    )

    subparsers = parser.add_subparsers(dest="command", help="Command to run")

    # List command
    list_parser = subparsers.add_parser("list", help="List all sessions")
    list_parser.add_argument("--limit", type=int, default=50, help="Max sessions to show")

    # Show command
    show_parser = subparsers.add_parser("show", help="Show session details")
    show_parser.add_argument("session_id", help="Session ID")
    show_parser.add_argument("--messages", action="store_true", help="Show messages")

    # Stats command
    stats_parser = subparsers.add_parser("stats", help="Show session statistics")
    stats_parser.add_argument("session_id", help="Session ID")

    # Delete command
    delete_parser = subparsers.add_parser("delete", help="Delete a session")
    delete_parser.add_argument("session_id", help="Session ID")
    delete_parser.add_argument("--yes", action="store_true", help="Skip confirmation")

    args = parser.parse_args()

    # No subcommand given: show usage instead of failing.
    if not args.command:
        parser.print_help()
        return

    # Initialize store
    sessions_dir = args.sessions_dir if args.sessions_dir else get_sessions_dir()
    store = YamlFileMemoryStore(sessions_dir=sessions_dir)

    # Execute command
    if args.command == "list":
        await list_sessions(store, limit=args.limit)
    elif args.command == "show":
        await show_session(store, args.session_id, show_messages=args.messages)
    elif args.command == "stats":
        await show_stats(store, args.session_id)
    elif args.command == "delete":
        # --yes maps to confirm=True, i.e. skip the interactive prompt.
        await delete_session(store, args.session_id, confirm=args.yes)


if __name__ == "__main__":
    asyncio.run(main())
|