astraagent 2.25.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.template +22 -0
- package/LICENSE +21 -0
- package/README.md +333 -0
- package/astra/__init__.py +15 -0
- package/astra/__pycache__/__init__.cpython-314.pyc +0 -0
- package/astra/__pycache__/chat.cpython-314.pyc +0 -0
- package/astra/__pycache__/cli.cpython-314.pyc +0 -0
- package/astra/__pycache__/prompts.cpython-314.pyc +0 -0
- package/astra/__pycache__/updater.cpython-314.pyc +0 -0
- package/astra/chat.py +763 -0
- package/astra/cli.py +913 -0
- package/astra/core/__init__.py +8 -0
- package/astra/core/__pycache__/__init__.cpython-314.pyc +0 -0
- package/astra/core/__pycache__/agent.cpython-314.pyc +0 -0
- package/astra/core/__pycache__/config.cpython-314.pyc +0 -0
- package/astra/core/__pycache__/memory.cpython-314.pyc +0 -0
- package/astra/core/__pycache__/reasoning.cpython-314.pyc +0 -0
- package/astra/core/__pycache__/state.cpython-314.pyc +0 -0
- package/astra/core/agent.py +515 -0
- package/astra/core/config.py +247 -0
- package/astra/core/memory.py +782 -0
- package/astra/core/reasoning.py +423 -0
- package/astra/core/state.py +366 -0
- package/astra/core/voice.py +144 -0
- package/astra/llm/__init__.py +32 -0
- package/astra/llm/__pycache__/__init__.cpython-314.pyc +0 -0
- package/astra/llm/__pycache__/providers.cpython-314.pyc +0 -0
- package/astra/llm/providers.py +530 -0
- package/astra/planning/__init__.py +117 -0
- package/astra/prompts.py +289 -0
- package/astra/reflection/__init__.py +181 -0
- package/astra/search.py +469 -0
- package/astra/tasks.py +466 -0
- package/astra/tools/__init__.py +17 -0
- package/astra/tools/__pycache__/__init__.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/advanced.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/base.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/browser.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/file.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/git.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/memory_tool.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/python.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/shell.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/web.cpython-314.pyc +0 -0
- package/astra/tools/__pycache__/windows.cpython-314.pyc +0 -0
- package/astra/tools/advanced.py +251 -0
- package/astra/tools/base.py +344 -0
- package/astra/tools/browser.py +93 -0
- package/astra/tools/file.py +476 -0
- package/astra/tools/git.py +74 -0
- package/astra/tools/memory_tool.py +89 -0
- package/astra/tools/python.py +238 -0
- package/astra/tools/shell.py +183 -0
- package/astra/tools/web.py +804 -0
- package/astra/tools/windows.py +542 -0
- package/astra/updater.py +450 -0
- package/astra/utils/__init__.py +230 -0
- package/bin/astraagent.js +73 -0
- package/bin/postinstall.js +25 -0
- package/config.json.template +52 -0
- package/main.py +16 -0
- package/package.json +51 -0
- package/pyproject.toml +72 -0
|
@@ -0,0 +1,8 @@
|
|
|
1
|
+
"""Core components of AstraAgent."""
|
|
2
|
+
|
|
3
|
+
from astra.core.agent import AstraAgent
|
|
4
|
+
from astra.core.config import AgentConfig
|
|
5
|
+
from astra.core.state import AgentState
|
|
6
|
+
from astra.core.memory import MemoryManager
|
|
7
|
+
|
|
8
|
+
__all__ = ["AstraAgent", "AgentConfig", "AgentState", "MemoryManager"]
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
Binary file
|
|
@@ -0,0 +1,515 @@
|
|
|
1
|
+
"""
|
|
2
|
+
AstraAgent Core - The Main Autonomous Agent.
|
|
3
|
+
An elite AI agent more powerful than ChatGPT, Claude, or Gemini.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import sys
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from typing import Optional, Dict, Any, List
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from astra.core.config import AgentConfig, ExecutionMode
|
|
15
|
+
from astra.core.state import AgentState, Action, ActionStatus, Task, TaskStatus, Plan
|
|
16
|
+
from astra.core.memory import MemoryManager
|
|
17
|
+
from astra.core.reasoning import ReasoningEngine, ReasoningMode
|
|
18
|
+
from astra.tools.base import ToolRegistry, create_default_registry, ToolResult
|
|
19
|
+
from astra.tools.memory_tool import MemoryTool
|
|
20
|
+
from astra.llm import create_provider, Message, LLMProvider, LLMResponse
|
|
21
|
+
from astra.prompts import SYSTEM_PROMPT, build_system_prompt, get_chain_of_thought_prompt
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class AstraAgent:
    """
    The main autonomous agent.
    An elite AI agent more powerful than ChatGPT, Claude, or Gemini.
    """

    def __init__(self, config: Optional[AgentConfig] = None):
        """Wire up state, memory, reasoning, tools, and logging.

        Args:
            config: Agent configuration; a default ``AgentConfig`` is
                created when ``None`` is given.
        """
        self.config = config or AgentConfig()
        self.state = AgentState()
        self.logger = self._setup_logging()

        # Initialize Unified Memory System
        self.memory = MemoryManager(
            persistence_path=self.config.memory.persistence_path or "./.astra_memory",
            auto_save=self.config.memory.auto_save
        )
        # Class-level wiring: gives the memory tool a handle to the shared manager.
        MemoryTool.set_memory_manager(self.memory)

        # Initialize Reasoning Engine
        self.reasoning = ReasoningEngine()

        self.tools = create_default_registry()
        # The provider is created lazily by _init_llm() on first use.
        self.llm: Optional[LLMProvider] = None

        # Message history
        self.messages: List[Message] = []
        # Counts consecutive plain-text "final" replies; step() uses it to
        # ask the model (at most twice) to re-answer in the required JSON format.
        self._format_retry_count: int = 0

        # Check for API key upfront
        self._validate_config()
def _validate_config(self):
|
|
56
|
+
"""Validate configuration and warn about issues."""
|
|
57
|
+
if not self.config.llm.api_key:
|
|
58
|
+
self.logger.warning("No API key found. Set LOCAL_API_KEY environment variable.")
|
|
59
|
+
|
|
60
|
+
    def _setup_logging(self) -> logging.Logger:
        """Setup logging with UTF-8 support for Windows.

        Returns:
            The "AstraAgent" logger configured with console and/or file
            handlers according to ``self.config.logging``.
        """
        logger = logging.getLogger("AstraAgent")
        # config.logging.level is an enum whose value is the level name string.
        logger.setLevel(getattr(logging, self.config.logging.level.value.upper()))

        # Clear existing handlers
        # (getLogger returns a shared instance; avoids duplicate output when
        # the agent is constructed more than once in a process).
        logger.handlers.clear()

        if self.config.logging.log_to_console:
            # Use UTF-8 encoding for console on Windows
            handler = logging.StreamHandler(sys.stdout)
            handler.setFormatter(logging.Formatter(self.config.logging.log_format))
            # Set encoding to handle unicode on Windows
            if hasattr(handler.stream, 'reconfigure'):
                try:
                    handler.stream.reconfigure(encoding='utf-8', errors='replace')
                except Exception:
                    # Best-effort: keep the stream's default encoding on failure.
                    pass
            logger.addHandler(handler)

        if self.config.logging.log_to_file:
            log_dir = Path(self.config.logging.log_directory)
            log_dir.mkdir(parents=True, exist_ok=True)
            # One timestamped log file per agent instantiation.
            log_file = log_dir / f"astra_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
            handler = logging.FileHandler(log_file, encoding='utf-8')
            handler.setFormatter(logging.Formatter(self.config.logging.log_format))
            logger.addHandler(handler)

        return logger
def _init_llm(self):
|
|
91
|
+
"""Initialize LLM provider."""
|
|
92
|
+
if self.llm is None:
|
|
93
|
+
if not self.config.llm.api_key:
|
|
94
|
+
raise RuntimeError(
|
|
95
|
+
f"No API key configured for local server.\n"
|
|
96
|
+
f"Please set the environment variable:\n"
|
|
97
|
+
f" set LOCAL_API_KEY=your-key\n"
|
|
98
|
+
f"Or add it to config.json"
|
|
99
|
+
)
|
|
100
|
+
|
|
101
|
+
self.llm = create_provider(
|
|
102
|
+
self.config.llm.provider,
|
|
103
|
+
api_key=self.config.llm.api_key,
|
|
104
|
+
model=self.config.llm.model,
|
|
105
|
+
api_base=self.config.llm.api_base
|
|
106
|
+
)
|
|
107
|
+
|
|
108
|
+
    def _build_system_prompt(self, mode: str = "default", goal: Optional[str] = None) -> str:
        """Build the enhanced system prompt with context.

        Args:
            mode: Prompt variant forwarded to ``build_system_prompt``.
            goal: Current goal; when given, goal-relevant memory is appended.

        Returns:
            The system prompt text, optionally extended with known user
            facts and goal-specific memory context.
        """
        tool_names = ", ".join(self.tools.list_enabled())

        # Build base prompt
        prompt = build_system_prompt(
            workspace=self.config.workspace_path,
            tools=tool_names,
            mode=mode
        )

        # Add memory context if available
        try:
            user_facts = self.memory.get_user_facts()
            if user_facts:
                prompt += "\n\n=== KNOWN ABOUT USER ===\n"
                # Only the first five facts, to keep the prompt compact.
                for fact in user_facts[:5]:
                    prompt += f"- {fact.content}\n"

            # Add compact goal-aware memory context
            if goal:
                goal_context = self.memory.get_goal_context(goal, max_items=12)
                if goal_context:
                    prompt += "\n\n=== MEMORY CONTEXT FOR CURRENT GOAL ===\n"
                    # Cap the memory block at 3000 chars to bound prompt size.
                    prompt += goal_context[:3000]
        except (AttributeError, ValueError, KeyError) as e:
            # Memory lookups are best-effort; the base prompt is still usable.
            self.logger.warning(f"Failed to retrieve user facts: {e}")
        except Exception as e:
            self.logger.error(f"Unexpected error retrieving user facts: {e}", exc_info=True)

        return prompt
def _is_unstructured_final(self, raw_content: str, parsed: Dict[str, Any]) -> bool:
|
|
141
|
+
"""Detect plain-text final answers that violate required JSON format."""
|
|
142
|
+
if "final" not in parsed:
|
|
143
|
+
return False
|
|
144
|
+
content = (raw_content or "").strip()
|
|
145
|
+
if not content:
|
|
146
|
+
return False
|
|
147
|
+
if "```json" in content:
|
|
148
|
+
return False
|
|
149
|
+
if content.startswith("{") and content.endswith("}"):
|
|
150
|
+
return False
|
|
151
|
+
return True
|
|
152
|
+
|
|
153
|
+
def _parse_response(self, content: str) -> Optional[Dict[str, Any]]:
|
|
154
|
+
"""Parse response from LLM. Handles both JSON and plain text."""
|
|
155
|
+
if not content or not content.strip():
|
|
156
|
+
return None
|
|
157
|
+
|
|
158
|
+
content = content.strip()
|
|
159
|
+
|
|
160
|
+
# Try to extract JSON from markdown code blocks
|
|
161
|
+
if "```json" in content:
|
|
162
|
+
start = content.find("```json") + 7
|
|
163
|
+
end = content.find("```", start)
|
|
164
|
+
if end > start:
|
|
165
|
+
json_content = content[start:end].strip()
|
|
166
|
+
try:
|
|
167
|
+
parsed = json.loads(json_content)
|
|
168
|
+
if parsed:
|
|
169
|
+
return parsed
|
|
170
|
+
except json.JSONDecodeError:
|
|
171
|
+
pass
|
|
172
|
+
elif "```" in content:
|
|
173
|
+
start = content.find("```") + 3
|
|
174
|
+
end = content.find("```", start)
|
|
175
|
+
if end > start:
|
|
176
|
+
json_content = content[start:end].strip()
|
|
177
|
+
if json_content.startswith("{"):
|
|
178
|
+
try:
|
|
179
|
+
parsed = json.loads(json_content)
|
|
180
|
+
if parsed:
|
|
181
|
+
return parsed
|
|
182
|
+
except json.JSONDecodeError:
|
|
183
|
+
pass
|
|
184
|
+
|
|
185
|
+
# Try to find JSON object in content
|
|
186
|
+
if "{" in content and "}" in content:
|
|
187
|
+
start = content.find("{")
|
|
188
|
+
end = content.rfind("}") + 1
|
|
189
|
+
if end > start:
|
|
190
|
+
try:
|
|
191
|
+
parsed = json.loads(content[start:end])
|
|
192
|
+
if parsed:
|
|
193
|
+
return parsed
|
|
194
|
+
except json.JSONDecodeError:
|
|
195
|
+
pass
|
|
196
|
+
|
|
197
|
+
# Smart intent detection for common commands in natural language responses
|
|
198
|
+
content_lower = content.lower()
|
|
199
|
+
|
|
200
|
+
# Check for lock computer intent
|
|
201
|
+
if any(kw in content_lower for kw in ["lock screen", "lockworkstation", "lock computer", "locking"]):
|
|
202
|
+
if "rundll32" in content_lower or "win + l" in content_lower or "lock" in content_lower:
|
|
203
|
+
return {
|
|
204
|
+
"thought": "User wants to lock the computer, executing now",
|
|
205
|
+
"action": "system_control",
|
|
206
|
+
"args": {"action": "lock"}
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
# Check for volume intent
|
|
210
|
+
if "volume" in content_lower and any(char.isdigit() for char in content):
|
|
211
|
+
import re
|
|
212
|
+
match = re.search(r'(\d+)\s*%?', content)
|
|
213
|
+
if match:
|
|
214
|
+
return {
|
|
215
|
+
"thought": "Setting volume to specified percentage",
|
|
216
|
+
"action": "system_control",
|
|
217
|
+
"args": {"action": "set_volume", "value": match.group(1)}
|
|
218
|
+
}
|
|
219
|
+
|
|
220
|
+
# Check for mute intent
|
|
221
|
+
if "mute" in content_lower and ("volume" in content_lower or "sound" in content_lower):
|
|
222
|
+
return {
|
|
223
|
+
"thought": "Muting the system",
|
|
224
|
+
"action": "system_control",
|
|
225
|
+
"args": {"action": "mute"}
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
# If no valid JSON found, treat the entire response as a final answer
|
|
229
|
+
# This handles models that don't output structured JSON
|
|
230
|
+
return {"final": content}
|
|
231
|
+
|
|
232
|
+
async def _execute_tool(self, tool_name: str, args: Dict[str, Any]) -> ToolResult:
|
|
233
|
+
"""Execute a tool."""
|
|
234
|
+
self.logger.debug(f"Executing tool: {tool_name} with args: {args}")
|
|
235
|
+
|
|
236
|
+
result = await self.tools.execute(tool_name, **args)
|
|
237
|
+
|
|
238
|
+
if result.success:
|
|
239
|
+
self.logger.debug(f"Tool {tool_name} succeeded")
|
|
240
|
+
for artifact in result.artifacts:
|
|
241
|
+
self.state.add_artifact(artifact)
|
|
242
|
+
else:
|
|
243
|
+
self.logger.warning(f"Tool {tool_name} failed: {result.error}")
|
|
244
|
+
|
|
245
|
+
return result
|
|
246
|
+
|
|
247
|
+
    async def _think(self, goal: str) -> LLMResponse:
        """Generate next action from LLM.

        Seeds the conversation on the first call (system prompt + goal),
        attaches goal-relevant memory, then asks the provider for the next
        step. Token usage is accumulated on ``self.state``.

        Args:
            goal: The goal currently being pursued.

        Returns:
            The raw provider response (may contain tool calls or text).
        """
        self._init_llm()

        # Update messages if needed
        if not self.messages:
            self.messages.append(Message(role="system", content=self._build_system_prompt(goal=goal)))
            self.messages.append(Message(role="user", content=f"Goal: {goal}"))

        # Add memory context as explicit runtime instruction message
        # NOTE(review): this runs on every call, so long runs can accumulate
        # duplicate memory-context messages — confirm that is intended.
        try:
            memory_context = self.memory.get_goal_context(goal, max_items=10)
            if memory_context:
                self.messages.append(
                    Message(
                        role="user",
                        content=(
                            "Memory context (must be considered while planning):\n"
                            f"{memory_context[:2500]}"
                        )
                    )
                )
        except Exception as e:
            # Best-effort: missing memory context must not block the step.
            self.logger.warning(f"Failed to attach memory context: {e}")

        # Add context about recent actions
        if self.state.action_history:
            recent = self.state.action_history[-5:]
            context = "Recent actions:\n"
            for action in recent:
                status = "[OK]" if action.status == ActionStatus.SUCCESS else "[FAIL]"
                # Truncate each result to 200 chars to keep the context small.
                result_preview = str(action.result)[:200] if action.result else ""
                context += f"{status} {action.tool}: {result_preview}\n"

            # Add as context if not already in messages
            if len(self.messages) <= 2:
                self.messages.append(Message(role="assistant", content=context))

        response = await self.llm.generate(
            self.messages,
            tools=self.tools.get_all_schemas(),
            temperature=self.config.llm.temperature,
            max_tokens=self.config.llm.max_tokens
        )

        self.state.total_tokens_used += response.tokens_used
        return response
    async def step(self) -> bool:
        """Execute one step of the agent loop. Returns True if complete.

        A step asks the LLM for the next move, then either executes native
        tool calls, executes a JSON-described action, accepts a final
        answer, or nudges the model back into the required JSON format.
        """
        self.state.increment_iteration()
        self.logger.debug(f"=== Iteration {self.state.iteration_count} ===")

        # Check iteration limit
        if self.state.iteration_count > self.config.max_iterations:
            self.logger.warning("Max iterations reached")
            return True

        # Get LLM response
        try:
            response = await self._think(self.state.current_goal)
        except RuntimeError as e:
            # API key or configuration error - stop immediately
            self.logger.debug(str(e))
            self.state.last_error = str(e)
            return True  # Stop the loop
        except Exception as e:
            # Transient LLM failure: record it and let the loop retry.
            self.logger.debug(f"LLM error: {e}")
            self.state.last_error = str(e)
            return False

        # Handle tool calls from LLM
        if response.tool_calls:
            if response.content:
                self.memory.remember_conversation("assistant", response.content, metadata={"kind": "tool_call"})
            for tc in response.tool_calls:
                action = Action(
                    tool=tc["name"],
                    args=tc["arguments"],
                    thought=response.content
                )
                action.mark_executing()

                result = await self._execute_tool(tc["name"], tc["arguments"])

                if result.success:
                    action.mark_success(result.output)
                else:
                    action.mark_failed(result.error)

                self.state.record_action(action)

                # Add result to messages for context
                self.messages.append(Message(
                    role="tool",
                    content=result.output or result.error or "No output",
                    tool_call_id=tc.get("id"),
                    name=tc["name"]
                ))
            return False

        # Parse response content
        parsed = self._parse_response(response.content)

        if not parsed:
            self.logger.warning("Could not parse LLM response")
            self.messages.append(Message(role="assistant", content=response.content))
            return False

        # Check for final response
        if "final" in parsed:
            # Plain-text finals get up to two re-format retries before being accepted.
            if self._is_unstructured_final(response.content, parsed) and self._format_retry_count < 2:
                self._format_retry_count += 1
                self.messages.append(Message(
                    role="user",
                    content=(
                        "FORMAT ERROR: Reply ONLY with valid JSON in the required schema. "
                        "Do not output plain text."
                    )
                ))
                return False

            self._format_retry_count = 0
            self.logger.debug(f"Task complete: {parsed['final']}")
            self.memory.remember_conversation("assistant", str(parsed["final"]))
            return True

        # Check for action
        if "action" in parsed:
            self._format_retry_count = 0
            action = Action(
                tool=parsed["action"],
                args=parsed.get("args", {}),
                thought=parsed.get("thought", "")
            )
            if action.thought:
                self.memory.remember_conversation("assistant", action.thought, metadata={"kind": "reasoning"})
            action.mark_executing()

            result = await self._execute_tool(action.tool, action.args)

            if result.success:
                action.mark_success(result.output)
            else:
                action.mark_failed(result.error)

            self.state.record_action(action)

            # Remember the action in memory
            self.memory.remember_action(
                action=f"{action.tool}({action.args})",
                result=str(result.output or result.error)[:500],
                success=result.success
            )

            # Add action result to messages
            self.messages.append(Message(role="assistant", content=json.dumps(parsed)))
            self.messages.append(Message(
                role="user",
                content=f"Tool result: {result.output or result.error}"
            ))
            return False

        # No clear action, continue
        self.messages.append(Message(role="assistant", content=response.content))
        return False
    async def run(self, goal: str) -> str:
        """Run the agent to accomplish a goal.

        Drives ``step()`` until it reports completion, the agent is paused,
        or a fatal configuration error occurs. State and memory are always
        flushed in the ``finally`` block.

        Args:
            goal: Natural-language goal to pursue.

        Returns:
            A session summary on success, or an error/interruption message.
        """
        # Validate configuration before starting
        try:
            self.config.llm.validate()
        except ValueError as e:
            self.logger.debug(f"Configuration error: {e}")
            return f"Failed to start: {e}"

        self.logger.debug(f"Starting AstraAgent with goal: {goal}")
        self.state.set_goal(goal)
        self.state.is_running = True
        self.messages = []  # Reset messages
        self._format_retry_count = 0

        # Remember the goal
        self.memory.remember(
            content=f"User goal: {goal}",
            memory_type="conversation",
            importance=0.8,
            tags=["goal", "user_request"]
        )
        self.memory.remember_conversation("user", goal, metadata={"kind": "goal"})

        try:
            # step() returns True when the goal is complete (or must abort).
            while not await self.step():
                if self.state.is_paused:
                    self.logger.debug("Agent paused")
                    break

                # Check for critical errors
                if self.state.last_error and "No API key" in self.state.last_error:
                    break

                # Brief pause between iterations
                await asyncio.sleep(0.1)

            summary = self._generate_summary()
            self.logger.debug(f"Agent finished:\n{summary}")

            # Remember the result
            self.memory.remember(
                content=f"Completed goal: {goal} | Result: {summary[:200]}",
                memory_type="observation",
                importance=0.7,
                tags=["completion", "result"]
            )

            return summary

        except KeyboardInterrupt:
            self.logger.debug("Agent interrupted")
            return "Agent was interrupted by user."
        except Exception as e:
            self.logger.error(f"Agent error: {e}")
            return f"Agent failed with error: {e}"
        finally:
            # Always mark the agent stopped and persist memory.
            self.state.is_running = False
            self.memory.save()
def _generate_summary(self) -> str:
|
|
475
|
+
"""Generate a summary of what was accomplished."""
|
|
476
|
+
# Use ASCII-safe characters for Windows compatibility
|
|
477
|
+
summary = [
|
|
478
|
+
f"=== AstraAgent Session Summary ===",
|
|
479
|
+
f"Goal: {self.state.current_goal}",
|
|
480
|
+
f"Iterations: {self.state.iteration_count}",
|
|
481
|
+
f"Actions: {self.state.total_actions} (OK:{self.state.successful_actions} FAIL:{self.state.failed_actions})",
|
|
482
|
+
f"Success Rate: {self.state.success_rate:.1f}%",
|
|
483
|
+
f"Duration: {self.state.duration:.1f}s",
|
|
484
|
+
f"Tokens Used: {self.state.total_tokens_used}",
|
|
485
|
+
]
|
|
486
|
+
|
|
487
|
+
if self.state.last_error:
|
|
488
|
+
summary.append(f"\nLast Error: {self.state.last_error}")
|
|
489
|
+
|
|
490
|
+
if self.state.artifacts_created:
|
|
491
|
+
summary.append(f"\nArtifacts Created:")
|
|
492
|
+
for artifact in self.state.artifacts_created:
|
|
493
|
+
summary.append(f" - {artifact}")
|
|
494
|
+
|
|
495
|
+
if self.state.errors_encountered:
|
|
496
|
+
summary.append(f"\nErrors Encountered: {len(self.state.errors_encountered)}")
|
|
497
|
+
|
|
498
|
+
return "\n".join(summary)
|
|
499
|
+
|
|
500
|
+
def pause(self):
|
|
501
|
+
"""Pause the agent."""
|
|
502
|
+
self.state.is_paused = True
|
|
503
|
+
|
|
504
|
+
def resume(self):
|
|
505
|
+
"""Resume the agent."""
|
|
506
|
+
self.state.is_paused = False
|
|
507
|
+
|
|
508
|
+
def get_status(self) -> Dict[str, Any]:
|
|
509
|
+
"""Get current agent status."""
|
|
510
|
+
return self.state.to_dict()
|
|
511
|
+
|
|
512
|
+
def shutdown(self):
|
|
513
|
+
"""Shutdown the agent cleanly."""
|
|
514
|
+
self.memory.shutdown()
|
|
515
|
+
self.logger.debug("AstraAgent shutdown complete")
|