supyagent 0.1.0__py3-none-any.whl
This diff shows the content of publicly available package versions as released to one of the supported registries. The information is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Potentially problematic release: this version of supyagent might be problematic.
- supyagent/__init__.py +5 -0
- supyagent/__main__.py +8 -0
- supyagent/cli/__init__.py +1 -0
- supyagent/cli/main.py +946 -0
- supyagent/core/__init__.py +21 -0
- supyagent/core/agent.py +379 -0
- supyagent/core/context.py +158 -0
- supyagent/core/credentials.py +275 -0
- supyagent/core/delegation.py +286 -0
- supyagent/core/executor.py +232 -0
- supyagent/core/llm.py +73 -0
- supyagent/core/registry.py +238 -0
- supyagent/core/session_manager.py +233 -0
- supyagent/core/tools.py +235 -0
- supyagent/models/__init__.py +6 -0
- supyagent/models/agent_config.py +86 -0
- supyagent/models/session.py +43 -0
- supyagent/utils/__init__.py +1 -0
- supyagent/utils/paths.py +31 -0
- supyagent-0.1.0.dist-info/METADATA +328 -0
- supyagent-0.1.0.dist-info/RECORD +24 -0
- supyagent-0.1.0.dist-info/WHEEL +4 -0
- supyagent-0.1.0.dist-info/entry_points.txt +2 -0
- supyagent-0.1.0.dist-info/licenses/LICENSE +21 -0
supyagent/core/executor.py
ADDED

@@ -0,0 +1,232 @@
"""
Execution runner for non-interactive agent execution.

Execution agents are stateless, input→output pipelines designed for automation.
"""

import json
import os
import re
from typing import Any

from supyagent.core.credentials import CredentialManager
from supyagent.core.llm import LLMClient
from supyagent.core.tools import (
    discover_tools,
    execute_tool,
    filter_tools,
    is_credential_request,
    supypowers_to_openai_tools,
)
from supyagent.models.agent_config import AgentConfig


class ExecutionRunner:
    """
    Runs agents in non-interactive execution mode.

    Key differences from interactive mode:
    - No session persistence
    - No credential prompting (must be pre-provided)
    - Single input → output execution
    - Designed for automation and pipelines
    """

    def __init__(
        self,
        config: AgentConfig,
        credential_manager: CredentialManager | None = None,
    ):
        """
        Initialize the execution runner.

        Args:
            config: Agent configuration
            credential_manager: Optional credential manager for stored credentials
        """
        self.config = config
        self.credential_manager = credential_manager or CredentialManager()

        # Initialize LLM client
        self.llm = LLMClient(
            model=config.model.provider,
            temperature=config.model.temperature,
            max_tokens=config.model.max_tokens,
        )

        # Load available tools (excluding credential request tool for execution mode)
        self.tools = self._load_tools()

    def _load_tools(self) -> list[dict[str, Any]]:
        """
        Load tools for execution mode.

        Note: Does NOT include request_credential tool since
        execution mode cannot prompt for credentials.
        """
        # If no tools allowed, return empty
        if not self.config.tools.allow:
            return []

        # Discover tools from supypowers
        sp_tools = discover_tools()
        openai_tools = supypowers_to_openai_tools(sp_tools)
        filtered = filter_tools(openai_tools, self.config.tools)

        return filtered

    def run(
        self,
        task: str | dict[str, Any],
        secrets: dict[str, str] | None = None,
        output_format: str = "raw",
    ) -> dict[str, Any]:
        """
        Execute a task and return the result.

        Args:
            task: Task description (string) or structured input (dict)
            secrets: Pre-provided credentials (KEY=VALUE)
            output_format: "raw" | "json" | "markdown"

        Returns:
            {"ok": True, "data": ...} or {"ok": False, "error": ...}
        """
        # Merge secrets: stored credentials + provided secrets
        all_secrets = self.credential_manager.get_all_for_tools(self.config.name)
        if secrets:
            all_secrets.update(secrets)

        # Inject secrets into environment for this execution
        for key, value in all_secrets.items():
            os.environ[key] = value

        try:
            # Build the prompt
            if isinstance(task, dict):
                user_content = self._format_structured_input(task)
            else:
                user_content = str(task)

            messages: list[dict[str, Any]] = [
                {"role": "system", "content": self.config.system_prompt},
                {"role": "user", "content": user_content},
            ]

            # Run with tool loop (max iterations for safety)
            max_iterations = self.config.limits.get("max_tool_calls_per_turn", 20)
            iterations = 0

            while iterations < max_iterations:
                iterations += 1

                response = self.llm.chat(
                    messages=messages,
                    tools=self.tools if self.tools else None,
                )
                assistant_msg = response.choices[0].message

                # Build message for history
                msg_dict: dict[str, Any] = {
                    "role": "assistant",
                    "content": assistant_msg.content,
                }

                if assistant_msg.tool_calls:
                    msg_dict["tool_calls"] = [
                        {
                            "id": tc.id,
                            "type": "function",
                            "function": {
                                "name": tc.function.name,
                                "arguments": tc.function.arguments,
                            },
                        }
                        for tc in assistant_msg.tool_calls
                    ]

                messages.append(msg_dict)

                # If no tool calls, we're done
                if not assistant_msg.tool_calls:
                    return self._format_output(assistant_msg.content or "", output_format)

                # Execute tools
                for tool_call in assistant_msg.tool_calls:
                    # Credential requests fail in execution mode
                    if is_credential_request(tool_call):
                        try:
                            args = json.loads(tool_call.function.arguments)
                            cred_name = args.get("name", "unknown")
                        except json.JSONDecodeError:
                            cred_name = "unknown"

                        return {
                            "ok": False,
                            "error": f"Credential '{cred_name}' required but not provided. "
                            f"Pass it via --secrets {cred_name}=<value>",
                        }

                    result = self._execute_tool(tool_call, all_secrets)
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": json.dumps(result),
                    })

            return {"ok": False, "error": "Max tool iterations exceeded"}

        except Exception as e:
            return {"ok": False, "error": str(e)}

    def _format_structured_input(self, task: dict[str, Any]) -> str:
        """Format a structured input dict into a prompt."""
        return json.dumps(task, indent=2)

    def _format_output(self, content: str, output_format: str) -> dict[str, Any]:
        """Format the output according to requested format."""
        if output_format == "json":
            # Try to parse as JSON
            try:
                data = json.loads(content)
                return {"ok": True, "data": data}
            except json.JSONDecodeError:
                pass

            # Try to extract JSON from markdown code block
            match = re.search(r"```(?:json)?\s*([\s\S]*?)\s*```", content)
            if match:
                try:
                    data = json.loads(match.group(1))
                    return {"ok": True, "data": data}
                except json.JSONDecodeError:
                    pass

            # Return as-is
            return {"ok": True, "data": content}

        elif output_format == "markdown":
            return {"ok": True, "data": content, "format": "markdown"}

        else:  # raw
            return {"ok": True, "data": content}

    def _execute_tool(
        self,
        tool_call: Any,
        secrets: dict[str, str] | None = None,
    ) -> dict[str, Any]:
        """Execute a tool call."""
        name = tool_call.function.name
        arguments_str = tool_call.function.arguments

        try:
            args = json.loads(arguments_str)
        except json.JSONDecodeError:
            return {"ok": False, "error": f"Invalid JSON arguments: {arguments_str}"}

        if "__" not in name:
            return {"ok": False, "error": f"Invalid tool name format: {name}"}

        script, func = name.split("__", 1)
        return execute_tool(script, func, args, secrets=secrets)
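For orientation, a minimal sketch of driving this runner from Python (not part of the package): it assumes an AgentConfig has already been constructed elsewhere, and the task text, secret name, and value below are purely illustrative.

from supyagent.core.executor import ExecutionRunner
from supyagent.models.agent_config import AgentConfig


def run_once(config: AgentConfig) -> None:
    # Stateless runner: no session persistence, no credential prompting.
    runner = ExecutionRunner(config)

    # GITHUB_TOKEN is a hypothetical secret name, supplied the same way
    # "--secrets KEY=value" would provide it on the command line.
    result = runner.run(
        task="Summarize the open issues in the tracker",
        secrets={"GITHUB_TOKEN": "ghp_example"},
        output_format="json",
    )

    if result["ok"]:
        print(result["data"])      # parsed JSON if the model returned any, else raw text
    else:
        print(f"execution failed: {result['error']}")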
supyagent/core/llm.py
ADDED
@@ -0,0 +1,73 @@
"""
LiteLLM wrapper for unified LLM access.
"""

from typing import Any

from litellm import completion
from litellm.types.utils import ModelResponse


class LLMClient:
    """
    Wrapper around LiteLLM for consistent LLM access.

    Supports any provider that LiteLLM supports:
    - OpenAI: openai/gpt-4o
    - Anthropic: anthropic/claude-3-5-sonnet-20241022
    - Ollama: ollama/llama3
    - And 100+ more...
    """

    def __init__(
        self,
        model: str,
        temperature: float = 0.7,
        max_tokens: int = 4096,
    ):
        """
        Initialize the LLM client.

        Args:
            model: LiteLLM model identifier (e.g., 'anthropic/claude-3-5-sonnet-20241022')
            temperature: Sampling temperature (0-2)
            max_tokens: Maximum tokens in response
        """
        self.model = model
        self.temperature = temperature
        self.max_tokens = max_tokens

    def chat(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]] | None = None,
        stream: bool = False,
    ) -> ModelResponse:
        """
        Send a chat completion request.

        Args:
            messages: List of message dicts with 'role' and 'content'
            tools: Optional list of tool definitions (OpenAI format)
            stream: Whether to stream the response

        Returns:
            LiteLLM ModelResponse
        """
        kwargs: dict[str, Any] = {
            "model": self.model,
            "messages": messages,
            "temperature": self.temperature,
            "max_tokens": self.max_tokens,
            "stream": stream,
        }

        if tools:
            kwargs["tools"] = tools
            kwargs["tool_choice"] = "auto"

        return completion(**kwargs)

    def change_model(self, model: str) -> None:
        """Change the model being used."""
        self.model = model
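A short usage sketch for this wrapper (not included in the package). The model identifiers come from the docstring above; it assumes the chosen provider is actually reachable (here a local Ollama llama3, and valid provider credentials for the Anthropic switch).

from supyagent.core.llm import LLMClient

client = LLMClient(
    model="ollama/llama3",   # any LiteLLM identifier works here
    temperature=0.2,
    max_tokens=1024,
)

response = client.chat(
    messages=[
        {"role": "system", "content": "You are a terse assistant."},
        {"role": "user", "content": "Name three prime numbers."},
    ],
)

# ModelResponse exposes the OpenAI-style shape used in executor.py.
print(response.choices[0].message.content)

# Switch providers mid-run without rebuilding the client.
client.change_model("anthropic/claude-3-5-sonnet-20241022")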
supyagent/core/registry.py
ADDED

@@ -0,0 +1,238 @@
"""
Agent Registry for tracking active agent instances.

Enables multi-agent orchestration by tracking parent-child relationships
and managing agent lifecycles.
"""

import json
import uuid
from dataclasses import dataclass, field
from datetime import datetime, UTC
from pathlib import Path
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from supyagent.core.agent import Agent


@dataclass
class AgentInstance:
    """
    Metadata for a running agent instance.

    Attributes:
        name: The agent type name (e.g., "planner", "researcher")
        instance_id: Unique identifier for this instance
        created_at: When the instance was created
        parent_id: Instance ID of the parent agent (if spawned by another)
        status: Current status (active, completed, failed)
    """

    name: str
    instance_id: str
    created_at: datetime
    parent_id: str | None = None
    status: str = "active"
    depth: int = 0  # Delegation depth for preventing infinite loops

    def to_dict(self) -> dict[str, Any]:
        """Serialize to dictionary."""
        return {
            "name": self.name,
            "instance_id": self.instance_id,
            "created_at": self.created_at.isoformat(),
            "parent_id": self.parent_id,
            "status": self.status,
            "depth": self.depth,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "AgentInstance":
        """Deserialize from dictionary."""
        created_at = data["created_at"]
        if isinstance(created_at, str):
            # Handle both aware and naive datetime strings
            if created_at.endswith("Z"):
                created_at = created_at[:-1] + "+00:00"
            created_at = datetime.fromisoformat(created_at)

        return cls(
            name=data["name"],
            instance_id=data["instance_id"],
            created_at=created_at,
            parent_id=data.get("parent_id"),
            status=data.get("status", "active"),
            depth=data.get("depth", 0),
        )


class AgentRegistry:
    """
    Manages agent instances and their relationships.

    Enables agents to spawn and communicate with sub-agents while
    tracking the hierarchy for debugging and resource management.
    """

    # Maximum delegation depth to prevent infinite loops
    MAX_DEPTH = 5

    def __init__(self, base_dir: Path | None = None):
        """
        Initialize the registry.

        Args:
            base_dir: Base directory for storing registry data.
                Defaults to ~/.supyagent/
        """
        if base_dir is None:
            base_dir = Path.home() / ".supyagent"

        self.base_dir = Path(base_dir)
        self.registry_path = self.base_dir / "registry.json"

        self._instances: dict[str, AgentInstance] = {}
        self._agents: dict[str, "Agent"] = {}  # Live agent objects

        self._load()

    def _load(self) -> None:
        """Load registry from disk."""
        if self.registry_path.exists():
            try:
                with open(self.registry_path) as f:
                    data = json.load(f)
                for item in data.get("instances", []):
                    inst = AgentInstance.from_dict(item)
                    self._instances[inst.instance_id] = inst
            except (json.JSONDecodeError, KeyError):
                # Corrupted registry, start fresh
                self._instances = {}

    def _save(self) -> None:
        """Persist registry to disk."""
        self.base_dir.mkdir(parents=True, exist_ok=True)

        with open(self.registry_path, "w") as f:
            json.dump(
                {
                    "instances": [
                        inst.to_dict()
                        for inst in self._instances.values()
                    ]
                },
                f,
                indent=2,
            )

    def register(
        self,
        agent: "Agent",
        parent_id: str | None = None,
    ) -> str:
        """
        Register an agent instance and return its ID.

        Args:
            agent: The agent to register
            parent_id: Instance ID of the parent agent (if any)

        Returns:
            The new instance ID

        Raises:
            ValueError: If max delegation depth would be exceeded
        """
        instance_id = str(uuid.uuid4())[:8]

        # Calculate depth
        depth = 0
        if parent_id:
            parent = self._instances.get(parent_id)
            if parent:
                depth = parent.depth + 1

        if depth > self.MAX_DEPTH:
            raise ValueError(
                f"Maximum delegation depth ({self.MAX_DEPTH}) exceeded. "
                "Cannot create more sub-agents."
            )

        instance = AgentInstance(
            name=agent.config.name,
            instance_id=instance_id,
            created_at=datetime.now(UTC),
            parent_id=parent_id,
            depth=depth,
        )

        self._instances[instance_id] = instance
        self._agents[instance_id] = agent
        self._save()

        return instance_id

    def get_agent(self, instance_id: str) -> "Agent | None":
        """Get a live agent by instance ID."""
        return self._agents.get(instance_id)

    def get_instance(self, instance_id: str) -> AgentInstance | None:
        """Get instance metadata by ID."""
        return self._instances.get(instance_id)

    def list_all(self) -> list[AgentInstance]:
        """List all registered instances."""
        return list(self._instances.values())

    def list_active(self) -> list[AgentInstance]:
        """List all active instances."""
        return [i for i in self._instances.values() if i.status == "active"]

    def list_children(self, parent_id: str) -> list[AgentInstance]:
        """List all agents spawned by a parent."""
        return [i for i in self._instances.values() if i.parent_id == parent_id]

    def mark_completed(self, instance_id: str) -> None:
        """Mark an agent as completed."""
        if instance_id in self._instances:
            self._instances[instance_id].status = "completed"
            self._save()

    def mark_failed(self, instance_id: str) -> None:
        """Mark an agent as failed."""
        if instance_id in self._instances:
            self._instances[instance_id].status = "failed"
            self._save()

    def cleanup(self, instance_id: str) -> None:
        """Remove an agent instance."""
        if instance_id in self._agents:
            del self._agents[instance_id]

        if instance_id in self._instances:
            del self._instances[instance_id]
            self._save()

    def cleanup_completed(self) -> int:
        """
        Remove all completed/failed instances.

        Returns:
            Number of instances removed
        """
        to_remove = [
            iid
            for iid, inst in self._instances.items()
            if inst.status in ("completed", "failed")
        ]

        for iid in to_remove:
            self.cleanup(iid)

        return len(to_remove)

    def get_depth(self, instance_id: str) -> int:
        """Get the delegation depth of an instance."""
        inst = self._instances.get(instance_id)
        return inst.depth if inst else 0