vaal-code 0.6.850__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92) hide show
  1. vaal/__init__.py +2 -0
  2. vaal/__main__.py +249 -0
  3. vaal/core/__init__.py +0 -0
  4. vaal/core/agent.py +398 -0
  5. vaal/core/auto_fix.py +55 -0
  6. vaal/core/bootstrap.py +255 -0
  7. vaal/core/branching.py +157 -0
  8. vaal/core/bridge.py +184 -0
  9. vaal/core/changelog.py +306 -0
  10. vaal/core/clipboard.py +76 -0
  11. vaal/core/code_review.py +271 -0
  12. vaal/core/collaborative.py +340 -0
  13. vaal/core/command_palette.py +268 -0
  14. vaal/core/config.py +55 -0
  15. vaal/core/context.py +147 -0
  16. vaal/core/cost_tracker.py +57 -0
  17. vaal/core/cot_display.py +198 -0
  18. vaal/core/deps.py +422 -0
  19. vaal/core/env_manager.py +288 -0
  20. vaal/core/error_explain.py +227 -0
  21. vaal/core/font_screen.py +157 -0
  22. vaal/core/generate_icon.py +61 -0
  23. vaal/core/history.py +59 -0
  24. vaal/core/hooks.py +134 -0
  25. vaal/core/image_input.py +151 -0
  26. vaal/core/init_project.py +229 -0
  27. vaal/core/interactive.py +412 -0
  28. vaal/core/keybindings.py +94 -0
  29. vaal/core/llm.py +180 -0
  30. vaal/core/login_screen.py +347 -0
  31. vaal/core/logout_screen.py +201 -0
  32. vaal/core/mcp_client.py +312 -0
  33. vaal/core/memory.py +202 -0
  34. vaal/core/migration.py +321 -0
  35. vaal/core/model_manager.py +458 -0
  36. vaal/core/multiline.py +166 -0
  37. vaal/core/notifications.py +223 -0
  38. vaal/core/oauth.py +229 -0
  39. vaal/core/output_styles.py +80 -0
  40. vaal/core/permissions.py +57 -0
  41. vaal/core/plans.py +197 -0
  42. vaal/core/plugins.py +113 -0
  43. vaal/core/preview_overlay.py +701 -0
  44. vaal/core/preview_server.py +781 -0
  45. vaal/core/progress.py +244 -0
  46. vaal/core/providers.py +780 -0
  47. vaal/core/raw_input.py +530 -0
  48. vaal/core/remote_control.py +191 -0
  49. vaal/core/repl.py +4767 -0
  50. vaal/core/scheduler.py +242 -0
  51. vaal/core/self_repair.py +276 -0
  52. vaal/core/server.py +330 -0
  53. vaal/core/session.py +89 -0
  54. vaal/core/skills.py +148 -0
  55. vaal/core/split_pane.py +198 -0
  56. vaal/core/ssh_remote.py +250 -0
  57. vaal/core/task_manager.py +167 -0
  58. vaal/core/terminal_profile.py +180 -0
  59. vaal/core/terminal_theme.py +201 -0
  60. vaal/core/theme.py +48 -0
  61. vaal/core/theme_screen.py +185 -0
  62. vaal/core/tool_executor.py +105 -0
  63. vaal/core/undo_system.py +313 -0
  64. vaal/core/user_config.py +125 -0
  65. vaal/core/vim_mode.py +30 -0
  66. vaal/core/voice.py +211 -0
  67. vaal/core/watch_mode.py +169 -0
  68. vaal/tools/__init__.py +0 -0
  69. vaal/tools/agent_tools.py +225 -0
  70. vaal/tools/api_tools.py +319 -0
  71. vaal/tools/bash_tool.py +179 -0
  72. vaal/tools/db_tools.py +407 -0
  73. vaal/tools/diff_tools.py +140 -0
  74. vaal/tools/file_tools.py +116 -0
  75. vaal/tools/git_tools.py +311 -0
  76. vaal/tools/interaction_tools.py +58 -0
  77. vaal/tools/lsp_tools.py +304 -0
  78. vaal/tools/memory_tools.py +79 -0
  79. vaal/tools/notebook_tools.py +310 -0
  80. vaal/tools/plugin_tools.py +272 -0
  81. vaal/tools/regex_tools.py +209 -0
  82. vaal/tools/registry.py +57 -0
  83. vaal/tools/search_tools.py +117 -0
  84. vaal/tools/sql_tools.py +222 -0
  85. vaal/tools/ssh_tools.py +36 -0
  86. vaal/tools/todo_tools.py +96 -0
  87. vaal/tools/web_tools.py +152 -0
  88. vaal_code-0.6.850.dist-info/METADATA +106 -0
  89. vaal_code-0.6.850.dist-info/RECORD +92 -0
  90. vaal_code-0.6.850.dist-info/WHEEL +5 -0
  91. vaal_code-0.6.850.dist-info/entry_points.txt +2 -0
  92. vaal_code-0.6.850.dist-info/top_level.txt +1 -0
"""Vaal — Local LLM coding assistant CLI."""

# Package version string; kept in sync with the wheel filename (vaal_code-0.6.850).
__version__ = "0.6.850"
"""Vaal CLI entry point — python -m vaal"""

import argparse
import sys
import os

# Force UTF-8 output on Windows.  Without this, emoji/box-drawing characters in
# the TUI raise UnicodeEncodeError on the default cp1252 console encoding.
if sys.platform == "win32":
    os.system("")  # Spawning any shell command enables VT100 escape sequences on Win10+ consoles
    sys.stdout.reconfigure(encoding="utf-8")
    sys.stderr.reconfigure(encoding="utf-8")

from . import __version__
16
+ def _is_piped_input() -> bool:
17
+ """Check if stdin is piped (not a TTY)."""
18
+ try:
19
+ return not sys.stdin.isatty()
20
+ except Exception:
21
+ return False
22
+
23
+
def _run_single_shot(prompt: str, model: str, json_output: bool, think: bool):
    """Run a single prompt, print response, exit. No REPL.

    Drives a bounded agentic loop: send the prompt, execute any tool calls the
    model requests, feed results back, and repeat until the model answers with
    plain text or the round limit is reached.

    Args:
        prompt: The user's prompt text.
        model: Model name to query.
        json_output: If True, print a structured JSON envelope instead of raw text.
        think: Pass-through flag enabling the model's thinking/reasoning mode.
    """
    import json
    import time
    from .core.config import load_project_instructions
    from .core.llm import (
        chat_with_tools_stream, build_system_prompt, check_connection,
        get_ollama_tools,
    )
    from .core.tool_executor import execute_tool, format_tool_result
    from .core.permissions import PermissionManager
    from .core.history import record_prompt
    # Imported for their side effect of registering tools in the registry —
    # TODO confirm: the names are otherwise unused here.
    from .tools import bash_tool, file_tools, search_tools
    from .tools.registry import all_tools

    if not check_connection():
        print("Error: Cannot connect to Ollama at localhost:11434", file=sys.stderr)
        sys.exit(1)

    project_instructions = load_project_instructions()
    system_prompt = build_system_prompt(project_instructions=project_instructions)
    ollama_tools = get_ollama_tools(all_tools())
    permissions = PermissionManager(mode="yolo")  # Non-interactive = auto-approve

    session_id = "single"
    record_prompt(prompt, model, session_id)

    # Prepend the working directory so the model can resolve relative paths;
    # backslashes normalized to forward slashes for cross-platform consistency.
    cwd = os.getcwd().replace("\\", "/")
    augmented = f"[CWD: {cwd}]\n{prompt}"

    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": augmented},
    ]

    max_rounds = 15  # hard cap on LLM round-trips to avoid infinite tool loops
    final_content = ""

    def auto_approve(name, params):
        # Non-interactive mode: every tool request is approved.
        return True

    for _ in range(max_rounds):
        content = ""
        tool_calls = []

        for chunk in chat_with_tools_stream(messages, ollama_tools, model=model, think=think):
            if chunk.get("content"):
                token = chunk["content"]
                # Skip think tags.  NOTE(review): this drops any chunk that
                # contains a tag, but text streamed *between* the tags in
                # separate chunks is still kept — confirm intended.
                if "<think>" not in token and "</think>" not in token:
                    content += token
            if chunk.get("tool_calls"):
                tool_calls.extend(chunk["tool_calls"])

        content = content.strip()
        final_content = content

        if not tool_calls:
            break

        # Build assistant message with tool_use content blocks for Anthropic
        # models; other providers get a plain string assistant message.
        if "anthropic" in model.lower() or "claude" in model.lower():
            assistant_content = []
            if content:
                assistant_content.append({"type": "text", "text": content})
            for tc in tool_calls:
                func = tc.get("function", {})
                assistant_content.append({
                    "type": "tool_use",
                    # Fall back to a synthetic id when the provider omits one.
                    "id": tc.get("id", f"tool_{func.get('name', 'unknown')}"),
                    "name": func.get("name", ""),
                    "input": func.get("arguments", {}),
                })
            messages.append({"role": "assistant", "content": assistant_content})
        else:
            messages.append({"role": "assistant", "content": content or "(tool calls)"})

        # Execute each requested tool and append its result as a "tool" message.
        for tc in tool_calls:
            func = tc.get("function", {})
            tool_name = func.get("name", "")
            tool_args = func.get("arguments", {})
            tool_use_id = tc.get("id", f"tool_{tool_name}")

            result, executed = execute_tool(tool_name, tool_args, permissions, auto_approve)
            formatted = format_tool_result(tool_name, result)
            messages.append({"role": "tool", "tool_use_id": tool_use_id, "content": formatted})

    if json_output:
        output = {
            "model": model,
            "prompt": prompt,
            "response": final_content,
            "timestamp": time.time(),
        }
        print(json.dumps(output, indent=2))
    else:
        print(final_content)
def main():
    """CLI entry point: parse arguments and dispatch to the right mode.

    Dispatch order: --history / --sessions / --models / --server flags each
    handle-and-return; piped stdin or positional prompt args run single-shot;
    otherwise the interactive REPL starts.
    """
    parser = argparse.ArgumentParser(
        prog="vaal",
        description="Vaal — Local LLM coding assistant",
    )
    parser.add_argument("--version", action="version", version=f"vaal {__version__}")
    parser.add_argument("-m", "--model", default=None, help="Model to use (default: qwen3:8b)")
    parser.add_argument("--mode", choices=["ask", "auto", "yolo"], default="auto",
                        help="Permission mode (default: auto)")
    parser.add_argument("--resume", "-r", default=None, help="Resume a session by ID")
    parser.add_argument("--think", action="store_true", help="Enable thinking/reasoning mode")
    parser.add_argument("--sessions", action="store_true", help="List saved sessions")
    parser.add_argument("--models", action="store_true", help="List available models")
    parser.add_argument("--json", action="store_true", dest="json_output",
                        help="Output response as structured JSON (single-shot mode)")
    parser.add_argument("--server", action="store_true", help="Start OpenAI-compatible API server")
    parser.add_argument("--port", type=int, default=8484, help="Server port (default: 8484)")
    parser.add_argument("--history", action="store_true", help="Show prompt history")
    parser.add_argument("prompt", nargs="*", help="Prompt (if provided, runs single-shot without REPL)")

    args = parser.parse_args()

    # ── --history: show prompt history and exit ──────────────────────
    if args.history:
        from .core.history import load_history, format_history
        entries = load_history(limit=20)
        print(format_history(entries))
        return

    # ── --sessions: list sessions and exit ───────────────────────────
    if args.sessions:
        from .core.session import Session
        from rich.console import Console
        console = Console()
        sessions = Session.list_sessions()
        if not sessions:
            console.print("[dim]No saved sessions.[/dim]")
        for s in sessions[:20]:
            console.print(f"  [cyan]{s['id']}[/cyan] | {s['model']} | {s['messages']} msgs")
        return

    # ── --models: list models and exit ───────────────────────────────
    if args.models:
        from .core.llm import list_models
        from rich.console import Console
        console = Console()
        models = list_models()
        if not models:
            console.print("[red]Cannot connect to Ollama.[/red]")
            return
        for m in models:
            console.print(f"  [cyan]{m}[/cyan]")
        return

    # ── --server: start API server ───────────────────────────────────
    if args.server:
        from .core.config import DEFAULT_MODEL
        from .core.server import run_server
        model = args.model or DEFAULT_MODEL
        run_server(port=args.port, model=model)
        return

    from .core.config import DEFAULT_MODEL
    model = args.model or DEFAULT_MODEL

    # ── Piped stdin: read prompt from pipe ───────────────────────────
    if _is_piped_input():
        piped_text = sys.stdin.read().strip()
        if args.prompt:
            # Combine: positional args as prefix, piped as context
            prompt = " ".join(args.prompt) + "\n\n" + piped_text
        else:
            prompt = piped_text
        if not prompt:
            print("Error: No input provided.", file=sys.stderr)
            sys.exit(1)
        _run_single_shot(prompt, model, args.json_output, args.think)
        return

    # ── Positional prompt args: single-shot mode ─────────────────────
    if args.prompt:
        prompt = " ".join(args.prompt)
        # Clear screen only for interactive, not single-shot
        _run_single_shot(prompt, model, args.json_output, args.think)
        return

    # ── Interactive REPL ─────────────────────────────────────────────
    if sys.platform == "win32":
        w = sys.stdout.write
        w("\033[2J\033[H")  # Clear screen, cursor to top
        # NOTE(review): window title is set to "corrupted", not "vaal" —
        # confirm this branding is intentional.
        w("\033]0;corrupted\007")  # Set window title
        sys.stdout.flush()

    # Apply saved terminal theme (or default Vaal Orb theme)
    from .core.terminal_theme import apply_theme, load_preference
    saved_theme = load_preference()
    apply_theme(saved_theme)

    # Auto-install Windows Terminal profile + icon on first run
    if sys.platform == "win32":
        from .core.terminal_profile import install_profile, get_icon_path
        from pathlib import Path
        icon = Path(get_icon_path())
        if not icon.exists():
            # Generate the Vaal Orb icon; best-effort, failure is non-fatal.
            try:
                from .core.generate_icon import generate_vaal_icon
                generate_vaal_icon()
            except Exception:
                pass
        # Install/update profile silently; best-effort as well.
        try:
            install_profile()
        except Exception:
            pass

    from .core.repl import run_repl
    run_repl(
        model=model,
        permission_mode=args.mode,
        resume=args.resume,
        think=args.think,
    )


if __name__ == "__main__":
    main()
"""Agent system -- manages LLM conversation loops with tool use, including sub-agents."""

import uuid
import time
import threading
from enum import Enum
from dataclasses import dataclass, field
from typing import Optional, Callable

from .config import MAX_TURNS_PER_MESSAGE, DEFAULT_MODEL
from .task_manager import get_task_manager, TaskStatus
class AgentStatus(str, Enum):
    """Lifecycle states of an Agent.

    Subclasses ``str`` so status values serialize and compare as plain strings.
    """
    IDLE = "idle"
    THINKING = "thinking"          # waiting on / streaming an LLM response
    RUNNING_TOOL = "running_tool"  # executing a requested tool call
    COMPLETE = "complete"
    ERROR = "error"
# Human-readable labels for each AgentStatus, for display in status lines.
STATUS_LABELS = {
    AgentStatus.IDLE: "idle",
    AgentStatus.THINKING: "thinking...",
    AgentStatus.RUNNING_TOOL: "running tool...",
    AgentStatus.COMPLETE: "done",
    AgentStatus.ERROR: "error",
}
@dataclass
class AgentResult:
    """The final output of an agent run."""
    agent_id: str                 # ID of the agent that produced this result
    content: str = ""             # last non-empty assistant text from the run
    tool_calls_made: int = 0      # total tool invocations across all rounds
    rounds: int = 0               # number of LLM round-trips performed
    error: Optional[str] = None   # error message if the run failed, else None
    elapsed: float = 0.0          # wall-clock seconds for the whole run
41
+
42
+ @dataclass
43
+ class Agent:
44
+ """A single agentic loop that runs a conversation with tool calling."""
45
+
46
+ id: str = field(default_factory=lambda: str(uuid.uuid4())[:8])
47
+ model: str = DEFAULT_MODEL
48
+ messages: list[dict] = field(default_factory=list)
49
+ tools: list[dict] = field(default_factory=list) # Ollama-format tool defs
50
+ tool_defs: dict = field(default_factory=dict) # Our ToolDef registry subset
51
+ max_rounds: int = MAX_TURNS_PER_MESSAGE
52
+ status: AgentStatus = AgentStatus.IDLE
53
+ result: Optional[AgentResult] = None
54
+ task_id: Optional[str] = None # Linked task in TaskManager
55
+ label: str = "" # Human-readable description
56
+ parent_id: Optional[str] = None
57
+
58
+ # Internal
59
+ _thread: Optional[threading.Thread] = field(default=None, repr=False)
60
+ _on_complete: Optional[Callable] = field(default=None, repr=False)
61
+
62
+ def run(
63
+ self,
64
+ permissions=None,
65
+ ask_fn=None,
66
+ on_status: Optional[Callable] = None,
67
+ ) -> AgentResult:
68
+ """Execute the agentic loop synchronously.
69
+
70
+ Sends messages to the LLM, processes tool calls, feeds results back,
71
+ and repeats until the LLM responds with no tool calls or max_rounds hit.
72
+
73
+ Args:
74
+ permissions: PermissionManager instance for tool approval.
75
+ ask_fn: Callback for interactive tool approval (None for sub-agents = auto-approve safe).
76
+ on_status: Optional callback(agent_id, status) for status updates.
77
+ """
78
+ from .llm import chat_with_tools_stream
79
+ from .tool_executor import execute_tool, format_tool_result
80
+
81
+ start = time.time()
82
+ rounds = 0
83
+ total_tool_calls = 0
84
+ last_content = ""
85
+
86
+ # Mark linked task as in progress
87
+ if self.task_id:
88
+ tm = get_task_manager()
89
+ tm.update(self.task_id, status=TaskStatus.IN_PROGRESS)
90
+
91
+ try:
92
+ while rounds < self.max_rounds:
93
+ rounds += 1
94
+ self.status = AgentStatus.THINKING
95
+ if on_status:
96
+ on_status(self.id, self.status)
97
+
98
+ # Stream the response and collect it
99
+ content_parts = []
100
+ tool_calls = []
101
+
102
+ try:
103
+ for chunk in chat_with_tools_stream(
104
+ self.messages, self.tools, model=self.model
105
+ ):
106
+ if chunk.get("content"):
107
+ content_parts.append(chunk["content"])
108
+ if chunk.get("tool_calls"):
109
+ tool_calls.extend(chunk["tool_calls"])
110
+ except Exception as e:
111
+ self.status = AgentStatus.ERROR
112
+ self.result = AgentResult(
113
+ agent_id=self.id,
114
+ error=str(e),
115
+ rounds=rounds,
116
+ elapsed=time.time() - start,
117
+ )
118
+ if self.task_id:
119
+ tm = get_task_manager()
120
+ tm.update(self.task_id, status=TaskStatus.BLOCKED)
121
+ return self.result
122
+
123
+ content = "".join(content_parts)
124
+ if content:
125
+ last_content = content
126
+
127
+ # Build assistant message — include tool_use content blocks for Anthropic compatibility
128
+ if tool_calls:
129
+ assistant_content = []
130
+ if content:
131
+ assistant_content.append({"type": "text", "text": content})
132
+ for tc in tool_calls:
133
+ func = tc.get("function", {})
134
+ assistant_content.append({
135
+ "type": "tool_use",
136
+ "id": tc.get("id", f"tool_{func.get('name', 'unknown')}"),
137
+ "name": func.get("name", ""),
138
+ "input": func.get("arguments", {}),
139
+ })
140
+ self.messages.append({"role": "assistant", "content": assistant_content})
141
+ else:
142
+ self.messages.append({"role": "assistant", "content": content or ""})
143
+
144
+ if not tool_calls:
145
+ # Done -- no more tool calls
146
+ break
147
+
148
+ # Execute tool calls
149
+ self.status = AgentStatus.RUNNING_TOOL
150
+ if on_status:
151
+ on_status(self.id, self.status)
152
+
153
+ for tc in tool_calls:
154
+ func = tc.get("function", {})
155
+ tool_name = func.get("name", "")
156
+ tool_args = func.get("arguments", {})
157
+ tool_use_id = tc.get("id", f"tool_{tool_name}")
158
+
159
+ if permissions:
160
+ result_text, executed = execute_tool(
161
+ tool_name, tool_args, permissions, ask_fn
162
+ )
163
+ else:
164
+ # Sub-agent without permissions: execute directly
165
+ from ..tools.registry import get_tool
166
+ tool = get_tool(tool_name)
167
+ if tool:
168
+ try:
169
+ result_text = tool.handler(**tool_args)
170
+ executed = True
171
+ except Exception as e:
172
+ result_text = f"[tool error: {e}]"
173
+ executed = False
174
+ else:
175
+ result_text = f"[error: unknown tool '{tool_name}']"
176
+ executed = False
177
+
178
+ total_tool_calls += 1
179
+ formatted = format_tool_result(tool_name, result_text)
180
+ self.messages.append({"role": "tool", "tool_use_id": tool_use_id, "content": formatted})
181
+
182
+ except Exception as e:
183
+ self.status = AgentStatus.ERROR
184
+ self.result = AgentResult(
185
+ agent_id=self.id,
186
+ error=str(e),
187
+ rounds=rounds,
188
+ tool_calls_made=total_tool_calls,
189
+ elapsed=time.time() - start,
190
+ )
191
+ return self.result
192
+
193
+ self.status = AgentStatus.COMPLETE
194
+ if on_status:
195
+ on_status(self.id, self.status)
196
+
197
+ # Mark linked task as complete
198
+ if self.task_id:
199
+ tm = get_task_manager()
200
+ tm.complete(self.task_id)
201
+
202
+ self.result = AgentResult(
203
+ agent_id=self.id,
204
+ content=last_content,
205
+ tool_calls_made=total_tool_calls,
206
+ rounds=rounds,
207
+ elapsed=time.time() - start,
208
+ )
209
+ return self.result
210
+
211
+ def run_in_background(
212
+ self,
213
+ permissions=None,
214
+ ask_fn=None,
215
+ on_complete: Optional[Callable] = None,
216
+ on_status: Optional[Callable] = None,
217
+ ):
218
+ """Start the agent loop in a background thread.
219
+
220
+ Args:
221
+ on_complete: Callback(AgentResult) when agent finishes.
222
+ on_status: Callback(agent_id, status) for status updates.
223
+ """
224
+ self._on_complete = on_complete
225
+
226
+ def _run():
227
+ result = self.run(permissions=permissions, ask_fn=ask_fn, on_status=on_status)
228
+ if self._on_complete:
229
+ self._on_complete(result)
230
+
231
+ self._thread = threading.Thread(target=_run, daemon=True, name=f"agent-{self.id}")
232
+ self._thread.start()
233
+
234
+ @property
235
+ def is_running(self) -> bool:
236
+ return self.status in (AgentStatus.THINKING, AgentStatus.RUNNING_TOOL)
237
+
238
+ @property
239
+ def is_done(self) -> bool:
240
+ return self.status in (AgentStatus.COMPLETE, AgentStatus.ERROR)
241
+
242
+
class AgentPool:
    """Manages multiple concurrent agents (sub-agents).

    All access to the internal agent/result maps is guarded by a single lock;
    agents themselves run on their own daemon threads.
    """

    def __init__(self):
        self._agents: dict[str, Agent] = {}        # agent id -> Agent
        self._results: dict[str, AgentResult] = {}  # agent id -> finished result
        self._lock = threading.Lock()

    def spawn(
        self,
        label: str,
        system_prompt: str,
        user_prompt: str,
        model: str = DEFAULT_MODEL,
        tools: list[dict] = None,
        tool_defs: dict = None,
        max_rounds: int = 10,
        parent_id: str = None,
        permissions=None,
        task_id: str = None,
    ) -> Agent:
        """Create and start a background sub-agent.

        Args:
            label: Human-readable description of what this agent does.
            system_prompt: System message for the agent.
            user_prompt: The task/instruction for the agent.
            model: LLM model to use.
            tools: Ollama-format tool definitions.
            tool_defs: Tool registry definitions.
            max_rounds: Maximum tool-call rounds.
            parent_id: ID of the parent agent.
            permissions: PermissionManager for tool approval.
            task_id: TaskManager task ID to link.

        Returns:
            The spawned Agent (already running in background).
        """
        agent = Agent(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            tools=tools or [],
            tool_defs=tool_defs or {},
            max_rounds=max_rounds,
            label=label,
            parent_id=parent_id,
            task_id=task_id,
        )

        # Link task to agent (back-reference so the task can find its agent)
        if task_id:
            tm = get_task_manager()
            task = tm.get(task_id)
            if task:
                task.agent_id = agent.id

        with self._lock:
            self._agents[agent.id] = agent

        def on_complete(result: AgentResult):
            # Runs on the agent's thread — store under lock.
            with self._lock:
                self._results[agent.id] = result

        # NOTE(review): ask_fn is not forwarded here, so spawned sub-agents
        # never prompt interactively — confirm intended.
        agent.run_in_background(
            permissions=permissions,
            on_complete=on_complete,
        )

        return agent

    def get(self, agent_id: str) -> Optional[Agent]:
        """Get an agent by ID or prefix.

        Exact match wins; otherwise the first agent whose ID starts with the
        given prefix is returned (first in insertion order if ambiguous).
        """
        with self._lock:
            if agent_id in self._agents:
                return self._agents[agent_id]
            for aid, agent in self._agents.items():
                if aid.startswith(agent_id):
                    return agent
        return None

    def get_result(self, agent_id: str) -> Optional[AgentResult]:
        """Get a completed agent's result (None if still running or collected)."""
        with self._lock:
            return self._results.get(agent_id)

    def list_agents(self) -> list[dict]:
        """Return summary info for all agents."""
        with self._lock:
            out = []
            for aid, agent in self._agents.items():
                info = {
                    "id": aid,
                    "label": agent.label,
                    "status": agent.status.value,
                    "model": agent.model,
                    "task_id": agent.task_id,
                    "is_running": agent.is_running,
                }
                result = self._results.get(aid)
                if result:
                    info["rounds"] = result.rounds
                    info["tool_calls"] = result.tool_calls_made
                    info["elapsed"] = f"{result.elapsed:.1f}s"
                    if result.error:
                        info["error"] = result.error
                out.append(info)
            return out

    def collect_completed(self) -> list[AgentResult]:
        """Pop all completed results (for injection into parent conversation).

        Results are removed from the pool; the Agent objects themselves remain
        in the agent map for later inspection via get()/list_agents().
        """
        completed = []
        with self._lock:
            done_ids = [aid for aid, agent in self._agents.items() if agent.is_done]
            for aid in done_ids:
                if aid in self._results:
                    completed.append(self._results.pop(aid))
        return completed

    @property
    def active_count(self) -> int:
        # Number of agents currently thinking or running a tool.
        with self._lock:
            return sum(1 for a in self._agents.values() if a.is_running)

    @property
    def total_count(self) -> int:
        with self._lock:
            return len(self._agents)

    def status_summary(self) -> str:
        """One-line summary for the status bar (empty string when no agents)."""
        active = self.active_count
        total = self.total_count
        if total == 0:
            return ""
        return f"{active}/{total} agents"
# Module-level singleton
_pool: Optional[AgentPool] = None


def get_agent_pool() -> AgentPool:
    """Get or create the global AgentPool.

    NOTE(review): creation is not locked, so two threads racing the first call
    could each build a pool — acceptable for single-threaded CLI startup, but
    confirm no background thread calls this before the REPL does.
    """
    global _pool
    if _pool is None:
        _pool = AgentPool()
    return _pool


def reset_agent_pool():
    """Reset the global pool (for /clear).

    Replaces the singleton with a fresh pool; any agents still running in the
    old pool keep running on their daemon threads but become unreachable.
    """
    global _pool
    _pool = AgentPool()