ragnarbot_ai-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragnarbot/__init__.py +6 -0
- ragnarbot/__main__.py +8 -0
- ragnarbot/agent/__init__.py +8 -0
- ragnarbot/agent/context.py +223 -0
- ragnarbot/agent/loop.py +365 -0
- ragnarbot/agent/memory.py +109 -0
- ragnarbot/agent/skills.py +228 -0
- ragnarbot/agent/subagent.py +241 -0
- ragnarbot/agent/tools/__init__.py +6 -0
- ragnarbot/agent/tools/base.py +102 -0
- ragnarbot/agent/tools/cron.py +114 -0
- ragnarbot/agent/tools/filesystem.py +191 -0
- ragnarbot/agent/tools/message.py +86 -0
- ragnarbot/agent/tools/registry.py +73 -0
- ragnarbot/agent/tools/shell.py +141 -0
- ragnarbot/agent/tools/spawn.py +65 -0
- ragnarbot/agent/tools/web.py +163 -0
- ragnarbot/bus/__init__.py +6 -0
- ragnarbot/bus/events.py +37 -0
- ragnarbot/bus/queue.py +81 -0
- ragnarbot/channels/__init__.py +6 -0
- ragnarbot/channels/base.py +121 -0
- ragnarbot/channels/manager.py +129 -0
- ragnarbot/channels/telegram.py +302 -0
- ragnarbot/cli/__init__.py +1 -0
- ragnarbot/cli/commands.py +568 -0
- ragnarbot/config/__init__.py +6 -0
- ragnarbot/config/loader.py +95 -0
- ragnarbot/config/schema.py +114 -0
- ragnarbot/cron/__init__.py +6 -0
- ragnarbot/cron/service.py +346 -0
- ragnarbot/cron/types.py +59 -0
- ragnarbot/heartbeat/__init__.py +5 -0
- ragnarbot/heartbeat/service.py +130 -0
- ragnarbot/providers/__init__.py +6 -0
- ragnarbot/providers/base.py +69 -0
- ragnarbot/providers/litellm_provider.py +135 -0
- ragnarbot/providers/transcription.py +67 -0
- ragnarbot/session/__init__.py +5 -0
- ragnarbot/session/manager.py +202 -0
- ragnarbot/skills/README.md +24 -0
- ragnarbot/skills/cron/SKILL.md +40 -0
- ragnarbot/skills/github/SKILL.md +48 -0
- ragnarbot/skills/skill-creator/SKILL.md +371 -0
- ragnarbot/skills/summarize/SKILL.md +67 -0
- ragnarbot/skills/tmux/SKILL.md +121 -0
- ragnarbot/skills/tmux/scripts/find-sessions.sh +112 -0
- ragnarbot/skills/tmux/scripts/wait-for-text.sh +83 -0
- ragnarbot/skills/weather/SKILL.md +49 -0
- ragnarbot/utils/__init__.py +5 -0
- ragnarbot/utils/helpers.py +91 -0
- ragnarbot_ai-0.1.0.dist-info/METADATA +28 -0
- ragnarbot_ai-0.1.0.dist-info/RECORD +56 -0
- ragnarbot_ai-0.1.0.dist-info/WHEEL +4 -0
- ragnarbot_ai-0.1.0.dist-info/entry_points.txt +2 -0
- ragnarbot_ai-0.1.0.dist-info/licenses/LICENSE +22 -0

ragnarbot/agent/memory.py
@@ -0,0 +1,109 @@
"""Memory system for persistent agent memory."""

from pathlib import Path
from datetime import datetime

from ragnarbot.utils.helpers import ensure_dir, today_date


class MemoryStore:
    """
    Memory system for the agent.

    Supports daily notes (memory/YYYY-MM-DD.md) and long-term memory (MEMORY.md).
    """

    def __init__(self, workspace: Path):
        self.workspace = workspace
        self.memory_dir = ensure_dir(workspace / "memory")
        self.memory_file = self.memory_dir / "MEMORY.md"

    def get_today_file(self) -> Path:
        """Get path to today's memory file."""
        return self.memory_dir / f"{today_date()}.md"

    def read_today(self) -> str:
        """Read today's memory notes."""
        today_file = self.get_today_file()
        if today_file.exists():
            return today_file.read_text(encoding="utf-8")
        return ""

    def append_today(self, content: str) -> None:
        """Append content to today's memory notes."""
        today_file = self.get_today_file()

        if today_file.exists():
            existing = today_file.read_text(encoding="utf-8")
            content = existing + "\n" + content
        else:
            # Add header for new day
            header = f"# {today_date()}\n\n"
            content = header + content

        today_file.write_text(content, encoding="utf-8")

    def read_long_term(self) -> str:
        """Read long-term memory (MEMORY.md)."""
        if self.memory_file.exists():
            return self.memory_file.read_text(encoding="utf-8")
        return ""

    def write_long_term(self, content: str) -> None:
        """Write to long-term memory (MEMORY.md)."""
        self.memory_file.write_text(content, encoding="utf-8")

    def get_recent_memories(self, days: int = 7) -> str:
        """
        Get memories from the last N days.

        Args:
            days: Number of days to look back.

        Returns:
            Combined memory content.
        """
        from datetime import timedelta

        memories = []
        today = datetime.now().date()

        for i in range(days):
            date = today - timedelta(days=i)
            date_str = date.strftime("%Y-%m-%d")
            file_path = self.memory_dir / f"{date_str}.md"

            if file_path.exists():
                content = file_path.read_text(encoding="utf-8")
                memories.append(content)

        return "\n\n---\n\n".join(memories)

    def list_memory_files(self) -> list[Path]:
        """List all memory files sorted by date (newest first)."""
        if not self.memory_dir.exists():
            return []

        files = list(self.memory_dir.glob("????-??-??.md"))
        return sorted(files, reverse=True)

    def get_memory_context(self) -> str:
        """
        Get memory context for the agent.

        Returns:
            Formatted memory context including long-term and recent memories.
        """
        parts = []

        # Long-term memory
        long_term = self.read_long_term()
        if long_term:
            parts.append("## Long-term Memory\n" + long_term)

        # Today's notes
        today = self.read_today()
        if today:
            parts.append("## Today's Notes\n" + today)

        return "\n\n".join(parts) if parts else ""
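
For orientation, a minimal usage sketch of MemoryStore as defined above. The workspace path is an assumption for illustration; in practice it comes from ragnarbot's configuration.

    from pathlib import Path
    from ragnarbot.agent.memory import MemoryStore

    # Hypothetical workspace location, not taken from the package config.
    store = MemoryStore(Path("~/.ragnarbot/workspace").expanduser())

    # Appends to memory/<today>.md; a "# YYYY-MM-DD" header is added on the
    # first write of the day.
    store.append_today("- User prefers metric units.")

    # Combined context block (MEMORY.md + today's notes) that the agent can
    # inject into its prompt.
    print(store.get_memory_context())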

ragnarbot/agent/skills.py
@@ -0,0 +1,228 @@
"""Skills loader for agent capabilities."""

import json
import os
import re
import shutil
from pathlib import Path

# Default builtin skills directory (relative to this file)
BUILTIN_SKILLS_DIR = Path(__file__).parent.parent / "skills"


class SkillsLoader:
    """
    Loader for agent skills.

    Skills are markdown files (SKILL.md) that teach the agent how to use
    specific tools or perform certain tasks.
    """

    def __init__(self, workspace: Path, builtin_skills_dir: Path | None = None):
        self.workspace = workspace
        self.workspace_skills = workspace / "skills"
        self.builtin_skills = builtin_skills_dir or BUILTIN_SKILLS_DIR

    def list_skills(self, filter_unavailable: bool = True) -> list[dict[str, str]]:
        """
        List all available skills.

        Args:
            filter_unavailable: If True, filter out skills with unmet requirements.

        Returns:
            List of skill info dicts with 'name', 'path', 'source'.
        """
        skills = []

        # Workspace skills (highest priority)
        if self.workspace_skills.exists():
            for skill_dir in self.workspace_skills.iterdir():
                if skill_dir.is_dir():
                    skill_file = skill_dir / "SKILL.md"
                    if skill_file.exists():
                        skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "workspace"})

        # Built-in skills
        if self.builtin_skills and self.builtin_skills.exists():
            for skill_dir in self.builtin_skills.iterdir():
                if skill_dir.is_dir():
                    skill_file = skill_dir / "SKILL.md"
                    if skill_file.exists() and not any(s["name"] == skill_dir.name for s in skills):
                        skills.append({"name": skill_dir.name, "path": str(skill_file), "source": "builtin"})

        # Filter by requirements
        if filter_unavailable:
            return [s for s in skills if self._check_requirements(self._get_skill_meta(s["name"]))]
        return skills

    def load_skill(self, name: str) -> str | None:
        """
        Load a skill by name.

        Args:
            name: Skill name (directory name).

        Returns:
            Skill content or None if not found.
        """
        # Check workspace first
        workspace_skill = self.workspace_skills / name / "SKILL.md"
        if workspace_skill.exists():
            return workspace_skill.read_text(encoding="utf-8")

        # Check built-in
        if self.builtin_skills:
            builtin_skill = self.builtin_skills / name / "SKILL.md"
            if builtin_skill.exists():
                return builtin_skill.read_text(encoding="utf-8")

        return None

    def load_skills_for_context(self, skill_names: list[str]) -> str:
        """
        Load specific skills for inclusion in agent context.

        Args:
            skill_names: List of skill names to load.

        Returns:
            Formatted skills content.
        """
        parts = []
        for name in skill_names:
            content = self.load_skill(name)
            if content:
                content = self._strip_frontmatter(content)
                parts.append(f"### Skill: {name}\n\n{content}")

        return "\n\n---\n\n".join(parts) if parts else ""

    def build_skills_summary(self) -> str:
        """
        Build a summary of all skills (name, description, path, availability).

        This is used for progressive loading - the agent can read the full
        skill content using read_file when needed.

        Returns:
            XML-formatted skills summary.
        """
        all_skills = self.list_skills(filter_unavailable=False)
        if not all_skills:
            return ""

        def escape_xml(s: str) -> str:
            return s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")

        lines = ["<skills>"]
        for s in all_skills:
            name = escape_xml(s["name"])
            path = s["path"]
            desc = escape_xml(self._get_skill_description(s["name"]))
            skill_meta = self._get_skill_meta(s["name"])
            available = self._check_requirements(skill_meta)

            lines.append(f"  <skill available=\"{str(available).lower()}\">")
            lines.append(f"    <name>{name}</name>")
            lines.append(f"    <description>{desc}</description>")
            lines.append(f"    <location>{path}</location>")

            # Show missing requirements for unavailable skills
            if not available:
                missing = self._get_missing_requirements(skill_meta)
                if missing:
                    lines.append(f"    <requires>{escape_xml(missing)}</requires>")

            lines.append(f"  </skill>")
        lines.append("</skills>")

        return "\n".join(lines)

    def _get_missing_requirements(self, skill_meta: dict) -> str:
        """Get a description of missing requirements."""
        missing = []
        requires = skill_meta.get("requires", {})
        for b in requires.get("bins", []):
            if not shutil.which(b):
                missing.append(f"CLI: {b}")
        for env in requires.get("env", []):
            if not os.environ.get(env):
                missing.append(f"ENV: {env}")
        return ", ".join(missing)

    def _get_skill_description(self, name: str) -> str:
        """Get the description of a skill from its frontmatter."""
        meta = self.get_skill_metadata(name)
        if meta and meta.get("description"):
            return meta["description"]
        return name  # Fallback to skill name

    def _strip_frontmatter(self, content: str) -> str:
        """Remove YAML frontmatter from markdown content."""
        if content.startswith("---"):
            match = re.match(r"^---\n.*?\n---\n", content, re.DOTALL)
            if match:
                return content[match.end():].strip()
        return content

    def _parse_ragnarbot_metadata(self, raw: str) -> dict:
        """Parse ragnarbot metadata JSON from frontmatter."""
        try:
            data = json.loads(raw)
            return data.get("ragnarbot", {}) if isinstance(data, dict) else {}
        except (json.JSONDecodeError, TypeError):
            return {}

    def _check_requirements(self, skill_meta: dict) -> bool:
        """Check if skill requirements are met (bins, env vars)."""
        requires = skill_meta.get("requires", {})
        for b in requires.get("bins", []):
            if not shutil.which(b):
                return False
        for env in requires.get("env", []):
            if not os.environ.get(env):
                return False
        return True

    def _get_skill_meta(self, name: str) -> dict:
        """Get ragnarbot metadata for a skill (cached in frontmatter)."""
        meta = self.get_skill_metadata(name) or {}
        return self._parse_ragnarbot_metadata(meta.get("metadata", ""))

    def get_always_skills(self) -> list[str]:
        """Get skills marked as always=true that meet requirements."""
        result = []
        for s in self.list_skills(filter_unavailable=True):
            meta = self.get_skill_metadata(s["name"]) or {}
            skill_meta = self._parse_ragnarbot_metadata(meta.get("metadata", ""))
            if skill_meta.get("always") or meta.get("always"):
                result.append(s["name"])
        return result

    def get_skill_metadata(self, name: str) -> dict | None:
        """
        Get metadata from a skill's frontmatter.

        Args:
            name: Skill name.

        Returns:
            Metadata dict or None.
        """
        content = self.load_skill(name)
        if not content:
            return None

        if content.startswith("---"):
            match = re.match(r"^---\n(.*?)\n---", content, re.DOTALL)
            if match:
                # Simple YAML parsing
                metadata = {}
                for line in match.group(1).split("\n"):
                    if ":" in line:
                        key, value = line.split(":", 1)
                        metadata[key.strip()] = value.strip().strip('"\'')
                return metadata

        return None
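
The frontmatter conventions the loader expects can be inferred from get_skill_metadata() and _parse_ragnarbot_metadata(): a "---" block of key: value lines, optionally including a single-line metadata JSON that carries a ragnarbot object (always, requires.bins, requires.env). A hypothetical sketch follows; the skill name, workspace layout, and frontmatter contents are illustrative, not copied from a bundled SKILL.md.

    from pathlib import Path
    import tempfile

    from ragnarbot.agent.skills import SkillsLoader

    # Hypothetical workspace with one skill.
    workspace = Path(tempfile.mkdtemp())
    skill_dir = workspace / "skills" / "hello"
    skill_dir.mkdir(parents=True)
    (skill_dir / "SKILL.md").write_text(
        "---\n"
        "name: hello\n"
        "description: Say hello from the shell\n"
        'metadata: {"ragnarbot": {"always": false, "requires": {"bins": ["echo"]}}}\n'
        "---\n"
        "Run `echo hello` when the user asks for a greeting.\n",
        encoding="utf-8",
    )

    loader = SkillsLoader(workspace)
    print(loader.list_skills())           # workspace skills are listed ahead of builtins
    print(loader.build_skills_summary())  # <skills>...</skills> block for the system prompt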

ragnarbot/agent/subagent.py
@@ -0,0 +1,241 @@
"""Subagent manager for background task execution."""

import asyncio
import json
import uuid
from pathlib import Path
from typing import Any

from loguru import logger

from ragnarbot.bus.events import InboundMessage
from ragnarbot.bus.queue import MessageBus
from ragnarbot.providers.base import LLMProvider
from ragnarbot.agent.tools.registry import ToolRegistry
from ragnarbot.agent.tools.filesystem import ReadFileTool, WriteFileTool, ListDirTool
from ragnarbot.agent.tools.shell import ExecTool
from ragnarbot.agent.tools.web import WebSearchTool, WebFetchTool


class SubagentManager:
    """
    Manages background subagent execution.

    Subagents are lightweight agent instances that run in the background
    to handle specific tasks. They share the same LLM provider but have
    isolated context and a focused system prompt.
    """

    def __init__(
        self,
        provider: LLMProvider,
        workspace: Path,
        bus: MessageBus,
        model: str | None = None,
        brave_api_key: str | None = None,
        exec_config: "ExecToolConfig | None" = None,
    ):
        from ragnarbot.config.schema import ExecToolConfig
        self.provider = provider
        self.workspace = workspace
        self.bus = bus
        self.model = model or provider.get_default_model()
        self.brave_api_key = brave_api_key
        self.exec_config = exec_config or ExecToolConfig()
        self._running_tasks: dict[str, asyncio.Task[None]] = {}

    async def spawn(
        self,
        task: str,
        label: str | None = None,
        origin_channel: str = "cli",
        origin_chat_id: str = "direct",
    ) -> str:
        """
        Spawn a subagent to execute a task in the background.

        Args:
            task: The task description for the subagent.
            label: Optional human-readable label for the task.
            origin_channel: The channel to announce results to.
            origin_chat_id: The chat ID to announce results to.

        Returns:
            Status message indicating the subagent was started.
        """
        task_id = str(uuid.uuid4())[:8]
        display_label = label or task[:30] + ("..." if len(task) > 30 else "")

        origin = {
            "channel": origin_channel,
            "chat_id": origin_chat_id,
        }

        # Create background task
        bg_task = asyncio.create_task(
            self._run_subagent(task_id, task, display_label, origin)
        )
        self._running_tasks[task_id] = bg_task

        # Cleanup when done
        bg_task.add_done_callback(lambda _: self._running_tasks.pop(task_id, None))

        logger.info(f"Spawned subagent [{task_id}]: {display_label}")
        return f"Subagent [{display_label}] started (id: {task_id}). I'll notify you when it completes."

    async def _run_subagent(
        self,
        task_id: str,
        task: str,
        label: str,
        origin: dict[str, str],
    ) -> None:
        """Execute the subagent task and announce the result."""
        logger.info(f"Subagent [{task_id}] starting task: {label}")

        try:
            # Build subagent tools (no message tool, no spawn tool)
            tools = ToolRegistry()
            tools.register(ReadFileTool())
            tools.register(WriteFileTool())
            tools.register(ListDirTool())
            tools.register(ExecTool(
                working_dir=str(self.workspace),
                timeout=self.exec_config.timeout,
                restrict_to_workspace=self.exec_config.restrict_to_workspace,
            ))
            tools.register(WebSearchTool(api_key=self.brave_api_key))
            tools.register(WebFetchTool())

            # Build messages with subagent-specific prompt
            system_prompt = self._build_subagent_prompt(task)
            messages: list[dict[str, Any]] = [
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": task},
            ]

            # Run agent loop (limited iterations)
            max_iterations = 15
            iteration = 0
            final_result: str | None = None

            while iteration < max_iterations:
                iteration += 1

                response = await self.provider.chat(
                    messages=messages,
                    tools=tools.get_definitions(),
                    model=self.model,
                )

                if response.has_tool_calls:
                    # Add assistant message with tool calls
                    tool_call_dicts = [
                        {
                            "id": tc.id,
                            "type": "function",
                            "function": {
                                "name": tc.name,
                                "arguments": json.dumps(tc.arguments),
                            },
                        }
                        for tc in response.tool_calls
                    ]
                    messages.append({
                        "role": "assistant",
                        "content": response.content or "",
                        "tool_calls": tool_call_dicts,
                    })

                    # Execute tools
                    for tool_call in response.tool_calls:
                        args_str = json.dumps(tool_call.arguments)
                        logger.debug(f"Subagent [{task_id}] executing: {tool_call.name} with arguments: {args_str}")
                        result = await tools.execute(tool_call.name, tool_call.arguments)
                        messages.append({
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "name": tool_call.name,
                            "content": result,
                        })
                else:
                    final_result = response.content
                    break

            if final_result is None:
                final_result = "Task completed but no final response was generated."

            logger.info(f"Subagent [{task_id}] completed successfully")
            await self._announce_result(task_id, label, task, final_result, origin, "ok")

        except Exception as e:
            error_msg = f"Error: {str(e)}"
            logger.error(f"Subagent [{task_id}] failed: {e}")
            await self._announce_result(task_id, label, task, error_msg, origin, "error")

    async def _announce_result(
        self,
        task_id: str,
        label: str,
        task: str,
        result: str,
        origin: dict[str, str],
        status: str,
    ) -> None:
        """Announce the subagent result to the main agent via the message bus."""
        status_text = "completed successfully" if status == "ok" else "failed"

        announce_content = f"""[Subagent '{label}' {status_text}]

Task: {task}

Result:
{result}

Summarize this naturally for the user. Keep it brief (1-2 sentences). Do not mention technical details like "subagent" or task IDs."""

        # Inject as system message to trigger main agent
        msg = InboundMessage(
            channel="system",
            sender_id="subagent",
            chat_id=f"{origin['channel']}:{origin['chat_id']}",
            content=announce_content,
        )

        await self.bus.publish_inbound(msg)
        logger.debug(f"Subagent [{task_id}] announced result to {origin['channel']}:{origin['chat_id']}")

    def _build_subagent_prompt(self, task: str) -> str:
        """Build a focused system prompt for the subagent."""
        return f"""# Subagent

You are a subagent spawned by the main agent to complete a specific task.

## Your Task
{task}

## Rules
1. Stay focused - complete only the assigned task, nothing else
2. Your final response will be reported back to the main agent
3. Do not initiate conversations or take on side tasks
4. Be concise but informative in your findings

## What You Can Do
- Read and write files in the workspace
- Execute shell commands
- Search the web and fetch web pages
- Complete the task thoroughly

## What You Cannot Do
- Send messages directly to users (no message tool available)
- Spawn other subagents
- Access the main agent's conversation history

## Workspace
Your workspace is at: {self.workspace}

When you have completed the task, provide a clear summary of your findings or actions."""

    def get_running_count(self) -> int:
        """Return the number of currently running subagents."""
        return len(self._running_tasks)
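
To make the spawn/announce flow concrete, here is a minimal sketch that drives SubagentManager with a stub provider. The FakeResponse/FakeProvider test double and the zero-argument MessageBus() constructor are assumptions made for illustration only; the package's real provider lives in providers/litellm_provider.py and may require configuration.

    import asyncio
    from dataclasses import dataclass, field
    from pathlib import Path
    from typing import Any

    from ragnarbot.bus.queue import MessageBus
    from ragnarbot.agent.subagent import SubagentManager

    # Stub standing in for the real LLMProvider: it only satisfies the calls
    # SubagentManager makes (get_default_model, chat returning an object with
    # .content / .has_tool_calls / .tool_calls).
    @dataclass
    class FakeResponse:
        content: str
        has_tool_calls: bool = False
        tool_calls: list[Any] = field(default_factory=list)

    class FakeProvider:
        def get_default_model(self) -> str:
            return "fake-model"

        async def chat(self, messages: list[dict[str, Any]], tools: list[dict[str, Any]], model: str) -> FakeResponse:
            return FakeResponse(content="Done: nothing to report.")

    async def main() -> None:
        bus = MessageBus()  # assumed to take no required arguments
        manager = SubagentManager(provider=FakeProvider(), workspace=Path("./workspace"), bus=bus)

        # spawn() returns immediately; the result later arrives on the bus as an
        # InboundMessage on the "system" channel, addressed back to the origin chat.
        print(await manager.spawn("Summarize README.md", label="readme summary"))
        await asyncio.sleep(0.1)  # let the background task publish its announcement
        print("running:", manager.get_running_count())

    asyncio.run(main())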

ragnarbot/agent/tools/base.py
@@ -0,0 +1,102 @@
"""Base class for agent tools."""

from abc import ABC, abstractmethod
from typing import Any


class Tool(ABC):
    """
    Abstract base class for agent tools.

    Tools are capabilities that the agent can use to interact with
    the environment, such as reading files, executing commands, etc.
    """

    _TYPE_MAP = {
        "string": str,
        "integer": int,
        "number": (int, float),
        "boolean": bool,
        "array": list,
        "object": dict,
    }

    @property
    @abstractmethod
    def name(self) -> str:
        """Tool name used in function calls."""
        pass

    @property
    @abstractmethod
    def description(self) -> str:
        """Description of what the tool does."""
        pass

    @property
    @abstractmethod
    def parameters(self) -> dict[str, Any]:
        """JSON Schema for tool parameters."""
        pass

    @abstractmethod
    async def execute(self, **kwargs: Any) -> str:
        """
        Execute the tool with given parameters.

        Args:
            **kwargs: Tool-specific parameters.

        Returns:
            String result of the tool execution.
        """
        pass

    def validate_params(self, params: dict[str, Any]) -> list[str]:
        """Validate tool parameters against JSON schema. Returns error list (empty if valid)."""
        schema = self.parameters or {}
        if schema.get("type", "object") != "object":
            raise ValueError(f"Schema must be object type, got {schema.get('type')!r}")
        return self._validate(params, {**schema, "type": "object"}, "")

    def _validate(self, val: Any, schema: dict[str, Any], path: str) -> list[str]:
        t, label = schema.get("type"), path or "parameter"
        if t in self._TYPE_MAP and not isinstance(val, self._TYPE_MAP[t]):
            return [f"{label} should be {t}"]

        errors = []
        if "enum" in schema and val not in schema["enum"]:
            errors.append(f"{label} must be one of {schema['enum']}")
        if t in ("integer", "number"):
            if "minimum" in schema and val < schema["minimum"]:
                errors.append(f"{label} must be >= {schema['minimum']}")
            if "maximum" in schema and val > schema["maximum"]:
                errors.append(f"{label} must be <= {schema['maximum']}")
        if t == "string":
            if "minLength" in schema and len(val) < schema["minLength"]:
                errors.append(f"{label} must be at least {schema['minLength']} chars")
            if "maxLength" in schema and len(val) > schema["maxLength"]:
                errors.append(f"{label} must be at most {schema['maxLength']} chars")
        if t == "object":
            props = schema.get("properties", {})
            for k in schema.get("required", []):
                if k not in val:
                    errors.append(f"missing required {path + '.' + k if path else k}")
            for k, v in val.items():
                if k in props:
                    errors.extend(self._validate(v, props[k], path + '.' + k if path else k))
        if t == "array" and "items" in schema:
            for i, item in enumerate(val):
                errors.extend(self._validate(item, schema["items"], f"{path}[{i}]" if path else f"[{i}]"))
        return errors

    def to_schema(self) -> dict[str, Any]:
        """Convert tool to OpenAI function schema format."""
        return {
            "type": "function",
            "function": {
                "name": self.name,
                "description": self.description,
                "parameters": self.parameters,
            }
        }
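
Since Tool is the extension point for everything under agent/tools/, here is an illustrative subclass (not part of the package) showing the three abstract properties plus execute(), and the validation and schema conversion a subclass inherits for free:

    from typing import Any

    from ragnarbot.agent.tools.base import Tool

    # Hypothetical tool used only to demonstrate the Tool interface.
    class EchoTool(Tool):
        @property
        def name(self) -> str:
            return "echo"

        @property
        def description(self) -> str:
            return "Echo a message back, optionally repeated."

        @property
        def parameters(self) -> dict[str, Any]:
            return {
                "type": "object",
                "properties": {
                    "message": {"type": "string", "minLength": 1},
                    "times": {"type": "integer", "minimum": 1, "maximum": 5},
                },
                "required": ["message"],
            }

        async def execute(self, **kwargs: Any) -> str:
            return " ".join([kwargs["message"]] * kwargs.get("times", 1))

    tool = EchoTool()
    print(tool.validate_params({"times": 0}))   # ['missing required message', 'times must be >= 1']
    print(tool.to_schema()["function"]["name"])  # 'echo'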