emdash-core 0.1.33__py3-none-any.whl → 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/agent/agents.py +93 -23
- emdash_core/agent/background.py +481 -0
- emdash_core/agent/hooks.py +419 -0
- emdash_core/agent/inprocess_subagent.py +114 -10
- emdash_core/agent/mcp/config.py +78 -2
- emdash_core/agent/prompts/main_agent.py +88 -1
- emdash_core/agent/prompts/plan_mode.py +65 -44
- emdash_core/agent/prompts/subagents.py +96 -8
- emdash_core/agent/prompts/workflow.py +215 -50
- emdash_core/agent/providers/models.py +1 -1
- emdash_core/agent/providers/openai_provider.py +10 -0
- emdash_core/agent/research/researcher.py +154 -45
- emdash_core/agent/runner/agent_runner.py +157 -19
- emdash_core/agent/runner/context.py +28 -9
- emdash_core/agent/runner/sdk_runner.py +29 -2
- emdash_core/agent/skills.py +81 -1
- emdash_core/agent/toolkit.py +87 -11
- emdash_core/agent/toolkits/__init__.py +117 -18
- emdash_core/agent/toolkits/base.py +87 -2
- emdash_core/agent/toolkits/explore.py +18 -0
- emdash_core/agent/toolkits/plan.py +18 -0
- emdash_core/agent/tools/__init__.py +2 -0
- emdash_core/agent/tools/coding.py +344 -52
- emdash_core/agent/tools/lsp.py +361 -0
- emdash_core/agent/tools/skill.py +21 -1
- emdash_core/agent/tools/task.py +27 -23
- emdash_core/agent/tools/task_output.py +262 -32
- emdash_core/agent/verifier/__init__.py +11 -0
- emdash_core/agent/verifier/manager.py +295 -0
- emdash_core/agent/verifier/models.py +97 -0
- emdash_core/{swarm/worktree_manager.py → agent/worktree.py} +19 -1
- emdash_core/api/agent.py +451 -5
- emdash_core/api/research.py +3 -3
- emdash_core/api/router.py +0 -4
- emdash_core/context/longevity.py +197 -0
- emdash_core/context/providers/explored_areas.py +83 -39
- emdash_core/context/reranker.py +35 -144
- emdash_core/context/simple_reranker.py +500 -0
- emdash_core/context/tool_relevance.py +84 -0
- emdash_core/core/config.py +8 -0
- emdash_core/graph/__init__.py +8 -1
- emdash_core/graph/connection.py +24 -3
- emdash_core/graph/writer.py +7 -1
- emdash_core/ingestion/repository.py +17 -198
- emdash_core/models/agent.py +14 -0
- emdash_core/server.py +1 -6
- emdash_core/sse/stream.py +16 -1
- emdash_core/utils/__init__.py +0 -2
- emdash_core/utils/git.py +103 -0
- emdash_core/utils/image.py +147 -160
- {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/METADATA +7 -5
- {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/RECORD +54 -58
- emdash_core/api/swarm.py +0 -223
- emdash_core/db/__init__.py +0 -67
- emdash_core/db/auth.py +0 -134
- emdash_core/db/models.py +0 -91
- emdash_core/db/provider.py +0 -222
- emdash_core/db/providers/__init__.py +0 -5
- emdash_core/db/providers/supabase.py +0 -452
- emdash_core/swarm/__init__.py +0 -17
- emdash_core/swarm/merge_agent.py +0 -383
- emdash_core/swarm/session_manager.py +0 -274
- emdash_core/swarm/swarm_runner.py +0 -226
- emdash_core/swarm/task_definition.py +0 -137
- emdash_core/swarm/worker_spawner.py +0 -319
- {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/WHEEL +0 -0
- {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/entry_points.txt +0 -0
emdash_core/agent/hooks.py (new file)

```diff
@@ -0,0 +1,419 @@
+"""Hook system for running commands on agent events.
+
+Hooks allow users to run shell commands when specific events occur
+during agent execution. Hooks are configured per-project in
+.emdash/hooks.json and run asynchronously (non-blocking).
+
+Example .emdash/hooks.json:
+    {
+        "hooks": [
+            {
+                "id": "notify-done",
+                "event": "session_end",
+                "command": "notify-send 'Agent finished'",
+                "enabled": true
+            }
+        ]
+    }
+"""
+
+from dataclasses import dataclass, field, asdict
+from enum import Enum
+from pathlib import Path
+from typing import Any
+import json
+import os
+import subprocess
+import threading
+
+from .events import AgentEvent, EventHandler, EventType
+from ..utils.logger import log
+
+
+class HookEventType(str, Enum):
+    """Event types that can trigger hooks.
+
+    This is a subset of EventType exposed for hook configuration.
+    """
+    TOOL_START = "tool_start"
+    TOOL_RESULT = "tool_result"
+    SESSION_START = "session_start"
+    SESSION_END = "session_end"
+    RESPONSE = "response"
+    ERROR = "error"
+
+    @classmethod
+    def from_event_type(cls, event_type: EventType) -> "HookEventType | None":
+        """Convert an EventType to HookEventType if mappable."""
+        mapping = {
+            EventType.TOOL_START: cls.TOOL_START,
+            EventType.TOOL_RESULT: cls.TOOL_RESULT,
+            EventType.SESSION_START: cls.SESSION_START,
+            EventType.SESSION_END: cls.SESSION_END,
+            EventType.RESPONSE: cls.RESPONSE,
+            EventType.ERROR: cls.ERROR,
+        }
+        return mapping.get(event_type)
+
+
+@dataclass
+class HookEventData:
+    """Data passed to hook commands via stdin as JSON.
+
+    Attributes:
+        event: The event type that triggered the hook
+        timestamp: ISO format timestamp of when the event occurred
+        session_id: The session ID (if available)
+
+        # Tool-specific fields (for tool_start, tool_result)
+        tool_name: Name of the tool being executed
+        tool_args: Arguments passed to the tool (tool_start only)
+        tool_result: Result summary from the tool (tool_result only)
+        tool_success: Whether the tool succeeded (tool_result only)
+        tool_error: Error message if tool failed (tool_result only)
+
+        # Response fields (for response event)
+        response_text: The response content
+
+        # Session fields
+        goal: The goal/query for the session (session_start only)
+        success: Whether the session completed successfully (session_end only)
+
+        # Error fields
+        error_message: Error message (error event only)
+        error_details: Additional error details (error event only)
+    """
+    event: str
+    timestamp: str
+    session_id: str | None = None
+
+    # Tool fields
+    tool_name: str | None = None
+    tool_args: dict[str, Any] | None = None
+    tool_result: str | None = None
+    tool_success: bool | None = None
+    tool_error: str | None = None
+
+    # Response fields
+    response_text: str | None = None
+
+    # Session fields
+    goal: str | None = None
+    success: bool | None = None
+
+    # Error fields
+    error_message: str | None = None
+    error_details: str | None = None
+
+    def to_json(self) -> str:
+        """Convert to JSON string, excluding None values."""
+        data = {k: v for k, v in asdict(self).items() if v is not None}
+        return json.dumps(data)
+
+    def to_env_vars(self) -> dict[str, str]:
+        """Convert to environment variables for quick access.
+
+        Returns a dict of EMDASH_* prefixed env vars.
+        """
+        env = {
+            "EMDASH_EVENT": self.event,
+            "EMDASH_TIMESTAMP": self.timestamp,
+        }
+        if self.session_id:
+            env["EMDASH_SESSION_ID"] = self.session_id
+        if self.tool_name:
+            env["EMDASH_TOOL_NAME"] = self.tool_name
+        if self.tool_success is not None:
+            env["EMDASH_TOOL_SUCCESS"] = str(self.tool_success).lower()
+        if self.goal:
+            env["EMDASH_GOAL"] = self.goal
+        if self.success is not None:
+            env["EMDASH_SUCCESS"] = str(self.success).lower()
+        if self.error_message:
+            env["EMDASH_ERROR"] = self.error_message
+        return env
+
+
+@dataclass
+class HookConfig:
+    """Configuration for a single hook.
+
+    Attributes:
+        id: Unique identifier for the hook
+        event: Event type that triggers this hook
+        command: Shell command to execute
+        enabled: Whether the hook is active
+    """
+    id: str
+    event: HookEventType
+    command: str
+    enabled: bool = True
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "id": self.id,
+            "event": self.event.value,
+            "command": self.command,
+            "enabled": self.enabled,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "HookConfig":
+        """Create from dictionary."""
+        return cls(
+            id=data["id"],
+            event=HookEventType(data["event"]),
+            command=data["command"],
+            enabled=data.get("enabled", True),
+        )
+
+
+@dataclass
+class HooksFile:
+    """The .emdash/hooks.json file structure.
+
+    Attributes:
+        hooks: List of hook configurations
+    """
+    hooks: list[HookConfig] = field(default_factory=list)
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "hooks": [h.to_dict() for h in self.hooks],
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "HooksFile":
+        """Create from dictionary."""
+        hooks = [HookConfig.from_dict(h) for h in data.get("hooks", [])]
+        return cls(hooks=hooks)
+
+
+class HookManager:
+    """Manages hook loading, execution, and configuration.
+
+    Hooks are loaded from .emdash/hooks.json and executed asynchronously
+    when matching events occur.
+    """
+
+    def __init__(self, repo_root: Path | None = None):
+        """Initialize the hook manager.
+
+        Args:
+            repo_root: Root directory of the repository.
+                Defaults to current working directory.
+        """
+        self._repo_root = repo_root or Path.cwd()
+        self._hooks_file = self._repo_root / ".emdash" / "hooks.json"
+        self._hooks: list[HookConfig] = []
+        self._session_id: str | None = None
+        self._load_hooks()
+
+    @property
+    def hooks_file_path(self) -> Path:
+        """Get the path to the hooks file."""
+        return self._hooks_file
+
+    def set_session_id(self, session_id: str | None) -> None:
+        """Set the current session ID for event data."""
+        self._session_id = session_id
+
+    def _load_hooks(self) -> None:
+        """Load hooks from .emdash/hooks.json."""
+        if not self._hooks_file.exists():
+            self._hooks = []
+            return
+
+        try:
+            data = json.loads(self._hooks_file.read_text())
+            hooks_file = HooksFile.from_dict(data)
+            self._hooks = hooks_file.hooks
+            log.debug(f"Loaded {len(self._hooks)} hooks from {self._hooks_file}")
+        except Exception as e:
+            log.warning(f"Failed to load hooks: {e}")
+            self._hooks = []
+
+    def reload(self) -> None:
+        """Reload hooks from disk."""
+        self._load_hooks()
+
+    def get_hooks(self) -> list[HookConfig]:
+        """Get all configured hooks."""
+        return self._hooks.copy()
+
+    def get_enabled_hooks(self, event: HookEventType) -> list[HookConfig]:
+        """Get enabled hooks for a specific event type."""
+        return [h for h in self._hooks if h.enabled and h.event == event]
+
+    def add_hook(self, hook: HookConfig) -> None:
+        """Add a new hook and save to disk."""
+        # Check for duplicate ID
+        if any(h.id == hook.id for h in self._hooks):
+            raise ValueError(f"Hook with id '{hook.id}' already exists")
+
+        self._hooks.append(hook)
+        self._save_hooks()
+
+    def remove_hook(self, hook_id: str) -> bool:
+        """Remove a hook by ID. Returns True if removed."""
+        for i, h in enumerate(self._hooks):
+            if h.id == hook_id:
+                self._hooks.pop(i)
+                self._save_hooks()
+                return True
+        return False
+
+    def toggle_hook(self, hook_id: str) -> bool | None:
+        """Toggle a hook's enabled state. Returns new state or None if not found."""
+        for h in self._hooks:
+            if h.id == hook_id:
+                h.enabled = not h.enabled
+                self._save_hooks()
+                return h.enabled
+        return None
+
+    def _save_hooks(self) -> None:
+        """Save hooks to .emdash/hooks.json."""
+        self._hooks_file.parent.mkdir(parents=True, exist_ok=True)
+        hooks_file = HooksFile(hooks=self._hooks)
+        self._hooks_file.write_text(
+            json.dumps(hooks_file.to_dict(), indent=2) + "\n"
+        )
+        log.debug(f"Saved {len(self._hooks)} hooks to {self._hooks_file}")
+
+    def _build_event_data(self, event: AgentEvent, hook_event: HookEventType) -> HookEventData:
+        """Build HookEventData from an AgentEvent."""
+        data = HookEventData(
+            event=hook_event.value,
+            timestamp=event.timestamp.isoformat(),
+            session_id=self._session_id,
+        )
+
+        # Populate event-specific fields
+        if hook_event == HookEventType.TOOL_START:
+            data.tool_name = event.data.get("name")
+            data.tool_args = event.data.get("args")
+
+        elif hook_event == HookEventType.TOOL_RESULT:
+            data.tool_name = event.data.get("name")
+            data.tool_success = event.data.get("success")
+            data.tool_result = event.data.get("summary")
+            if not data.tool_success:
+                data.tool_error = event.data.get("data", {}).get("error")
+
+        elif hook_event == HookEventType.SESSION_START:
+            data.goal = event.data.get("goal")
+
+        elif hook_event == HookEventType.SESSION_END:
+            data.success = event.data.get("success")
+
+        elif hook_event == HookEventType.RESPONSE:
+            data.response_text = event.data.get("content")
+
+        elif hook_event == HookEventType.ERROR:
+            data.error_message = event.data.get("message")
+            data.error_details = event.data.get("details")
+
+        return data
+
+    def _execute_hook_async(self, hook: HookConfig, event_data: HookEventData) -> None:
+        """Execute a hook command asynchronously (fire and forget)."""
+        def run():
+            try:
+                env = os.environ.copy()
+                env.update(event_data.to_env_vars())
+
+                process = subprocess.Popen(
+                    hook.command,
+                    shell=True,
+                    stdin=subprocess.PIPE,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    env=env,
+                    cwd=str(self._repo_root),
+                )
+
+                # Send JSON data to stdin
+                json_data = event_data.to_json()
+                assert process.stdin is not None
+                process.stdin.write(json_data.encode())
+                process.stdin.close()
+
+                # Don't wait for completion - fire and forget
+                # But log if there's an error
+                def log_completion():
+                    _, stderr = process.communicate(timeout=30)
+                    if process.returncode != 0:
+                        log.warning(
+                            f"Hook '{hook.id}' exited with code {process.returncode}: "
+                            f"{stderr.decode()[:200]}"
+                        )
+
+                # Run completion logging in another thread to not block
+                completion_thread = threading.Thread(target=log_completion, daemon=True)
+                completion_thread.start()
+
+            except Exception as e:
+                log.warning(f"Failed to execute hook '{hook.id}': {e}")
+
+        thread = threading.Thread(target=run, daemon=True)
+        thread.start()
+
+    def trigger(self, event: AgentEvent) -> None:
+        """Trigger hooks for an event.
+
+        Called by the event system when events occur.
+        """
+        hook_event = HookEventType.from_event_type(event.type)
+        if hook_event is None:
+            return
+
+        hooks = self.get_enabled_hooks(hook_event)
+        if not hooks:
+            return
+
+        event_data = self._build_event_data(event, hook_event)
+
+        for hook in hooks:
+            log.debug(f"Triggering hook '{hook.id}' for event '{hook_event.value}'")
+            self._execute_hook_async(hook, event_data)
+
+
+class HookHandler(EventHandler):
+    """Event handler that triggers hooks.
+
+    Add this handler to an AgentEventEmitter to enable hooks.
+    """
+
+    def __init__(self, manager: HookManager):
+        """Initialize with a hook manager.
+
+        Args:
+            manager: The HookManager to use for triggering hooks
+        """
+        self._manager = manager
+
+    def handle(self, event: AgentEvent) -> None:
+        """Handle an event by triggering matching hooks."""
+        self._manager.trigger(event)
+
+
+# Convenience functions
+
+_default_manager: HookManager | None = None
+
+
+def get_hook_manager(repo_root: Path | None = None) -> HookManager:
+    """Get or create the default hook manager."""
+    global _default_manager
+    if _default_manager is None:
+        _default_manager = HookManager(repo_root)
+    return _default_manager
+
+
+def reset_hook_manager() -> None:
+    """Reset the default hook manager (for testing)."""
+    global _default_manager
+    _default_manager = None
```
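For orientation, here is a minimal sketch of how the new hook system can be driven programmatically. `HookManager`, `HookConfig`, `HookEventType`, `HookHandler`, and `get_hook_manager` come directly from the file above; the commented-out `emitter.add_handler(...)` call is an assumption about the `AgentEventEmitter` API, which is not part of this diff.

```python
from pathlib import Path

from emdash_core.agent.hooks import (
    HookConfig,
    HookEventType,
    HookHandler,
    get_hook_manager,
)

# Load (or create) .emdash/hooks.json for this repo and register a hook
# that fires whenever an agent session finishes. add_hook() raises
# ValueError if a hook with the same id already exists.
manager = get_hook_manager(Path("/path/to/repo"))
manager.add_hook(
    HookConfig(
        id="notify-done",
        event=HookEventType.SESSION_END,
        command="notify-send 'Agent finished'",
    )
)

# Wire hooks into the agent's event stream. HookHandler.handle() forwards
# each AgentEvent to HookManager.trigger(), which runs matching commands
# in background threads.
handler = HookHandler(manager)
# NOTE: assumed AgentEventEmitter method; the registration API is not shown here.
# emitter.add_handler(handler)
```

The spawned hook command receives the event payload as JSON on stdin and can also read the `EMDASH_*` environment variables (`EMDASH_EVENT`, `EMDASH_SESSION_ID`, `EMDASH_TOOL_NAME`, and so on) populated by `HookEventData.to_env_vars()`.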
emdash_core/agent/inprocess_subagent.py

```diff
@@ -16,6 +16,12 @@ from .toolkits import get_toolkit
 from .subagent_prompts import get_subagent_prompt
 from .providers import get_provider
 from .providers.factory import DEFAULT_MODEL
+from .context_manager import (
+    truncate_tool_output,
+    reduce_context_for_retry,
+    is_context_overflow_error,
+)
+from .runner.context import estimate_context_tokens
 from ..utils.logger import log
 
 
@@ -33,8 +39,13 @@ class SubAgentResult:
     iterations: int
     tools_used: list[str]
     execution_time: float
+    exploration_steps: list[dict] = None  # Detailed exploration steps
     error: Optional[str] = None
 
+    def __post_init__(self):
+        if self.exploration_steps is None:
+            self.exploration_steps = []
+
     def to_dict(self) -> dict:
         return asdict(self)
 
@@ -90,12 +101,13 @@ class InProcessSubAgent:
         self.provider = get_provider(model_name)
 
         # Get system prompt and inject thoroughness level
-        base_prompt = get_subagent_prompt(subagent_type)
+        base_prompt = get_subagent_prompt(subagent_type, repo_root=repo_root)
         self.system_prompt = self._inject_thoroughness(base_prompt)
 
         # Tracking
         self.files_explored: set[str] = set()
         self.tools_used: list[str] = []
+        self.exploration_steps: list[dict] = []  # Detailed step tracking
 
     def _inject_thoroughness(self, prompt: str) -> str:
         """Inject thoroughness level into the system prompt."""
@@ -140,6 +152,7 @@ class InProcessSubAgent:
         event_map = {
            "tool_start": EventType.TOOL_START,
            "tool_result": EventType.TOOL_RESULT,
+           "thinking": EventType.THINKING,
         }
 
         if event_type in event_map:
@@ -234,21 +247,50 @@ Now, your task:
 
             log.debug(f"SubAgent {self.agent_id} turn {iterations}/{self.max_turns}")
 
-            #
-
-
-
-
-
+            # Check context size and compact if needed
+            context_tokens = estimate_context_tokens(messages, self.system_prompt)
+            context_limit = self.provider.get_context_limit()
+
+            if context_tokens > context_limit * 0.8:
+                log.info(
+                    f"SubAgent {self.agent_id} context at {context_tokens:,}/{context_limit:,} "
+                    f"({context_tokens/context_limit:.0%}), reducing..."
+                )
+                messages = reduce_context_for_retry(messages, keep_recent=6)
+
+            # Call LLM with retry on context overflow
+            response = None
+            max_retries = 2
+            for retry in range(max_retries + 1):
+                try:
+                    response = self.provider.chat(
+                        messages=messages,
+                        tools=self.toolkit.get_all_schemas(),
+                        system=self.system_prompt,
+                    )
+                    break  # Success
+                except Exception as e:
+                    if is_context_overflow_error(e) and retry < max_retries:
+                        log.warning(
+                            f"SubAgent {self.agent_id} context overflow on attempt {retry + 1}, reducing..."
+                        )
+                        messages = reduce_context_for_retry(messages, keep_recent=4 - retry)
+                    else:
+                        raise  # Re-raise if not overflow or out of retries
+
+            if response is None:
+                raise RuntimeError("Failed to get response from LLM")
 
             # Add assistant response
             assistant_msg = self.provider.format_assistant_message(response)
             if assistant_msg:
                 messages.append(assistant_msg)
 
-            # Save content
+            # Save content and emit thinking event
             if response.content:
                 last_content = response.content
+                # Emit thinking event for UI
+                self._emit("thinking", content=response.content)
 
             # Check if done
             if not response.tool_calls:
@@ -283,10 +325,21 @@ Now, your task:
                     summary=summary,
                 )
 
-                #
+                # Track exploration step with details
+                step = {
+                    "tool": tool_call.name,
+                    "params": self._sanitize_params(args),
+                    "success": result.success,
+                    "summary": self._extract_result_summary(tool_call.name, args, result),
+                }
+                self.exploration_steps.append(step)
+
+                # Add tool result to messages (truncated to avoid context overflow)
+                tool_output = json.dumps(result.to_dict(), indent=2)
+                tool_output = truncate_tool_output(tool_output, max_tokens=15000)
                 tool_result_msg = self.provider.format_tool_result(
                     tool_call.id,
-
+                    tool_output,
                 )
                 if tool_result_msg:
                     messages.append(tool_result_msg)
@@ -316,6 +369,7 @@ Now, your task:
             iterations=iterations,
             tools_used=list(set(self.tools_used)),
             execution_time=execution_time,
+            exploration_steps=self.exploration_steps[-30:],  # Last 30 steps
             error=error,
         )
 
@@ -332,6 +386,56 @@ Now, your task:
                 pass
         return findings[-10:]
 
+    def _sanitize_params(self, args: dict) -> dict:
+        """Sanitize params for logging - truncate long values."""
+        sanitized = {}
+        for key, value in args.items():
+            if isinstance(value, str) and len(value) > 200:
+                sanitized[key] = value[:200] + "..."
+            else:
+                sanitized[key] = value
+        return sanitized
+
+    def _extract_result_summary(self, tool_name: str, args: dict, result) -> str:
+        """Extract a meaningful summary from tool result based on tool type."""
+        if not result.success:
+            return f"Failed: {result.error or 'unknown error'}"
+
+        data = result.data or {}
+
+        # Tool-specific summaries
+        if tool_name == "read_file":
+            path = args.get("path", args.get("file_path", ""))
+            lines = data.get("line_count", data.get("lines", "?"))
+            return f"Read {path} ({lines} lines)"
+
+        elif tool_name == "glob":
+            matches = data.get("matches", data.get("files", []))
+            pattern = args.get("pattern", "")
+            return f"Found {len(matches)} files matching '{pattern}'"
+
+        elif tool_name == "grep":
+            matches = data.get("matches", [])
+            pattern = args.get("pattern", "")
+            return f"Found {len(matches)} matches for '{pattern}'"
+
+        elif tool_name == "semantic_search":
+            results = data.get("results", [])
+            query = args.get("query", "")[:50]
+            return f"Found {len(results)} results for '{query}'"
+
+        elif tool_name == "list_files":
+            files = data.get("files", data.get("entries", []))
+            path = args.get("path", "")
+            return f"Listed {len(files)} items in {path}"
+
+        else:
+            # Generic summary
+            if isinstance(data, dict):
+                keys = list(data.keys())[:3]
+                return f"Returned: {', '.join(keys)}" if keys else "Success"
+            return str(data)[:100] if data else "Success"
+
 
 # Thread pool for parallel execution
 _executor: Optional[ThreadPoolExecutor] = None
```
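The retry logic added to the subagent loop above follows a general pattern: estimate prompt size before each call, compact proactively once it crosses roughly 80% of the provider's context limit, and shrink the kept history further on each overflow retry. The sketch below restates that pattern in isolation; the helper callables are hypothetical stand-ins for `provider.chat`, `estimate_context_tokens`, `reduce_context_for_retry`, and `is_context_overflow_error`, whose real implementations are not part of this diff.

```python
from typing import Callable


def call_with_context_guard(
    messages: list[dict],
    call_llm: Callable[[list[dict]], str],            # stand-in for provider.chat(...)
    estimate_tokens: Callable[[list[dict]], int],     # stand-in for estimate_context_tokens(...)
    reduce_context: Callable[[list[dict], int], list[dict]],  # stand-in for reduce_context_for_retry(...)
    is_overflow: Callable[[Exception], bool],         # stand-in for is_context_overflow_error(...)
    context_limit: int,
    max_retries: int = 2,
) -> str:
    """Compact proactively past 80% of the limit, then retry on overflow errors."""
    # Proactive compaction before the call (mirrors the 0.8 * limit check in the diff).
    if estimate_tokens(messages) > context_limit * 0.8:
        messages = reduce_context(messages, 6)

    for retry in range(max_retries + 1):
        try:
            return call_llm(messages)
        except Exception as exc:
            if is_overflow(exc) and retry < max_retries:
                # Keep fewer recent messages on each retry (4, then 3, ...).
                messages = reduce_context(messages, 4 - retry)
            else:
                raise

    # Unreachable in practice; mirrors the diff's `if response is None` guard.
    raise RuntimeError("Failed to get response from LLM")
```

Keeping the retry count low (two, as in the diff) bounds worst-case latency while still recovering from transient context overflows.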