emdash-core 0.1.37__py3-none-any.whl → 0.1.60__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/agent/agents.py +9 -0
- emdash_core/agent/background.py +481 -0
- emdash_core/agent/inprocess_subagent.py +70 -1
- emdash_core/agent/mcp/config.py +78 -2
- emdash_core/agent/prompts/main_agent.py +53 -1
- emdash_core/agent/prompts/plan_mode.py +65 -44
- emdash_core/agent/prompts/subagents.py +73 -1
- emdash_core/agent/prompts/workflow.py +179 -28
- emdash_core/agent/providers/models.py +1 -1
- emdash_core/agent/providers/openai_provider.py +10 -0
- emdash_core/agent/research/researcher.py +154 -45
- emdash_core/agent/runner/agent_runner.py +145 -19
- emdash_core/agent/runner/sdk_runner.py +29 -2
- emdash_core/agent/skills.py +81 -1
- emdash_core/agent/toolkit.py +87 -11
- emdash_core/agent/tools/__init__.py +2 -0
- emdash_core/agent/tools/coding.py +344 -52
- emdash_core/agent/tools/lsp.py +361 -0
- emdash_core/agent/tools/skill.py +21 -1
- emdash_core/agent/tools/task.py +16 -19
- emdash_core/agent/tools/task_output.py +262 -32
- emdash_core/agent/verifier/__init__.py +11 -0
- emdash_core/agent/verifier/manager.py +295 -0
- emdash_core/agent/verifier/models.py +97 -0
- emdash_core/{swarm/worktree_manager.py → agent/worktree.py} +19 -1
- emdash_core/api/agent.py +297 -2
- emdash_core/api/research.py +3 -3
- emdash_core/api/router.py +0 -4
- emdash_core/context/longevity.py +197 -0
- emdash_core/context/providers/explored_areas.py +83 -39
- emdash_core/context/reranker.py +35 -144
- emdash_core/context/simple_reranker.py +500 -0
- emdash_core/context/tool_relevance.py +84 -0
- emdash_core/core/config.py +8 -0
- emdash_core/graph/__init__.py +8 -1
- emdash_core/graph/connection.py +24 -3
- emdash_core/graph/writer.py +7 -1
- emdash_core/models/agent.py +10 -0
- emdash_core/server.py +1 -6
- emdash_core/sse/stream.py +16 -1
- emdash_core/utils/__init__.py +0 -2
- emdash_core/utils/git.py +103 -0
- emdash_core/utils/image.py +147 -160
- {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/METADATA +6 -6
- {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/RECORD +47 -52
- emdash_core/api/swarm.py +0 -223
- emdash_core/db/__init__.py +0 -67
- emdash_core/db/auth.py +0 -134
- emdash_core/db/models.py +0 -91
- emdash_core/db/provider.py +0 -222
- emdash_core/db/providers/__init__.py +0 -5
- emdash_core/db/providers/supabase.py +0 -452
- emdash_core/swarm/__init__.py +0 -17
- emdash_core/swarm/merge_agent.py +0 -383
- emdash_core/swarm/session_manager.py +0 -274
- emdash_core/swarm/swarm_runner.py +0 -226
- emdash_core/swarm/task_definition.py +0 -137
- emdash_core/swarm/worker_spawner.py +0 -319
- {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/WHEEL +0 -0
- {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/entry_points.txt +0 -0
emdash_core/agent/runner/agent_runner.py
CHANGED

@@ -11,6 +11,7 @@ from typing import Any, Optional
 
 from ...utils.logger import log
 from ...core.exceptions import ContextLengthError
+from ...core.config import get_config
 from ..toolkit import AgentToolkit
 from ..events import AgentEventEmitter, NullEmitter
 from ..providers import get_provider

@@ -30,8 +31,10 @@ from .context import (
     get_context_breakdown,
     maybe_compact_context,
     emit_context_frame,
+    get_reranked_context,
 )
 from .plan import PlanMixin
+from ..background import BackgroundTaskManager
 
 
 class AgentRunner(PlanMixin):

@@ -109,6 +112,12 @@ class AgentRunner(PlanMixin):
         self._pending_plan: Optional[dict] = None
         # Callback for autosave after each iteration (set by API layer)
         self._on_iteration_callback: Optional[callable] = None
+        # Context frame injection flag
+        self._inject_context_frame = os.getenv("EMDASH_INJECT_CONTEXT_FRAME", "").lower() in ("1", "true", "yes")
+        # Persistent thread pool executor for parallel tool execution
+        config = get_config()
+        self._tool_parallel_workers = config.agent.tool_parallel_workers
+        self._tool_executor: Optional[ThreadPoolExecutor] = None
 
     def _get_default_plan_file_path(self) -> str:
         """Get the default plan file path based on repo root.
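The constructor now reads two knobs: a context-frame flag from the environment and a worker count from config. A minimal sketch of the same truthy-flag parsing follows; the helper name `env_flag` is illustrative and not part of the package's API.

```python
# Sketch only: mirrors the truthy check used for EMDASH_INJECT_CONTEXT_FRAME above.
import os

def env_flag(name: str, default: str = "") -> bool:
    # "1", "true", "yes" (any casing) enable the feature; anything else disables it
    return os.getenv(name, default).lower() in ("1", "true", "yes")

if __name__ == "__main__":
    os.environ["EMDASH_INJECT_CONTEXT_FRAME"] = "true"
    print(env_flag("EMDASH_INJECT_CONTEXT_FRAME"))  # True
```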
@@ -143,9 +152,101 @@ class AgentRunner(PlanMixin):
         task_list = "\n".join(lines)
         return f"<todo-state>\n{header}\n{task_list}\n</todo-state>"
 
+    def _check_background_notifications(self) -> list[str]:
+        """Check for completed background tasks and format notifications.
+
+        Returns:
+            List of notification messages to inject into context
+        """
+        try:
+            manager = BackgroundTaskManager.get_instance()
+            completed_tasks = manager.get_pending_notifications()
+
+            notifications = []
+            for task in completed_tasks:
+                msg = manager.format_notification(task)
+                notifications.append(msg)
+                log.info(f"Background task {task.task_id} notification ready")
+
+            return notifications
+        except Exception as e:
+            log.warning(f"Failed to check background notifications: {e}")
+            return []
+
+    def _format_context_reminder(self) -> str:
+        """Format reranked context items as XML reminder for injection.
+
+        Only called when EMDASH_INJECT_CONTEXT_FRAME is enabled.
+
+        Returns:
+            Formatted context reminder string, or empty if no context
+        """
+        if not self._current_query:
+            return ""
+
+        reading = get_reranked_context(self.toolkit, self._current_query)
+        items = reading.get("items", [])
+
+        if not items:
+            return ""
+
+        lines = [
+            "<context-frame>",
+            f"Relevant context for query: {self._current_query[:100]}",
+            f"Found {len(items)} relevant items (ranked by relevance score):",
+            "",
+        ]
+
+        for item in items[:15]:  # Top 15 items
+            name = item.get("name", "?")
+            item_type = item.get("type", "?")
+            score = item.get("score")
+            file_path = item.get("file", "")
+            description = item.get("description", "")
+
+            score_str = f" (score: {score:.3f})" if score is not None else ""
+            file_str = f" in {file_path}" if file_path else ""
+
+            lines.append(f"  - [{item_type}] {name}{score_str}{file_str}")
+            if description:
+                lines.append(f"    {description[:150]}")
+
+        lines.append("</context-frame>")
+        return "\n".join(lines)
+
+    def _get_tool_executor(self) -> ThreadPoolExecutor:
+        """Get the persistent thread pool executor, creating it if needed.
+
+        Uses lazy initialization to avoid creating threads until actually needed.
+        """
+        if self._tool_executor is None:
+            self._tool_executor = ThreadPoolExecutor(
+                max_workers=self._tool_parallel_workers,
+                thread_name_prefix="tool-exec-"
+            )
+        return self._tool_executor
+
+    def close(self) -> None:
+        """Clean up resources, including the thread pool executor."""
+        if self._tool_executor is not None:
+            self._tool_executor.shutdown(wait=False)
+            self._tool_executor = None
+
+    def __enter__(self):
+        """Support context manager protocol."""
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        """Clean up on exit from context manager."""
+        self.close()
+        return False
+
     def _execute_tools_parallel(self, parsed_calls: list) -> list:
         """Execute multiple tool calls in parallel using a thread pool.
 
+        Uses a persistent thread pool executor for better performance by avoiding
+        thread creation/destruction overhead on each batch of tool calls.
+
         Args:
             parsed_calls: List of (tool_call, args) tuples
 
@@ -166,14 +267,14 @@ class AgentRunner(PlanMixin):
             from ..tools.base import ToolResult
             return (tool_call, args, ToolResult.error_result(str(e)))
 
-        # Execute in parallel
+        # Execute in parallel using persistent executor
+        executor = self._get_tool_executor()
         results: list = [None] * len(parsed_calls)
-
-
-
-
-
-            results[idx] = future.result()
+        futures = {executor.submit(execute_one, item): i for i, item in enumerate(parsed_calls)}
+        # Collect results maintaining order
+        for future in as_completed(futures):
+            idx = futures[future]
+            results[idx] = future.result()
 
         # Emit tool result events for all calls
         for tool_call, args, result in results:
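The rewritten `_execute_tools_parallel` keeps results aligned with submission order even though futures complete out of order. A self-contained sketch of that pattern, with generic names rather than the package's code:

```python
# Generic sketch of the submit/as_completed pattern adopted above: each result is
# written back into its original slot, so output order matches input order.
from concurrent.futures import ThreadPoolExecutor, as_completed

def run_ordered(executor: ThreadPoolExecutor, fn, items: list) -> list:
    results = [None] * len(items)
    futures = {executor.submit(fn, item): i for i, item in enumerate(items)}
    for future in as_completed(futures):
        results[futures[future]] = future.result()
    return results

if __name__ == "__main__":
    with ThreadPoolExecutor(max_workers=4, thread_name_prefix="tool-exec-") as pool:
        print(run_ordered(pool, lambda x: x * x, [1, 2, 3, 4]))  # [1, 4, 9, 16]
```

Unlike this demo, the runner keeps its executor alive across batches and only shuts it down in `close()`.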
@@ -209,24 +310,27 @@ class AgentRunner(PlanMixin):
         from ..tools.modes import ModeState
         ModeState.get_instance().reset_cycle()
 
-        # Build user message
+        # Build user message content
         if context:
-
-
-
-
+            text_content = f"Context:\n{context}\n\nQuestion: {query}"
+        else:
+            text_content = query
+
+        # Format content with images if provided
+        if images:
+            content = self.provider.format_content_with_images(text_content, images)
         else:
-
-
-
-
+            content = text_content
+
+        user_message = {
+            "role": "user",
+            "content": content,
+        }
 
         # Save user message to history BEFORE running (so it's preserved even if interrupted)
         self._messages.append(user_message)
         messages = list(self._messages) # Copy for the loop
 
-        # TODO: Handle images if provided
-
         # Get tool schemas
         tools = self.toolkit.get_all_schemas()
 
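The new message-assembly branch first folds optional context into the text, then hands the text plus any images to the provider. A hedged sketch of the same flow; `format_with_images` stands in for the provider's `format_content_with_images` shown in the diff:

```python
# Sketch of the branch added above (assumed behaviour, not the package's code).
def build_user_message(query: str, context: str = "", images: list | None = None,
                       format_with_images=None) -> dict:
    text = f"Context:\n{context}\n\nQuestion: {query}" if context else query
    content = format_with_images(text, images) if images and format_with_images else text
    return {"role": "user", "content": content}

if __name__ == "__main__":
    print(build_user_message("What changed in 0.1.60?", context="release notes draft"))
```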
@@ -262,6 +366,16 @@ class AgentRunner(PlanMixin):
         max_retries = 3
 
         for iteration in range(self.max_iterations):
+            # Check for completed background tasks and inject notifications
+            bg_notifications = self._check_background_notifications()
+            for notification in bg_notifications:
+                messages.append({
+                    "role": "user",
+                    "content": notification,
+                })
+                # Emit event so UI can show notification
+                self.emitter.emit_assistant_text(f"[Background task completed - see notification]")
+
             # When approaching max iterations, ask agent to wrap up
             if iteration == self.max_iterations - 2:
                 messages.append({
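The notification check relies on a `BackgroundTaskManager` singleton from the new `background.py`, whose internals this diff view does not expand. A hypothetical stand-in showing only the three calls the runner makes (`get_instance`, `get_pending_notifications`, `format_notification`):

```python
# Hypothetical stand-in; the real manager lives in emdash_core/agent/background.py.
from dataclasses import dataclass

@dataclass
class _Task:
    task_id: str
    output: str

class DemoTaskManager:
    _instance = None

    def __init__(self) -> None:
        self._pending: list[_Task] = []

    @classmethod
    def get_instance(cls) -> "DemoTaskManager":
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def complete(self, task: _Task) -> None:
        self._pending.append(task)

    def get_pending_notifications(self) -> list[_Task]:
        done, self._pending = self._pending, []
        return done

    def format_notification(self, task: _Task) -> str:
        return f"Background task {task.task_id} finished:\n{task.output}"

if __name__ == "__main__":
    mgr = DemoTaskManager.get_instance()
    mgr.complete(_Task("bg-1", "tests passed"))
    print([mgr.format_notification(t) for t in mgr.get_pending_notifications()])
```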
@@ -506,6 +620,12 @@ class AgentRunner(PlanMixin):
                 "content": result_json,
             })
 
+        # Inject context frame reminder if enabled (append to last tool result)
+        if self._inject_context_frame and messages and messages[-1].get("role") == "tool":
+            context_reminder = self._format_context_reminder()
+            if context_reminder:
+                messages[-1]["content"] += f"\n\n{context_reminder}"
+
         # Emit context frame after each iteration (for autosave and UI updates)
         self._emit_context_frame(messages)
 
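When the flag is enabled, the reminder is appended to the most recent tool result rather than sent as a separate message. A small usage sketch with an illustrative message list, not the runner's real state:

```python
# Illustrative only: shows where the <context-frame> text ends up when
# EMDASH_INJECT_CONTEXT_FRAME is enabled.
import os

os.environ["EMDASH_INJECT_CONTEXT_FRAME"] = "1"

messages = [{"role": "tool", "content": '{"ok": true}'}]
reminder = "<context-frame>\n  - [function] AgentRunner.run (score: 0.912)\n</context-frame>"
if messages and messages[-1].get("role") == "tool" and reminder:
    messages[-1]["content"] += f"\n\n{reminder}"
print(messages[-1]["content"])
```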
@@ -676,10 +796,16 @@ DO NOT output more text. Use a tool NOW.""",
         # Store query for reranking context frame
         self._current_query = message
 
+        # Format content with images if provided
+        if images:
+            content = self.provider.format_content_with_images(message, images)
+        else:
+            content = message
+
         # Add new user message to history
         self._messages.append({
             "role": "user",
-            "content":
+            "content": content,
         })
 
         # Get tool schemas
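With `close`, `__enter__`, and `__exit__` in place, the runner can be used as a context manager so the persistent tool executor is always shut down. Usage sketch; the constructor arguments are omitted placeholders, not the actual signature:

```python
# Usage sketch (assumes emdash_core is installed; constructor arguments omitted).
from emdash_core.agent.runner.agent_runner import AgentRunner

with AgentRunner() as runner:   # __enter__ returns the runner
    ...                         # tool calls share one persistent executor
# __exit__ calls runner.close(), shutting the thread pool down
```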
emdash_core/agent/runner/sdk_runner.py
CHANGED

@@ -184,11 +184,12 @@ class SDKAgentRunner:
         from ..events import EventType
         self.emitter.emit(getattr(EventType, event_type), data)
 
-    async def run(self, prompt: str) -> AsyncIterator[dict]:
+    async def run(self, prompt: str, images: list = None) -> AsyncIterator[dict]:
         """Execute agent with SDK.
 
         Args:
             prompt: User prompt/task
+            images: Optional list of image dicts with 'data' (bytes) and 'format' keys
 
         Yields:
             Event dicts for UI streaming
@@ -201,6 +202,7 @@ class SDKAgentRunner:
             ToolResultBlock,
             ResultMessage,
         )
+        import base64
 
         options = self._get_options()
 
@@ -210,9 +212,34 @@
             "agent_name": "Emdash Code (SDK)",
         })
 
+        # Format prompt with images if provided
+        if images:
+            # Build content blocks for Claude SDK
+            content_blocks = []
+            for img in images:
+                img_data = img.get("data")
+                img_format = img.get("format", "png")
+                if isinstance(img_data, bytes):
+                    encoded = base64.b64encode(img_data).decode("utf-8")
+                else:
+                    encoded = img_data  # Already base64 encoded
+                content_blocks.append({
+                    "type": "image",
+                    "source": {
+                        "type": "base64",
+                        "media_type": f"image/{img_format}",
+                        "data": encoded,
+                    }
+                })
+            content_blocks.append({"type": "text", "text": prompt})
+            query_content = content_blocks
+            log.info(f"SDK agent: sending {len(images)} images with prompt")
+        else:
+            query_content = prompt
+
         try:
             async with ClaudeSDKClient(options=options) as client:
-                await client.query(
+                await client.query(query_content)
 
                 async for message in client.receive_response():
                     # Process message and yield events
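The image handling mirrors the Anthropic-style content-block shape: one base64 `image` block per image, followed by a single `text` block. A standalone sketch of that construction; `build_blocks` is not part of emdash-core, it just reproduces the same structure:

```python
# Standalone sketch of the content blocks built above.
import base64

def build_blocks(prompt: str, images: list[dict]) -> list[dict]:
    blocks = []
    for img in images:
        data = img.get("data")
        fmt = img.get("format", "png")
        encoded = base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
        blocks.append({
            "type": "image",
            "source": {"type": "base64", "media_type": f"image/{fmt}", "data": encoded},
        })
    blocks.append({"type": "text", "text": prompt})
    return blocks

if __name__ == "__main__":
    print(build_blocks("Describe this screenshot", [{"data": b"\x89PNG", "format": "png"}]))
```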
emdash_core/agent/skills.py
CHANGED
@@ -16,6 +16,9 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
 
+import os
+import stat
+
 from ..utils.logger import log
 
 

@@ -24,6 +27,66 @@ def _get_builtin_skills_dir() -> Path:
     return Path(__file__).parent.parent / "skills"
 
 
+def _discover_scripts(skill_dir: Path) -> list[Path]:
+    """Discover executable scripts in a skill directory.
+
+    Scripts are self-contained bash executables that can be run by the agent
+    to perform specific actions. They must be either:
+    - Files with .sh extension
+    - Files with executable permission and a shebang (#!/bin/bash, #!/usr/bin/env bash, etc.)
+
+    Args:
+        skill_dir: Path to the skill directory
+
+    Returns:
+        List of paths to executable scripts
+    """
+    scripts = []
+
+    if not skill_dir.exists() or not skill_dir.is_dir():
+        return scripts
+
+    # Files to skip (not scripts)
+    skip_files = {"SKILL.md", "skill.md", "README.md", "readme.md"}
+
+    for file_path in skill_dir.iterdir():
+        if not file_path.is_file():
+            continue
+
+        if file_path.name in skip_files:
+            continue
+
+        # Check if it's a .sh file
+        is_shell_script = file_path.suffix == ".sh"
+
+        # Check if it has a shebang
+        has_shebang = False
+        try:
+            with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
+                first_line = f.readline().strip()
+                if first_line.startswith("#!"):
+                    # Check for bash/sh shebang
+                    if any(shell in first_line for shell in ["bash", "/sh", "python", "node", "ruby", "perl"]):
+                        has_shebang = True
+        except (OSError, IOError):
+            continue
+
+        if is_shell_script or has_shebang:
+            # Ensure the file is executable
+            try:
+                current_mode = file_path.stat().st_mode
+                if not (current_mode & stat.S_IXUSR):
+                    # Make it executable for the user
+                    os.chmod(file_path, current_mode | stat.S_IXUSR)
+                    log.debug(f"Made script executable: {file_path}")
+            except OSError as e:
+                log.warning(f"Could not make script executable: {file_path}: {e}")
+
+            scripts.append(file_path)
+
+    return sorted(scripts, key=lambda p: p.name)
+
+
 @dataclass
 class Skill:
     """A skill configuration loaded from SKILL.md.
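A skill directory that would satisfy the new discovery is a `SKILL.md` plus one or more scripts; `.sh` files (or shebang scripts) get picked up and made user-executable. A small self-contained demo of that layout, where the filter is a simplified stand-in for `_discover_scripts`:

```python
# Demo layout; the .sh filter below is a simplified stand-in for _discover_scripts.
import os
import stat
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    skill = Path(tmp) / "release-notes"
    skill.mkdir()
    (skill / "SKILL.md").write_text("---\nname: release-notes\n---\n")
    (skill / "collect.sh").write_text("#!/usr/bin/env bash\ngit log --oneline -20\n")

    scripts = sorted(p for p in skill.iterdir() if p.suffix == ".sh")
    for p in scripts:
        os.chmod(p, p.stat().st_mode | stat.S_IXUSR)  # ensure user-executable
    print([p.name for p in scripts])  # ['collect.sh']
```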
@@ -35,6 +98,7 @@ class Skill:
         tools: List of tools this skill needs access to
         user_invocable: Whether skill can be invoked with /name
         file_path: Source file path
+        scripts: List of executable script paths in the skill directory
         _builtin: Whether this is a built-in skill bundled with emdash_core
     """
 

@@ -44,6 +108,7 @@ class Skill:
     tools: list[str] = field(default_factory=list)
     user_invocable: bool = False
     file_path: Optional[Path] = None
+    scripts: list[Path] = field(default_factory=list)
     _builtin: bool = False
 
 

@@ -166,6 +231,10 @@ class SkillRegistry:
                 skill = _parse_skill_file(skill_file, skill_dir.name)
                 if skill:
                     skill._builtin = is_builtin  # Mark as built-in or user-defined
+                    # Discover scripts in the skill directory
+                    skill.scripts = _discover_scripts(skill_dir)
+                    if skill.scripts:
+                        log.debug(f"Found {len(skill.scripts)} scripts in skill: {skill.name}")
                     skills[skill.name] = skill
                     self._skills[skill.name] = skill
                     source = "built-in" if is_builtin else "user"

@@ -224,12 +293,23 @@ class SkillRegistry:
 
         for skill in self._skills.values():
             invocable = " (user-invocable: /{})".format(skill.name) if skill.user_invocable else ""
-
+            scripts_note = f" [has {len(skill.scripts)} script(s)]" if skill.scripts else ""
+            lines.append(f"- **{skill.name}**: {skill.description}{invocable}{scripts_note}")
 
         lines.append("")
         lines.append("To activate a skill, use the `skill` tool with the skill name.")
         lines.append("")
 
+        # Add note about skill scripts if any skill has scripts
+        has_scripts = any(skill.scripts for skill in self._skills.values())
+        if has_scripts:
+            lines.append("### Skill Scripts")
+            lines.append("")
+            lines.append("Some skills include executable scripts that can be run using the Bash tool.")
+            lines.append("When you invoke a skill with scripts, the script paths will be provided.")
+            lines.append("Scripts are self-contained and can be executed directly.")
+            lines.append("")
+
         return "\n".join(lines)
 
 
emdash_core/agent/toolkit.py
CHANGED
@@ -1,9 +1,10 @@
 """Main AgentToolkit class for LLM agent graph exploration."""
 
+import os
 from pathlib import Path
 from typing import Optional
 
-from ..graph.connection import KuzuConnection, get_connection
+from ..graph.connection import KuzuConnection, get_connection, KUZU_AVAILABLE
 from .tools.base import BaseTool, ToolResult, ToolCategory
 from .session import AgentSession
 from ..utils.logger import log
@@ -56,7 +57,20 @@
             save_spec_path: If provided, specs will be saved to this path.
             plan_file_path: Path to the plan file (only writable file in plan mode).
         """
-
+        # Handle connection - Kuzu is optional
+        if connection is not None:
+            self.connection = connection
+        elif KUZU_AVAILABLE:
+            try:
+                self.connection = get_connection()
+            except Exception as e:
+                log.warning(f"Failed to connect to Kuzu database: {e}")
+                log.warning("Semantic search will be disabled. Other tools will work normally.")
+                self.connection = None
+        else:
+            log.info("Kuzu not installed - semantic search disabled. Install with: pip install kuzu")
+            self.connection = None
+
         self.session = AgentSession() if enable_session else None
         self._tools: dict[str, BaseTool] = {}
         self._mcp_manager = None
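The toolkit now treats Kuzu as optional via a `KUZU_AVAILABLE` flag exported by the connection module. A sketch of that optional-import pattern; this is an illustrative module, not the package's actual `connection.py`:

```python
# Sketch of the optional-dependency pattern assumed by the hunk above.
try:
    import kuzu  # optional; only semantic search needs it
    KUZU_AVAILABLE = True
except ImportError:
    kuzu = None
    KUZU_AVAILABLE = False

def connect_or_none(factory):
    """Return a connection when Kuzu is importable and the factory succeeds, else None."""
    if not KUZU_AVAILABLE:
        return None
    try:
        return factory()
    except Exception:
        return None
```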
@@ -104,8 +118,10 @@
         )
 
         # Register search tools
-
-
+        # SemanticSearchTool requires Kuzu database
+        if self.connection is not None:
+            self.register_tool(SemanticSearchTool(self.connection))
+        # These tools work without database connection
         self.register_tool(GrepTool(self.connection))
         self.register_tool(GlobTool(self.connection))
         self.register_tool(WebTool(self.connection))
@@ -131,15 +147,21 @@
         # In code mode: full write access
         from .tools.coding import (
             WriteToFileTool,
-            ApplyDiffTool,
             DeleteFileTool,
             ExecuteCommandTool,
         )
         self.register_tool(WriteToFileTool(self._repo_root, self.connection))
-        self.register_tool(ApplyDiffTool(self._repo_root, self.connection))
         self.register_tool(DeleteFileTool(self._repo_root, self.connection))
         self.register_tool(ExecuteCommandTool(self._repo_root, self.connection))
 
+        # Toggle between apply_diff (default) and edit_file based on env var
+        if os.getenv("EMDASH_ENABLE_APPLY_DIFF", "true").lower() in ("0", "false", "no"):
+            from .tools.coding import EditFileTool
+            self.register_tool(EditFileTool(self._repo_root, self.connection))
+        else:
+            from .tools.coding import ApplyDiffTool
+            self.register_tool(ApplyDiffTool(self._repo_root, self.connection))
+
         # Register sub-agent tools for spawning lightweight agents
         self._register_subagent_tools()
 
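`apply_diff` stays the default editing tool; only an explicit false value in `EMDASH_ENABLE_APPLY_DIFF` swaps in `edit_file`. A minimal sketch of the toggle, with an illustrative helper name:

```python
# Mirrors the toggle above; `use_edit_file_tool` is not an emdash-core function.
import os

def use_edit_file_tool() -> bool:
    return os.getenv("EMDASH_ENABLE_APPLY_DIFF", "true").lower() in ("0", "false", "no")

if __name__ == "__main__":
    os.environ["EMDASH_ENABLE_APPLY_DIFF"] = "false"
    print(use_edit_file_tool())  # True -> EditFileTool is registered instead of ApplyDiffTool
```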
@@ -169,16 +191,22 @@
         log.debug(f"Registered {len(self._tools)} agent tools")
 
     def _register_subagent_tools(self) -> None:
-        """Register sub-agent
-
-        These tools allow
-
+        """Register sub-agent and background task management tools.
+
+        These tools allow:
+        - Spawning specialized sub-agents as subprocesses
+        - Running shell commands in the background
+        - Getting output from background tasks
+        - Killing background tasks
+        - Listing all background tasks
         """
         from .tools.task import TaskTool
-        from .tools.task_output import TaskOutputTool
+        from .tools.task_output import TaskOutputTool, KillTaskTool, ListTasksTool
 
         self.register_tool(TaskTool(repo_root=self._repo_root, connection=self.connection))
         self.register_tool(TaskOutputTool(repo_root=self._repo_root, connection=self.connection))
+        self.register_tool(KillTaskTool(repo_root=self._repo_root, connection=self.connection))
+        self.register_tool(ListTasksTool(repo_root=self._repo_root, connection=self.connection))
 
     def _register_mode_tools(self) -> None:
         """Register mode switching tools.
@@ -347,10 +375,58 @@
             if tools:
                 log.info(f"Registered {len(tools)} dynamic MCP tools from config")
 
+            # Register LSP tools if USE_LSP is enabled (default: true)
+            self._register_lsp_tools()
+
         except Exception as e:
             log.warning(f"Failed to initialize MCP manager: {e}")
             self._mcp_manager = None
 
+    def _register_lsp_tools(self) -> None:
+        """Register LSP-based code navigation tools.
+
+        These tools use cclsp MCP server to provide IDE-level code intelligence.
+        Enabled by default with USE_LSP=true. Set USE_LSP=false to disable.
+        """
+        from .tools.lsp import (
+            is_lsp_enabled,
+            LSPFindDefinitionTool,
+            LSPFindReferencesTool,
+            LSPRenameSymbolTool,
+            LSPGetDiagnosticsTool,
+        )
+
+        if not is_lsp_enabled():
+            log.info("LSP tools disabled (USE_LSP=false)")
+            return
+
+        if self._mcp_manager is None:
+            log.warning("Cannot register LSP tools: MCP manager not initialized")
+            return
+
+        # Check if cclsp is available
+        config = self._mcp_manager.load_config()
+        cclsp_config = config.get_server("cclsp")
+        if not cclsp_config or not cclsp_config.enabled:
+            log.info("LSP tools not registered: cclsp MCP server not enabled")
+            return
+
+        # Register LSP tools with better descriptions for the agent
+        lsp_tools = [
+            LSPFindDefinitionTool(self._mcp_manager, self.connection),
+            LSPFindReferencesTool(self._mcp_manager, self.connection),
+            LSPRenameSymbolTool(self._mcp_manager, self.connection),
+            LSPGetDiagnosticsTool(self._mcp_manager, self.connection),
+        ]
+
+        for tool in lsp_tools:
+            # LSP tools take priority - overwrite if exists
+            if tool.name in self._tools:
+                log.debug(f"LSP tool '{tool.name}' overwriting existing tool")
+            self.register_tool(tool)
+
+        log.info(f"Registered {len(lsp_tools)} LSP tools (USE_LSP=true)")
+
     def get_mcp_manager(self):
         """Get the MCP manager instance.
 
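LSP tools are on by default and gated by `USE_LSP`; the actual check lives in the new `tools/lsp.py`, which this diff view does not expand. A plausible reading of that gate, offered as an assumption rather than the confirmed implementation:

```python
# Assumed shape of the USE_LSP gate; the real is_lsp_enabled() is in
# emdash_core/agent/tools/lsp.py and may differ.
import os

def is_lsp_enabled() -> bool:
    return os.getenv("USE_LSP", "true").lower() not in ("0", "false", "no")

if __name__ == "__main__":
    print(is_lsp_enabled())  # True unless USE_LSP=false (or 0/no) is set
```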
emdash_core/agent/tools/__init__.py
CHANGED

@@ -56,6 +56,7 @@ from .coding import (
     CodingTool,
     ReadFileTool,
     WriteToFileTool,
+    EditFileTool,
     ApplyDiffTool,
     DeleteFileTool,
     ListFilesTool,

@@ -124,6 +125,7 @@ __all__ = [
    "CodingTool",
    "ReadFileTool",
    "WriteToFileTool",
+    "EditFileTool",
    "ApplyDiffTool",
    "DeleteFileTool",
    "ListFilesTool",