emdash-core 0.1.33__py3-none-any.whl → 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67) hide show
  1. emdash_core/agent/agents.py +93 -23
  2. emdash_core/agent/background.py +481 -0
  3. emdash_core/agent/hooks.py +419 -0
  4. emdash_core/agent/inprocess_subagent.py +114 -10
  5. emdash_core/agent/mcp/config.py +78 -2
  6. emdash_core/agent/prompts/main_agent.py +88 -1
  7. emdash_core/agent/prompts/plan_mode.py +65 -44
  8. emdash_core/agent/prompts/subagents.py +96 -8
  9. emdash_core/agent/prompts/workflow.py +215 -50
  10. emdash_core/agent/providers/models.py +1 -1
  11. emdash_core/agent/providers/openai_provider.py +10 -0
  12. emdash_core/agent/research/researcher.py +154 -45
  13. emdash_core/agent/runner/agent_runner.py +157 -19
  14. emdash_core/agent/runner/context.py +28 -9
  15. emdash_core/agent/runner/sdk_runner.py +29 -2
  16. emdash_core/agent/skills.py +81 -1
  17. emdash_core/agent/toolkit.py +87 -11
  18. emdash_core/agent/toolkits/__init__.py +117 -18
  19. emdash_core/agent/toolkits/base.py +87 -2
  20. emdash_core/agent/toolkits/explore.py +18 -0
  21. emdash_core/agent/toolkits/plan.py +18 -0
  22. emdash_core/agent/tools/__init__.py +2 -0
  23. emdash_core/agent/tools/coding.py +344 -52
  24. emdash_core/agent/tools/lsp.py +361 -0
  25. emdash_core/agent/tools/skill.py +21 -1
  26. emdash_core/agent/tools/task.py +27 -23
  27. emdash_core/agent/tools/task_output.py +262 -32
  28. emdash_core/agent/verifier/__init__.py +11 -0
  29. emdash_core/agent/verifier/manager.py +295 -0
  30. emdash_core/agent/verifier/models.py +97 -0
  31. emdash_core/{swarm/worktree_manager.py → agent/worktree.py} +19 -1
  32. emdash_core/api/agent.py +451 -5
  33. emdash_core/api/research.py +3 -3
  34. emdash_core/api/router.py +0 -4
  35. emdash_core/context/longevity.py +197 -0
  36. emdash_core/context/providers/explored_areas.py +83 -39
  37. emdash_core/context/reranker.py +35 -144
  38. emdash_core/context/simple_reranker.py +500 -0
  39. emdash_core/context/tool_relevance.py +84 -0
  40. emdash_core/core/config.py +8 -0
  41. emdash_core/graph/__init__.py +8 -1
  42. emdash_core/graph/connection.py +24 -3
  43. emdash_core/graph/writer.py +7 -1
  44. emdash_core/ingestion/repository.py +17 -198
  45. emdash_core/models/agent.py +14 -0
  46. emdash_core/server.py +1 -6
  47. emdash_core/sse/stream.py +16 -1
  48. emdash_core/utils/__init__.py +0 -2
  49. emdash_core/utils/git.py +103 -0
  50. emdash_core/utils/image.py +147 -160
  51. {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/METADATA +7 -5
  52. {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/RECORD +54 -58
  53. emdash_core/api/swarm.py +0 -223
  54. emdash_core/db/__init__.py +0 -67
  55. emdash_core/db/auth.py +0 -134
  56. emdash_core/db/models.py +0 -91
  57. emdash_core/db/provider.py +0 -222
  58. emdash_core/db/providers/__init__.py +0 -5
  59. emdash_core/db/providers/supabase.py +0 -452
  60. emdash_core/swarm/__init__.py +0 -17
  61. emdash_core/swarm/merge_agent.py +0 -383
  62. emdash_core/swarm/session_manager.py +0 -274
  63. emdash_core/swarm/swarm_runner.py +0 -226
  64. emdash_core/swarm/task_definition.py +0 -137
  65. emdash_core/swarm/worker_spawner.py +0 -319
  66. {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/WHEEL +0 -0
  67. {emdash_core-0.1.33.dist-info → emdash_core-0.1.60.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,361 @@
1
+ """LSP-based code navigation tools using cclsp MCP server.
2
+
3
+ These tools provide IDE-level code intelligence using the Language Server Protocol
4
+ via the cclsp MCP server. They offer real-time, accurate code navigation without
5
+ requiring pre-indexing.
6
+
7
+ Enable with USE_LSP=true (disabled by default).
8
+ """
9
+
10
+ import os
11
+ from typing import Optional
12
+
13
+ from .base import BaseTool, ToolResult, ToolCategory
14
+ from ...utils.logger import log
15
+
16
+
17
def is_lsp_enabled() -> bool:
    """Check whether the LSP navigation tools should be used.

    Controlled by the USE_LSP environment variable; the feature is
    opt-in and disabled by default.

    Returns:
        True only if USE_LSP is explicitly set to 'true' (case-insensitive).
    """
    flag = os.getenv("USE_LSP", "false")
    return flag.lower() == "true"
24
+
25
+
26
class LSPFindDefinitionTool(BaseTool):
    """Find the definition of a symbol using LSP."""

    name = "find_definition"
    description = """Find where a symbol (function, class, variable) is defined.
Uses Language Server Protocol for accurate, real-time results.
Returns the file path and location of the definition."""
    category = ToolCategory.TRAVERSAL

    def __init__(self, mcp_manager, connection=None):
        """Initialize the LSP tool.

        Args:
            mcp_manager: MCP server manager for calling cclsp
            connection: Optional Kuzu connection (for compatibility)
        """
        super().__init__(connection)
        self._mcp_manager = mcp_manager

    def execute(
        self,
        file_path: str,
        symbol_name: str,
        symbol_kind: str = "function",
    ) -> ToolResult:
        """Find the definition of a symbol.

        Args:
            file_path: Path to the file containing the symbol reference
            symbol_name: Name of the symbol to find definition for
            symbol_kind: Kind of symbol (function, class, variable, method)

        Returns:
            ToolResult with definition location
        """
        try:
            response = self._mcp_manager.call_tool("find_definition", {
                "file": file_path,
                "name": symbol_name,
                "kind": symbol_kind,
            })

            if not response.content:
                return ToolResult.error_result(
                    f"No definition found for '{symbol_name}'",
                    suggestions=["Check the symbol name spelling", "Try a different symbol_kind"],
                )

            # Parse response content. The guard above already returned when
            # content was empty, so indexing the first item is always safe here.
            # NOTE(review): assumes content items are dicts with a "text" key
            # (falls back to str() otherwise) — confirm against the MCP client.
            content = response.content[0]
            text = content.get("text", "") if isinstance(content, dict) else str(content)

            return ToolResult.success_result(
                data={
                    "symbol": symbol_name,
                    "kind": symbol_kind,
                    "definition": text,
                },
            )

        except Exception as e:
            log.exception("LSP find_definition failed")
            return ToolResult.error_result(f"Failed to find definition: {str(e)}")

    def get_schema(self) -> dict:
        """Get OpenAI function schema."""
        return self._make_schema(
            properties={
                "file_path": {
                    "type": "string",
                    "description": "Path to the file containing the symbol reference",
                },
                "symbol_name": {
                    "type": "string",
                    "description": "Name of the symbol to find definition for",
                },
                "symbol_kind": {
                    "type": "string",
                    "enum": ["function", "class", "variable", "method", "interface", "type"],
                    "description": "Kind of symbol",
                    "default": "function",
                },
            },
            required=["file_path", "symbol_name"],
        )
111
+
112
+
113
class LSPFindReferencesTool(BaseTool):
    """Find all references to a symbol using LSP."""

    name = "find_references"
    description = """Find all usages/references of a symbol across the codebase.
Uses Language Server Protocol for accurate, real-time results.
Similar to 'Find All References' in an IDE."""
    category = ToolCategory.TRAVERSAL

    def __init__(self, mcp_manager, connection=None):
        """Initialize the LSP tool.

        Args:
            mcp_manager: MCP server manager for calling cclsp
            connection: Optional Kuzu connection (for compatibility)
        """
        super().__init__(connection)
        self._mcp_manager = mcp_manager

    def execute(
        self,
        file_path: str,
        symbol_name: str,
        symbol_kind: str = "function",
        include_declaration: bool = True,
    ) -> ToolResult:
        """Find all references to a symbol.

        Args:
            file_path: Path to the file containing the symbol
            symbol_name: Name of the symbol to find references for
            symbol_kind: Kind of symbol (function, class, variable, method)
            include_declaration: Whether to include the declaration itself

        Returns:
            ToolResult with list of references
        """
        arguments = {
            "file": file_path,
            "name": symbol_name,
            "kind": symbol_kind,
            # cclsp expects camelCase for this flag
            "includeDeclaration": include_declaration,
        }
        try:
            reply = self._mcp_manager.call_tool("find_references", arguments)

            if not reply.content:
                return ToolResult.error_result(
                    f"No references found for '{symbol_name}'",
                    suggestions=["Check the symbol name spelling", "The symbol may not be used anywhere"],
                )

            head = reply.content[0] if reply.content else {}
            text = head.get("text", "") if isinstance(head, dict) else str(head)

            payload = {
                "symbol": symbol_name,
                "kind": symbol_kind,
                "references": text,
            }
            return ToolResult.success_result(data=payload)

        except Exception as e:
            log.exception("LSP find_references failed")
            return ToolResult.error_result(f"Failed to find references: {str(e)}")

    def get_schema(self) -> dict:
        """Get OpenAI function schema."""
        props = {
            "file_path": {
                "type": "string",
                "description": "Path to the file containing the symbol",
            },
            "symbol_name": {
                "type": "string",
                "description": "Name of the symbol to find references for",
            },
            "symbol_kind": {
                "type": "string",
                "enum": ["function", "class", "variable", "method", "interface", "type"],
                "description": "Kind of symbol",
                "default": "function",
            },
            "include_declaration": {
                "type": "boolean",
                "description": "Whether to include the declaration in results",
                "default": True,
            },
        }
        return self._make_schema(properties=props, required=["file_path", "symbol_name"])
205
+
206
+
207
class LSPRenameSymbolTool(BaseTool):
    """Rename a symbol across the codebase using LSP."""

    name = "rename_symbol"
    description = """Safely rename a symbol (function, class, variable) across all files.
Uses Language Server Protocol for semantic renaming that understands code structure.
Much safer than find-and-replace."""
    category = ToolCategory.TRAVERSAL

    def __init__(self, mcp_manager, connection=None):
        """Initialize the LSP tool.

        Args:
            mcp_manager: MCP server manager for calling cclsp
            connection: Optional Kuzu connection (for compatibility)
        """
        super().__init__(connection)
        self._mcp_manager = mcp_manager

    def execute(
        self,
        file_path: str,
        old_name: str,
        new_name: str,
        symbol_kind: str = "function",
    ) -> ToolResult:
        """Rename a symbol across the codebase.

        Args:
            file_path: Path to the file containing the symbol
            old_name: Current name of the symbol
            new_name: New name for the symbol
            symbol_kind: Kind of symbol (function, class, variable, method)

        Returns:
            ToolResult with rename results
        """
        try:
            result = self._mcp_manager.call_tool("rename_symbol", {
                "file": file_path,
                "name": old_name,
                # cclsp expects camelCase for the target name
                "newName": new_name,
                "kind": symbol_kind,
            })

            # Empty content means the server could not perform the rename.
            if not result.content:
                return ToolResult.error_result(
                    f"Failed to rename '{old_name}' to '{new_name}'",
                    suggestions=["Check the symbol name", "Try rename_symbol_strict for precise matching"],
                )

            item = result.content[0] if result.content else {}
            if isinstance(item, dict):
                text = item.get("text", "")
            else:
                text = str(item)

            return ToolResult.success_result(
                data={
                    "old_name": old_name,
                    "new_name": new_name,
                    "kind": symbol_kind,
                    "result": text,
                },
            )

        except Exception as e:
            log.exception("LSP rename_symbol failed")
            return ToolResult.error_result(f"Failed to rename symbol: {str(e)}")

    def get_schema(self) -> dict:
        """Get OpenAI function schema."""
        schema_props = {
            "file_path": {
                "type": "string",
                "description": "Path to the file containing the symbol",
            },
            "old_name": {
                "type": "string",
                "description": "Current name of the symbol",
            },
            "new_name": {
                "type": "string",
                "description": "New name for the symbol",
            },
            "symbol_kind": {
                "type": "string",
                "enum": ["function", "class", "variable", "method", "interface", "type"],
                "description": "Kind of symbol",
                "default": "function",
            },
        }
        return self._make_schema(
            properties=schema_props,
            required=["file_path", "old_name", "new_name"],
        )
299
+
300
+
301
class LSPGetDiagnosticsTool(BaseTool):
    """Get diagnostics (errors, warnings) for a file using LSP."""

    name = "get_diagnostics"
    description = """Get errors, warnings, and other diagnostics for a file.
Uses Language Server Protocol to provide real-time feedback without running the compiler.
Useful for checking code health before making changes."""
    category = ToolCategory.ANALYTICS

    def __init__(self, mcp_manager, connection=None):
        """Initialize the LSP tool.

        Args:
            mcp_manager: MCP server manager for calling cclsp
            connection: Optional Kuzu connection (for compatibility)
        """
        super().__init__(connection)
        self._mcp_manager = mcp_manager

    def execute(
        self,
        file_path: str,
    ) -> ToolResult:
        """Get diagnostics for a file.

        Args:
            file_path: Path to the file to check

        Returns:
            ToolResult with diagnostics
        """
        try:
            reply = self._mcp_manager.call_tool("get_diagnostics", {"file": file_path})

            # An empty content list simply yields empty diagnostics text;
            # unlike the navigation tools, this is not treated as an error.
            entry = reply.content[0] if reply.content else {}
            if isinstance(entry, dict):
                diagnostics_text = entry.get("text", "")
            else:
                diagnostics_text = str(entry)

            result_data = {
                "file": file_path,
                "diagnostics": diagnostics_text,
            }
            return ToolResult.success_result(data=result_data)

        except Exception as e:
            log.exception("LSP get_diagnostics failed")
            return ToolResult.error_result(f"Failed to get diagnostics: {str(e)}")

    def get_schema(self) -> dict:
        """Get OpenAI function schema."""
        return self._make_schema(
            properties={
                "file_path": {
                    "type": "string",
                    "description": "Path to the file to check for diagnostics",
                },
            },
            required=["file_path"],
        )
@@ -88,6 +88,23 @@ Use list_skills to see available skills."""
88
88
  "",
89
89
  ])
90
90
 
91
+ # Include available scripts
92
+ scripts_paths = []
93
+ if skill_obj.scripts:
94
+ response_parts.extend([
95
+ "**Available Scripts**:",
96
+ "",
97
+ ])
98
+ for script in skill_obj.scripts:
99
+ script_path = str(script)
100
+ scripts_paths.append(script_path)
101
+ response_parts.append(f"- `{script_path}` - Run with: `bash {script_path}` or `./{script.name}` from skill directory")
102
+ response_parts.extend([
103
+ "",
104
+ "*Scripts are self-contained executables. Run them using Bash tool when needed.*",
105
+ "",
106
+ ])
107
+
91
108
  response_parts.extend([
92
109
  "---",
93
110
  "",
@@ -100,6 +117,7 @@ Use list_skills to see available skills."""
100
117
  "description": skill_obj.description,
101
118
  "instructions": skill_obj.instructions,
102
119
  "tools": skill_obj.tools,
120
+ "scripts": scripts_paths,
103
121
  "args": args,
104
122
  "message": "\n".join(response_parts),
105
123
  },
@@ -172,13 +190,15 @@ class ListSkillsTool(BaseTool):
172
190
  "description": skill.description,
173
191
  "user_invocable": skill.user_invocable,
174
192
  "tools": skill.tools,
193
+ "scripts": [str(s) for s in skill.scripts] if skill.scripts else [],
175
194
  })
176
195
 
177
196
  # Build human-readable message
178
197
  lines = ["# Available Skills", ""]
179
198
  for s in skills_list:
180
199
  invocable = f" (invoke with /{s['name']})" if s["user_invocable"] else ""
181
- lines.append(f"- **{s['name']}**: {s['description']}{invocable}")
200
+ scripts_note = f" [has {len(s['scripts'])} script(s)]" if s["scripts"] else ""
201
+ lines.append(f"- **{s['name']}**: {s['description']}{invocable}{scripts_note}")
182
202
 
183
203
  return ToolResult.success_result(
184
204
  data={
@@ -14,6 +14,7 @@ from typing import Optional
14
14
  from .base import BaseTool, ToolResult, ToolCategory
15
15
  from ..toolkits import list_agent_types
16
16
  from ..inprocess_subagent import run_subagent, run_subagent_async
17
+ from ..background import BackgroundTaskManager
17
18
  from ...utils.logger import log
18
19
 
19
20
 
@@ -87,11 +88,15 @@ Multiple sub-agents can be launched in parallel."""
87
88
  suggestions=["Provide a clear task description in 'prompt'"],
88
89
  )
89
90
 
90
- available_types = list_agent_types()
91
+ available_types = list_agent_types(self.repo_root)
92
+ log.info(f"TaskTool: repo_root={self.repo_root}, available_types={available_types}")
91
93
  if subagent_type not in available_types:
92
94
  return ToolResult.error_result(
93
95
  f"Unknown agent type: {subagent_type}",
94
- suggestions=[f"Available types: {available_types}"],
96
+ suggestions=[
97
+ f"Available types: {available_types}",
98
+ f"Searched in: {self.repo_root / '.emdash' / 'agents'}",
99
+ ],
95
100
  )
96
101
 
97
102
  # Log current mode for debugging
@@ -116,7 +121,7 @@ Multiple sub-agents can be launched in parallel."""
116
121
  })
117
122
 
118
123
  if run_in_background:
119
- return self._run_background(subagent_type, prompt, max_turns, thoroughness)
124
+ return self._run_background(subagent_type, prompt, max_turns, thoroughness, description)
120
125
  else:
121
126
  return self._run_sync(subagent_type, prompt, max_turns, thoroughness)
122
127
 
@@ -180,6 +185,7 @@ Multiple sub-agents can be launched in parallel."""
180
185
  prompt: str,
181
186
  max_turns: int,
182
187
  thoroughness: str = "medium",
188
+ description: str = "",
183
189
  ) -> ToolResult:
184
190
  """Run sub-agent in background using a thread.
185
191
 
@@ -188,17 +194,11 @@ Multiple sub-agents can be launched in parallel."""
188
194
  prompt: Task prompt
189
195
  max_turns: Maximum API round-trips
190
196
  thoroughness: Search thoroughness level
197
+ description: Short task description
191
198
 
192
199
  Returns:
193
200
  ToolResult with task info
194
201
  """
195
- agent_id = str(uuid.uuid4())[:8]
196
-
197
- # Output file for results
198
- output_dir = self.repo_root / ".emdash" / "agents"
199
- output_dir.mkdir(parents=True, exist_ok=True)
200
- output_file = output_dir / f"{agent_id}.output"
201
-
202
202
  try:
203
203
  # Start async execution
204
204
  future = run_subagent_async(
@@ -210,25 +210,26 @@ Multiple sub-agents can be launched in parallel."""
210
210
  thoroughness=thoroughness,
211
211
  )
212
212
 
213
- # Store future for later retrieval (attach to class for now)
214
- if not hasattr(self, "_background_tasks"):
215
- self._background_tasks = {}
216
- self._background_tasks[agent_id] = {
217
- "future": future,
218
- "output_file": output_file,
219
- }
213
+ # Register with BackgroundTaskManager for notification support
214
+ manager = BackgroundTaskManager.get_instance()
215
+ task_id = manager.start_subagent(
216
+ future=future,
217
+ agent_type=subagent_type,
218
+ description=description or prompt[:50],
219
+ )
220
220
 
221
- log.info(f"Started background agent {agent_id}")
221
+ log.info(f"Started background agent {task_id}")
222
222
 
223
223
  return ToolResult.success_result(
224
224
  data={
225
- "agent_id": agent_id,
225
+ "task_id": task_id,
226
226
  "status": "running",
227
227
  "agent_type": subagent_type,
228
- "output_file": str(output_file),
228
+ "message": "Sub-agent started in background. You'll be notified when it completes.",
229
229
  },
230
230
  suggestions=[
231
- f"Use task_output(agent_id='{agent_id}') to check results",
231
+ f"Use task_output(task_id='{task_id}') to check results",
232
+ f"Use kill_task(task_id='{task_id}') to stop it",
232
233
  ],
233
234
  )
234
235
 
@@ -254,6 +255,9 @@ Multiple sub-agents can be launched in parallel."""
254
255
 
255
256
  def get_schema(self) -> dict:
256
257
  """Get OpenAI function schema."""
258
+ # Get available agent types dynamically (includes custom agents)
259
+ available_types = list_agent_types(self.repo_root)
260
+
257
261
  return self._make_schema(
258
262
  properties={
259
263
  "description": {
@@ -266,8 +270,8 @@ Multiple sub-agents can be launched in parallel."""
266
270
  },
267
271
  "subagent_type": {
268
272
  "type": "string",
269
- "enum": ["Explore", "Plan"],
270
- "description": "Type of specialized agent",
273
+ "enum": available_types,
274
+ "description": f"Type of specialized agent. Available: {', '.join(available_types)}",
271
275
  "default": "Explore",
272
276
  },
273
277
  "model_tier": {