emdash-core 0.1.37__py3-none-any.whl → 0.1.60__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. emdash_core/agent/agents.py +9 -0
  2. emdash_core/agent/background.py +481 -0
  3. emdash_core/agent/inprocess_subagent.py +70 -1
  4. emdash_core/agent/mcp/config.py +78 -2
  5. emdash_core/agent/prompts/main_agent.py +53 -1
  6. emdash_core/agent/prompts/plan_mode.py +65 -44
  7. emdash_core/agent/prompts/subagents.py +73 -1
  8. emdash_core/agent/prompts/workflow.py +179 -28
  9. emdash_core/agent/providers/models.py +1 -1
  10. emdash_core/agent/providers/openai_provider.py +10 -0
  11. emdash_core/agent/research/researcher.py +154 -45
  12. emdash_core/agent/runner/agent_runner.py +145 -19
  13. emdash_core/agent/runner/sdk_runner.py +29 -2
  14. emdash_core/agent/skills.py +81 -1
  15. emdash_core/agent/toolkit.py +87 -11
  16. emdash_core/agent/tools/__init__.py +2 -0
  17. emdash_core/agent/tools/coding.py +344 -52
  18. emdash_core/agent/tools/lsp.py +361 -0
  19. emdash_core/agent/tools/skill.py +21 -1
  20. emdash_core/agent/tools/task.py +16 -19
  21. emdash_core/agent/tools/task_output.py +262 -32
  22. emdash_core/agent/verifier/__init__.py +11 -0
  23. emdash_core/agent/verifier/manager.py +295 -0
  24. emdash_core/agent/verifier/models.py +97 -0
  25. emdash_core/{swarm/worktree_manager.py → agent/worktree.py} +19 -1
  26. emdash_core/api/agent.py +297 -2
  27. emdash_core/api/research.py +3 -3
  28. emdash_core/api/router.py +0 -4
  29. emdash_core/context/longevity.py +197 -0
  30. emdash_core/context/providers/explored_areas.py +83 -39
  31. emdash_core/context/reranker.py +35 -144
  32. emdash_core/context/simple_reranker.py +500 -0
  33. emdash_core/context/tool_relevance.py +84 -0
  34. emdash_core/core/config.py +8 -0
  35. emdash_core/graph/__init__.py +8 -1
  36. emdash_core/graph/connection.py +24 -3
  37. emdash_core/graph/writer.py +7 -1
  38. emdash_core/models/agent.py +10 -0
  39. emdash_core/server.py +1 -6
  40. emdash_core/sse/stream.py +16 -1
  41. emdash_core/utils/__init__.py +0 -2
  42. emdash_core/utils/git.py +103 -0
  43. emdash_core/utils/image.py +147 -160
  44. {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/METADATA +6 -6
  45. {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/RECORD +47 -52
  46. emdash_core/api/swarm.py +0 -223
  47. emdash_core/db/__init__.py +0 -67
  48. emdash_core/db/auth.py +0 -134
  49. emdash_core/db/models.py +0 -91
  50. emdash_core/db/provider.py +0 -222
  51. emdash_core/db/providers/__init__.py +0 -5
  52. emdash_core/db/providers/supabase.py +0 -452
  53. emdash_core/swarm/__init__.py +0 -17
  54. emdash_core/swarm/merge_agent.py +0 -383
  55. emdash_core/swarm/session_manager.py +0 -274
  56. emdash_core/swarm/swarm_runner.py +0 -226
  57. emdash_core/swarm/task_definition.py +0 -137
  58. emdash_core/swarm/worker_spawner.py +0 -319
  59. {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/WHEEL +0 -0
  60. {emdash_core-0.1.37.dist-info → emdash_core-0.1.60.dist-info}/entry_points.txt +0 -0
emdash_core/agent/tools/lsp.py
@@ -0,0 +1,361 @@
+"""LSP-based code navigation tools using cclsp MCP server.
+
+These tools provide IDE-level code intelligence using the Language Server Protocol
+via the cclsp MCP server. They offer real-time, accurate code navigation without
+requiring pre-indexing.
+
+Enable with USE_LSP=true (disabled by default).
+"""
+
+import os
+from typing import Optional
+
+from .base import BaseTool, ToolResult, ToolCategory
+from ...utils.logger import log
+
+
+def is_lsp_enabled() -> bool:
+    """Check if LSP tools should be used.
+
+    Returns:
+        True if USE_LSP is explicitly set to 'true' (disabled by default)
+    """
+    return os.getenv("USE_LSP", "false").lower() == "true"
+
+
+class LSPFindDefinitionTool(BaseTool):
+    """Find the definition of a symbol using LSP."""
+
+    name = "find_definition"
+    description = """Find where a symbol (function, class, variable) is defined.
+    Uses Language Server Protocol for accurate, real-time results.
+    Returns the file path and location of the definition."""
+    category = ToolCategory.TRAVERSAL
+
+    def __init__(self, mcp_manager, connection=None):
+        """Initialize the LSP tool.
+
+        Args:
+            mcp_manager: MCP server manager for calling cclsp
+            connection: Optional Kuzu connection (for compatibility)
+        """
+        super().__init__(connection)
+        self._mcp_manager = mcp_manager
+
+    def execute(
+        self,
+        file_path: str,
+        symbol_name: str,
+        symbol_kind: str = "function",
+    ) -> ToolResult:
+        """Find the definition of a symbol.
+
+        Args:
+            file_path: Path to the file containing the symbol reference
+            symbol_name: Name of the symbol to find definition for
+            symbol_kind: Kind of symbol (function, class, variable, method)
+
+        Returns:
+            ToolResult with definition location
+        """
+        try:
+            response = self._mcp_manager.call_tool("find_definition", {
+                "file": file_path,
+                "name": symbol_name,
+                "kind": symbol_kind,
+            })
+
+            if not response.content:
+                return ToolResult.error_result(
+                    f"No definition found for '{symbol_name}'",
+                    suggestions=["Check the symbol name spelling", "Try a different symbol_kind"],
+                )
+
+            # Parse response content
+            content = response.content[0] if response.content else {}
+            text = content.get("text", "") if isinstance(content, dict) else str(content)
+
+            return ToolResult.success_result(
+                data={
+                    "symbol": symbol_name,
+                    "kind": symbol_kind,
+                    "definition": text,
+                },
+            )
+
+        except Exception as e:
+            log.exception("LSP find_definition failed")
+            return ToolResult.error_result(f"Failed to find definition: {str(e)}")
+
+    def get_schema(self) -> dict:
+        """Get OpenAI function schema."""
+        return self._make_schema(
+            properties={
+                "file_path": {
+                    "type": "string",
+                    "description": "Path to the file containing the symbol reference",
+                },
+                "symbol_name": {
+                    "type": "string",
+                    "description": "Name of the symbol to find definition for",
+                },
+                "symbol_kind": {
+                    "type": "string",
+                    "enum": ["function", "class", "variable", "method", "interface", "type"],
+                    "description": "Kind of symbol",
+                    "default": "function",
+                },
+            },
+            required=["file_path", "symbol_name"],
+        )
+
+
+class LSPFindReferencesTool(BaseTool):
+    """Find all references to a symbol using LSP."""
+
+    name = "find_references"
+    description = """Find all usages/references of a symbol across the codebase.
+    Uses Language Server Protocol for accurate, real-time results.
+    Similar to 'Find All References' in an IDE."""
+    category = ToolCategory.TRAVERSAL
+
+    def __init__(self, mcp_manager, connection=None):
+        """Initialize the LSP tool.
+
+        Args:
+            mcp_manager: MCP server manager for calling cclsp
+            connection: Optional Kuzu connection (for compatibility)
+        """
+        super().__init__(connection)
+        self._mcp_manager = mcp_manager
+
+    def execute(
+        self,
+        file_path: str,
+        symbol_name: str,
+        symbol_kind: str = "function",
+        include_declaration: bool = True,
+    ) -> ToolResult:
+        """Find all references to a symbol.
+
+        Args:
+            file_path: Path to the file containing the symbol
+            symbol_name: Name of the symbol to find references for
+            symbol_kind: Kind of symbol (function, class, variable, method)
+            include_declaration: Whether to include the declaration itself
+
+        Returns:
+            ToolResult with list of references
+        """
+        try:
+            response = self._mcp_manager.call_tool("find_references", {
+                "file": file_path,
+                "name": symbol_name,
+                "kind": symbol_kind,
+                "includeDeclaration": include_declaration,
+            })
+
+            if not response.content:
+                return ToolResult.error_result(
+                    f"No references found for '{symbol_name}'",
+                    suggestions=["Check the symbol name spelling", "The symbol may not be used anywhere"],
+                )
+
+            content = response.content[0] if response.content else {}
+            text = content.get("text", "") if isinstance(content, dict) else str(content)
+
+            return ToolResult.success_result(
+                data={
+                    "symbol": symbol_name,
+                    "kind": symbol_kind,
+                    "references": text,
+                },
+            )
+
+        except Exception as e:
+            log.exception("LSP find_references failed")
+            return ToolResult.error_result(f"Failed to find references: {str(e)}")
+
+    def get_schema(self) -> dict:
+        """Get OpenAI function schema."""
+        return self._make_schema(
+            properties={
+                "file_path": {
+                    "type": "string",
+                    "description": "Path to the file containing the symbol",
+                },
+                "symbol_name": {
+                    "type": "string",
+                    "description": "Name of the symbol to find references for",
+                },
+                "symbol_kind": {
+                    "type": "string",
+                    "enum": ["function", "class", "variable", "method", "interface", "type"],
+                    "description": "Kind of symbol",
+                    "default": "function",
+                },
+                "include_declaration": {
+                    "type": "boolean",
+                    "description": "Whether to include the declaration in results",
+                    "default": True,
+                },
+            },
+            required=["file_path", "symbol_name"],
+        )
+
+
+class LSPRenameSymbolTool(BaseTool):
+    """Rename a symbol across the codebase using LSP."""
+
+    name = "rename_symbol"
+    description = """Safely rename a symbol (function, class, variable) across all files.
+    Uses Language Server Protocol for semantic renaming that understands code structure.
+    Much safer than find-and-replace."""
+    category = ToolCategory.TRAVERSAL
+
+    def __init__(self, mcp_manager, connection=None):
+        """Initialize the LSP tool.
+
+        Args:
+            mcp_manager: MCP server manager for calling cclsp
+            connection: Optional Kuzu connection (for compatibility)
+        """
+        super().__init__(connection)
+        self._mcp_manager = mcp_manager
+
+    def execute(
+        self,
+        file_path: str,
+        old_name: str,
+        new_name: str,
+        symbol_kind: str = "function",
+    ) -> ToolResult:
+        """Rename a symbol across the codebase.
+
+        Args:
+            file_path: Path to the file containing the symbol
+            old_name: Current name of the symbol
+            new_name: New name for the symbol
+            symbol_kind: Kind of symbol (function, class, variable, method)
+
+        Returns:
+            ToolResult with rename results
+        """
+        try:
+            response = self._mcp_manager.call_tool("rename_symbol", {
+                "file": file_path,
+                "name": old_name,
+                "newName": new_name,
+                "kind": symbol_kind,
+            })
+
+            if not response.content:
+                return ToolResult.error_result(
+                    f"Failed to rename '{old_name}' to '{new_name}'",
+                    suggestions=["Check the symbol name", "Try rename_symbol_strict for precise matching"],
+                )
+
+            content = response.content[0] if response.content else {}
+            text = content.get("text", "") if isinstance(content, dict) else str(content)
+
+            return ToolResult.success_result(
+                data={
+                    "old_name": old_name,
+                    "new_name": new_name,
+                    "kind": symbol_kind,
+                    "result": text,
+                },
+            )
+
+        except Exception as e:
+            log.exception("LSP rename_symbol failed")
+            return ToolResult.error_result(f"Failed to rename symbol: {str(e)}")
+
+    def get_schema(self) -> dict:
+        """Get OpenAI function schema."""
+        return self._make_schema(
+            properties={
+                "file_path": {
+                    "type": "string",
+                    "description": "Path to the file containing the symbol",
+                },
+                "old_name": {
+                    "type": "string",
+                    "description": "Current name of the symbol",
+                },
+                "new_name": {
+                    "type": "string",
+                    "description": "New name for the symbol",
+                },
+                "symbol_kind": {
+                    "type": "string",
+                    "enum": ["function", "class", "variable", "method", "interface", "type"],
+                    "description": "Kind of symbol",
+                    "default": "function",
+                },
+            },
+            required=["file_path", "old_name", "new_name"],
+        )
+
+
+class LSPGetDiagnosticsTool(BaseTool):
+    """Get diagnostics (errors, warnings) for a file using LSP."""
+
+    name = "get_diagnostics"
+    description = """Get errors, warnings, and other diagnostics for a file.
+    Uses Language Server Protocol to provide real-time feedback without running the compiler.
+    Useful for checking code health before making changes."""
+    category = ToolCategory.ANALYTICS
+
+    def __init__(self, mcp_manager, connection=None):
+        """Initialize the LSP tool.
+
+        Args:
+            mcp_manager: MCP server manager for calling cclsp
+            connection: Optional Kuzu connection (for compatibility)
+        """
+        super().__init__(connection)
+        self._mcp_manager = mcp_manager
+
+    def execute(
+        self,
+        file_path: str,
+    ) -> ToolResult:
+        """Get diagnostics for a file.
+
+        Args:
+            file_path: Path to the file to check
+
+        Returns:
+            ToolResult with diagnostics
+        """
+        try:
+            response = self._mcp_manager.call_tool("get_diagnostics", {
+                "file": file_path,
+            })
+
+            content = response.content[0] if response.content else {}
+            text = content.get("text", "") if isinstance(content, dict) else str(content)
+
+            return ToolResult.success_result(
+                data={
+                    "file": file_path,
+                    "diagnostics": text,
+                },
+            )
+
+        except Exception as e:
+            log.exception("LSP get_diagnostics failed")
+            return ToolResult.error_result(f"Failed to get diagnostics: {str(e)}")
+
+    def get_schema(self) -> dict:
+        """Get OpenAI function schema."""
+        return self._make_schema(
+            properties={
+                "file_path": {
+                    "type": "string",
+                    "description": "Path to the file to check for diagnostics",
+                },
+            },
+            required=["file_path"],
+        )
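All four tools follow the same pattern: call the cclsp MCP server through mcp_manager.call_tool() and return the first content block's text in a ToolResult. Below is a minimal smoke-test sketch, assuming emdash-core 0.1.60 is installed; FakeMCPManager, the target file, and the symbol name are made-up stand-ins, not part of the package.

    import os
    from types import SimpleNamespace

    os.environ["USE_LSP"] = "true"  # LSP tools stay off unless this flag is set

    from emdash_core.agent.tools.lsp import LSPFindDefinitionTool, is_lsp_enabled


    class FakeMCPManager:
        """Made-up stand-in for the cclsp MCP manager; only call_tool() is assumed."""

        def call_tool(self, name: str, args: dict):
            # Shape the response the way the tools expect: an object with a
            # .content list whose first item carries a "text" field.
            return SimpleNamespace(content=[{"text": f"{name} -> {args}"}])


    if is_lsp_enabled():
        tool = LSPFindDefinitionTool(mcp_manager=FakeMCPManager())
        result = tool.execute(
            file_path="emdash_core/agent/toolkit.py",  # made-up target file
            symbol_name="AgentToolkit",                # made-up symbol
            symbol_kind="class",
        )
        print(result)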
emdash_core/agent/tools/skill.py
@@ -88,6 +88,23 @@ Use list_skills to see available skills."""
             "",
         ])
 
+        # Include available scripts
+        scripts_paths = []
+        if skill_obj.scripts:
+            response_parts.extend([
+                "**Available Scripts**:",
+                "",
+            ])
+            for script in skill_obj.scripts:
+                script_path = str(script)
+                scripts_paths.append(script_path)
+                response_parts.append(f"- `{script_path}` - Run with: `bash {script_path}` or `./{script.name}` from skill directory")
+            response_parts.extend([
+                "",
+                "*Scripts are self-contained executables. Run them using Bash tool when needed.*",
+                "",
+            ])
+
         response_parts.extend([
             "---",
             "",
@@ -100,6 +117,7 @@ Use list_skills to see available skills."""
                 "description": skill_obj.description,
                 "instructions": skill_obj.instructions,
                 "tools": skill_obj.tools,
+                "scripts": scripts_paths,
                 "args": args,
                 "message": "\n".join(response_parts),
             },
@@ -172,13 +190,15 @@ class ListSkillsTool(BaseTool):
                 "description": skill.description,
                 "user_invocable": skill.user_invocable,
                 "tools": skill.tools,
+                "scripts": [str(s) for s in skill.scripts] if skill.scripts else [],
             })
 
         # Build human-readable message
         lines = ["# Available Skills", ""]
         for s in skills_list:
             invocable = f" (invoke with /{s['name']})" if s["user_invocable"] else ""
-            lines.append(f"- **{s['name']}**: {s['description']}{invocable}")
+            scripts_note = f" [has {len(s['scripts'])} script(s)]" if s["scripts"] else ""
+            lines.append(f"- **{s['name']}**: {s['description']}{invocable}{scripts_note}")
 
         return ToolResult.success_result(
             data={
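For illustration only, the snippet below replays the new list_skills formatting with made-up skill data to show how the scripts note is appended to each entry; the skill names and script paths are invented.

    skills_list = [
        {"name": "release-notes", "description": "Draft release notes",
         "user_invocable": True, "scripts": ["scripts/gen_notes.sh"]},
        {"name": "triage", "description": "Label incoming issues",
         "user_invocable": False, "scripts": []},
    ]

    lines = ["# Available Skills", ""]
    for s in skills_list:
        invocable = f" (invoke with /{s['name']})" if s["user_invocable"] else ""
        scripts_note = f" [has {len(s['scripts'])} script(s)]" if s["scripts"] else ""
        lines.append(f"- **{s['name']}**: {s['description']}{invocable}{scripts_note}")
    print("\n".join(lines))
    # - **release-notes**: Draft release notes (invoke with /release-notes) [has 1 script(s)]
    # - **triage**: Label incoming issues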
emdash_core/agent/tools/task.py
@@ -14,6 +14,7 @@ from typing import Optional
 from .base import BaseTool, ToolResult, ToolCategory
 from ..toolkits import list_agent_types
 from ..inprocess_subagent import run_subagent, run_subagent_async
+from ..background import BackgroundTaskManager
 from ...utils.logger import log
 
 
@@ -120,7 +121,7 @@ Multiple sub-agents can be launched in parallel."""
         })
 
         if run_in_background:
-            return self._run_background(subagent_type, prompt, max_turns, thoroughness)
+            return self._run_background(subagent_type, prompt, max_turns, thoroughness, description)
         else:
             return self._run_sync(subagent_type, prompt, max_turns, thoroughness)
 
@@ -184,6 +185,7 @@ Multiple sub-agents can be launched in parallel."""
         prompt: str,
         max_turns: int,
         thoroughness: str = "medium",
+        description: str = "",
     ) -> ToolResult:
         """Run sub-agent in background using a thread.
 
@@ -192,17 +194,11 @@ Multiple sub-agents can be launched in parallel."""
             prompt: Task prompt
             max_turns: Maximum API round-trips
             thoroughness: Search thoroughness level
+            description: Short task description
 
         Returns:
            ToolResult with task info
        """
-        agent_id = str(uuid.uuid4())[:8]
-
-        # Output file for results
-        output_dir = self.repo_root / ".emdash" / "agents"
-        output_dir.mkdir(parents=True, exist_ok=True)
-        output_file = output_dir / f"{agent_id}.output"
-
         try:
             # Start async execution
             future = run_subagent_async(
@@ -214,25 +210,26 @@ Multiple sub-agents can be launched in parallel."""
                 thoroughness=thoroughness,
             )
 
-            # Store future for later retrieval (attach to class for now)
-            if not hasattr(self, "_background_tasks"):
-                self._background_tasks = {}
-            self._background_tasks[agent_id] = {
-                "future": future,
-                "output_file": output_file,
-            }
+            # Register with BackgroundTaskManager for notification support
+            manager = BackgroundTaskManager.get_instance()
+            task_id = manager.start_subagent(
+                future=future,
+                agent_type=subagent_type,
+                description=description or prompt[:50],
+            )
 
-            log.info(f"Started background agent {agent_id}")
+            log.info(f"Started background agent {task_id}")
 
             return ToolResult.success_result(
                 data={
-                    "agent_id": agent_id,
+                    "task_id": task_id,
                     "status": "running",
                     "agent_type": subagent_type,
-                    "output_file": str(output_file),
+                    "message": "Sub-agent started in background. You'll be notified when it completes.",
                 },
                 suggestions=[
-                    f"Use task_output(agent_id='{agent_id}') to check results",
+                    f"Use task_output(task_id='{task_id}') to check results",
+                    f"Use kill_task(task_id='{task_id}') to stop it",
                 ],
             )
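The flow this hunk implies: run_subagent_async() returns a future, the future is handed to BackgroundTaskManager.get_instance().start_subagent(), and the returned task_id is later used with task_output / kill_task. The sketch below approximates only the registration step with a throwaway manager; the real BackgroundTaskManager lives in emdash_core/agent/background.py and its internals are not shown in this diff, so everything inside SketchTaskManager is an assumption.

    from concurrent.futures import ThreadPoolExecutor
    import uuid

    _executor = ThreadPoolExecutor(max_workers=2)


    class SketchTaskManager:
        """Throwaway stand-in for BackgroundTaskManager (get_instance/start_subagent only)."""

        _instance = None

        def __init__(self):
            self.tasks = {}

        @classmethod
        def get_instance(cls):
            if cls._instance is None:
                cls._instance = cls()
            return cls._instance

        def start_subagent(self, future, agent_type, description):
            # Hand back a short id the caller can pass to task_output/kill_task.
            task_id = str(uuid.uuid4())[:8]
            self.tasks[task_id] = {
                "future": future,
                "agent_type": agent_type,
                "description": description,
            }
            return task_id


    def fake_subagent(prompt: str) -> str:
        # Placeholder for the work run_subagent_async() would actually do.
        return f"done: {prompt}"


    future = _executor.submit(fake_subagent, "summarize repo layout")
    manager = SketchTaskManager.get_instance()
    task_id = manager.start_subagent(future, agent_type="explorer", description="summarize repo layout")
    print(task_id, manager.tasks[task_id]["future"].result())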