emdash-core 0.1.25__py3-none-any.whl → 0.1.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. emdash_core/agent/__init__.py +4 -0
  2. emdash_core/agent/agents.py +84 -23
  3. emdash_core/agent/events.py +42 -20
  4. emdash_core/agent/hooks.py +419 -0
  5. emdash_core/agent/inprocess_subagent.py +166 -18
  6. emdash_core/agent/prompts/__init__.py +4 -3
  7. emdash_core/agent/prompts/main_agent.py +67 -2
  8. emdash_core/agent/prompts/plan_mode.py +236 -107
  9. emdash_core/agent/prompts/subagents.py +103 -23
  10. emdash_core/agent/prompts/workflow.py +159 -26
  11. emdash_core/agent/providers/factory.py +2 -2
  12. emdash_core/agent/providers/openai_provider.py +67 -15
  13. emdash_core/agent/runner/__init__.py +49 -0
  14. emdash_core/agent/runner/agent_runner.py +765 -0
  15. emdash_core/agent/runner/context.py +470 -0
  16. emdash_core/agent/runner/factory.py +108 -0
  17. emdash_core/agent/runner/plan.py +217 -0
  18. emdash_core/agent/runner/sdk_runner.py +324 -0
  19. emdash_core/agent/runner/utils.py +67 -0
  20. emdash_core/agent/skills.py +47 -8
  21. emdash_core/agent/toolkit.py +46 -14
  22. emdash_core/agent/toolkits/__init__.py +117 -18
  23. emdash_core/agent/toolkits/base.py +87 -2
  24. emdash_core/agent/toolkits/explore.py +18 -0
  25. emdash_core/agent/toolkits/plan.py +27 -11
  26. emdash_core/agent/tools/__init__.py +2 -2
  27. emdash_core/agent/tools/coding.py +48 -4
  28. emdash_core/agent/tools/modes.py +151 -143
  29. emdash_core/agent/tools/task.py +52 -6
  30. emdash_core/api/agent.py +706 -1
  31. emdash_core/ingestion/repository.py +17 -198
  32. emdash_core/models/agent.py +4 -0
  33. emdash_core/skills/frontend-design/SKILL.md +56 -0
  34. emdash_core/sse/stream.py +4 -0
  35. {emdash_core-0.1.25.dist-info → emdash_core-0.1.37.dist-info}/METADATA +4 -1
  36. {emdash_core-0.1.25.dist-info → emdash_core-0.1.37.dist-info}/RECORD +38 -30
  37. emdash_core/agent/runner.py +0 -1123
  38. {emdash_core-0.1.25.dist-info → emdash_core-0.1.37.dist-info}/WHEEL +0 -0
  39. {emdash_core-0.1.25.dist-info → emdash_core-0.1.37.dist-info}/entry_points.txt +0 -0
emdash_core/agent/inprocess_subagent.py
@@ -16,6 +16,12 @@ from .toolkits import get_toolkit
 from .subagent_prompts import get_subagent_prompt
 from .providers import get_provider
 from .providers.factory import DEFAULT_MODEL
+from .context_manager import (
+    truncate_tool_output,
+    reduce_context_for_retry,
+    is_context_overflow_error,
+)
+from .runner.context import estimate_context_tokens
 from ..utils.logger import log
 
 
@@ -62,6 +68,7 @@ class InProcessSubAgent:
         model: Optional[str] = None,
         max_turns: int = 10,
         agent_id: Optional[str] = None,
+        thoroughness: str = "medium",
     ):
         """Initialize in-process sub-agent.
 
@@ -72,12 +79,14 @@
             model: Model to use (defaults to fast model)
             max_turns: Maximum iterations
             agent_id: Optional agent ID (generated if not provided)
+            thoroughness: Search thoroughness level (quick, medium, thorough)
         """
         self.subagent_type = subagent_type
         self.repo_root = repo_root.resolve()
         self.emitter = emitter
         self.max_turns = max_turns
         self.agent_id = agent_id or str(uuid.uuid4())[:8]
+        self.thoroughness = thoroughness
 
         # Get toolkit for this agent type
         self.toolkit = get_toolkit(subagent_type, repo_root)
@@ -86,13 +95,40 @@
         model_name = model or DEFAULT_MODEL
         self.provider = get_provider(model_name)
 
-        # Get system prompt
-        self.system_prompt = get_subagent_prompt(subagent_type)
+        # Get system prompt and inject thoroughness level
+        base_prompt = get_subagent_prompt(subagent_type, repo_root=repo_root)
+        self.system_prompt = self._inject_thoroughness(base_prompt)
 
         # Tracking
         self.files_explored: set[str] = set()
         self.tools_used: list[str] = []
 
+    def _inject_thoroughness(self, prompt: str) -> str:
+        """Inject thoroughness level into the system prompt."""
+        thoroughness_guidance = {
+            "quick": """
+## Thoroughness Level: QUICK
+- Do basic searches only - find the most obvious matches first
+- Stop after finding 2-3 relevant files
+- Don't explore deeply - just locate the key files
+- Prioritize speed over completeness""",
+            "medium": """
+## Thoroughness Level: MEDIUM
+- Do moderate exploration - check multiple locations
+- Follow 1-2 levels of imports/references
+- Balance speed with coverage
+- Stop when you have reasonable confidence""",
+            "thorough": """
+## Thoroughness Level: THOROUGH
+- Do comprehensive analysis across the codebase
+- Check multiple naming conventions and locations
+- Follow import chains and cross-references deeply
+- Explore edge cases and alternative implementations
+- Only stop when you've exhausted relevant areas""",
+        }
+        guidance = thoroughness_guidance.get(self.thoroughness, thoroughness_guidance["medium"])
+        return prompt + "\n" + guidance
+
     def _emit(self, event_type: str, **data) -> None:
         """Emit event with agent tagging.
 
@@ -115,6 +151,50 @@
         if event_type in event_map:
             self.emitter.emit(event_map[event_type], data)
 
+    def _get_project_context(self) -> str:
+        """Get PROJECT.md and directory structure for context."""
+        context_parts = []
+
+        # Try to read PROJECT.md
+        project_md = self.repo_root / "PROJECT.md"
+        if project_md.exists():
+            try:
+                content = project_md.read_text()
+                # Truncate if too long
+                if len(content) > 8000:
+                    content = content[:8000] + "\n...[truncated]"
+                context_parts.append(f"## PROJECT.md\n\n{content}")
+            except Exception as e:
+                log.debug(f"Could not read PROJECT.md: {e}")
+
+        # Get directory structure (top 2 levels)
+        try:
+            structure_lines = ["## Project Structure\n"]
+            for item in sorted(self.repo_root.iterdir()):
+                if item.name.startswith(".") and item.name not in (".emdash",):
+                    continue
+                if item.name in ("node_modules", "__pycache__", ".git", "dist", "build", ".venv", "venv"):
+                    continue
+                if item.is_dir():
+                    structure_lines.append(f"  {item.name}/")
+                    # Show first level contents
+                    try:
+                        for subitem in sorted(item.iterdir())[:10]:
+                            if not subitem.name.startswith("."):
+                                suffix = "/" if subitem.is_dir() else ""
+                                structure_lines.append(f"    {subitem.name}{suffix}")
+                        if len(list(item.iterdir())) > 10:
+                            structure_lines.append(f"    ...")
+                    except PermissionError:
+                        pass
+                else:
+                    structure_lines.append(f"  {item.name}")
+            context_parts.append("\n".join(structure_lines))
+        except Exception as e:
+            log.debug(f"Could not get directory structure: {e}")
+
+        return "\n\n".join(context_parts) if context_parts else ""
+
     def run(self, prompt: str) -> SubAgentResult:
         """Execute the task and return results.
 
@@ -130,6 +210,19 @@
         last_content = ""
         error = None
 
+        # For Plan agents, inject project context
+        if self.subagent_type == "Plan":
+            context = self._get_project_context()
+            if context:
+                prompt = f"""Here is context about the project:
+
+{context}
+
+---
+
+Now, your task:
+{prompt}"""
+
         # Add user message
         messages.append({"role": "user", "content": prompt})
 
@@ -147,12 +240,39 @@
 
             log.debug(f"SubAgent {self.agent_id} turn {iterations}/{self.max_turns}")
 
-            # Call LLM
-            response = self.provider.chat(
-                messages=messages,
-                tools=self.toolkit.get_all_schemas(),
-                system=self.system_prompt,
-            )
+            # Check context size and compact if needed
+            context_tokens = estimate_context_tokens(messages, self.system_prompt)
+            context_limit = self.provider.get_context_limit()
+
+            if context_tokens > context_limit * 0.8:
+                log.info(
+                    f"SubAgent {self.agent_id} context at {context_tokens:,}/{context_limit:,} "
+                    f"({context_tokens/context_limit:.0%}), reducing..."
+                )
+                messages = reduce_context_for_retry(messages, keep_recent=6)
+
+            # Call LLM with retry on context overflow
+            response = None
+            max_retries = 2
+            for retry in range(max_retries + 1):
+                try:
+                    response = self.provider.chat(
+                        messages=messages,
+                        tools=self.toolkit.get_all_schemas(),
+                        system=self.system_prompt,
+                    )
+                    break  # Success
+                except Exception as e:
+                    if is_context_overflow_error(e) and retry < max_retries:
+                        log.warning(
+                            f"SubAgent {self.agent_id} context overflow on attempt {retry + 1}, reducing..."
+                        )
+                        messages = reduce_context_for_retry(messages, keep_recent=4 - retry)
+                    else:
+                        raise  # Re-raise if not overflow or out of retries
+
+            if response is None:
+                raise RuntimeError("Failed to get response from LLM")
 
             # Add assistant response
             assistant_msg = self.provider.format_assistant_message(response)
@@ -196,10 +316,12 @@
                         summary=summary,
                     )
 
-                    # Add tool result to messages
+                    # Add tool result to messages (truncated to avoid context overflow)
+                    tool_output = json.dumps(result.to_dict(), indent=2)
+                    tool_output = truncate_tool_output(tool_output, max_tokens=15000)
                     tool_result_msg = self.provider.format_tool_result(
                         tool_call.id,
-                        json.dumps(result.to_dict(), indent=2),
+                        tool_output,
                     )
                     if tool_result_msg:
                         messages.append(tool_result_msg)
@@ -265,6 +387,7 @@ def run_subagent(
     emitter=None,
     model: Optional[str] = None,
    max_turns: int = 10,
+    thoroughness: str = "medium",
 ) -> SubAgentResult:
     """Run a sub-agent synchronously.
 
@@ -275,18 +398,38 @@
         emitter: Event emitter
         model: Model to use
         max_turns: Max iterations
+        thoroughness: Search thoroughness level (quick, medium, thorough)
 
     Returns:
         SubAgentResult
     """
-    agent = InProcessSubAgent(
-        subagent_type=subagent_type,
-        repo_root=repo_root,
-        emitter=emitter,
-        model=model,
-        max_turns=max_turns,
-    )
-    return agent.run(prompt)
+    try:
+        agent = InProcessSubAgent(
+            subagent_type=subagent_type,
+            repo_root=repo_root,
+            emitter=emitter,
+            model=model,
+            max_turns=max_turns,
+            thoroughness=thoroughness,
+        )
+        return agent.run(prompt)
+    except Exception as e:
+        # Return a proper error result instead of letting the exception propagate
+        # This prevents 0.0s "silent" failures and gives clear error messages
+        log.error(f"Failed to create sub-agent: {e}")
+        return SubAgentResult(
+            success=False,
+            agent_type=subagent_type,
+            agent_id="init-failed",
+            task=prompt,
+            summary="",
+            files_explored=[],
+            findings=[],
+            iterations=0,
+            tools_used=[],
+            execution_time=0.0,
+            error=f"Sub-agent initialization failed: {e}",
+        )
 
 
 def run_subagent_async(
@@ -296,6 +439,7 @@ def run_subagent_async(
     emitter=None,
     model: Optional[str] = None,
     max_turns: int = 10,
+    thoroughness: str = "medium",
 ) -> Future[SubAgentResult]:
     """Run a sub-agent asynchronously (returns Future).
 
@@ -306,6 +450,7 @@
         emitter: Event emitter
         model: Model to use
         max_turns: Max iterations
+        thoroughness: Search thoroughness level (quick, medium, thorough)
 
     Returns:
         Future[SubAgentResult] - call .result() to get result
@@ -319,6 +464,7 @@
         emitter=emitter,
         model=model,
         max_turns=max_turns,
+        thoroughness=thoroughness,
     )
 
 
@@ -335,6 +481,7 @@ def run_subagents_parallel(
             - prompt: str
             - model: str (optional)
            - max_turns: int (optional)
+            - thoroughness: str (optional, default "medium")
         repo_root: Repository root
         emitter: Shared event emitter
 
@@ -350,6 +497,7 @@
             emitter=emitter,
             model=task.get("model"),
             max_turns=task.get("max_turns", 10),
+            thoroughness=task.get("thoroughness", "medium"),
         )
         futures.append(future)
 
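For orientation, the thoroughness value added in these hunks flows from run_subagent and run_subagents_parallel into InProcessSubAgent, which appends the matching guidance block to the sub-agent's system prompt. A minimal usage sketch against the signatures shown above; the "Explore" agent type, the repository path, and the tasks keyword are illustrative assumptions, and attribute access on the result assumes SubAgentResult exposes its constructor arguments as fields.

from pathlib import Path

from emdash_core.agent.inprocess_subagent import run_subagent, run_subagents_parallel

# Single sub-agent call; keyword names mirror the updated signature above.
# "Explore" and the path are placeholders, not values confirmed by this diff.
result = run_subagent(
    subagent_type="Explore",
    prompt="Locate where tool schemas are registered",
    repo_root=Path("/path/to/repo"),
    thoroughness="thorough",
)
if not result.success:
    print(result.error)

# Parallel form: each task dict may carry its own thoroughness (default "medium").
run_subagents_parallel(
    tasks=[
        {"subagent_type": "Explore", "prompt": "Map the toolkits package", "thoroughness": "quick"},
        {"subagent_type": "Plan", "prompt": "Outline a refactor of the runner"},
    ],
    repo_root=Path("/path/to/repo"),
)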
emdash_core/agent/prompts/__init__.py
@@ -15,11 +15,12 @@ from .workflow import (
 )
 from .main_agent import (
     BASE_SYSTEM_PROMPT,
+    CODE_MODE_PROMPT,
+    PLAN_MODE_PROMPT,
     build_system_prompt,
     build_tools_section,
 )
 from .subagents import SUBAGENT_PROMPTS, get_subagent_prompt
-from .plan_mode import PLAN_MODE_PROMPT
 
 __all__ = [
     # Workflow patterns
@@ -33,11 +34,11 @@ __all__ = [
     "PARALLEL_EXECUTION",
     # Main agent
     "BASE_SYSTEM_PROMPT",
+    "CODE_MODE_PROMPT",
+    "PLAN_MODE_PROMPT",
     "build_system_prompt",
     "build_tools_section",
     # Sub-agents
     "SUBAGENT_PROMPTS",
     "get_subagent_prompt",
-    # Plan mode
-    "PLAN_MODE_PROMPT",
 ]
emdash_core/agent/prompts/main_agent.py
@@ -9,13 +9,22 @@ from .workflow import (
     EXPLORATION_STRATEGY,
     OUTPUT_GUIDELINES,
     PARALLEL_EXECUTION,
+    TODO_LIST_GUIDANCE,
 )
 
 # Base system prompt template with placeholder for tools
-BASE_SYSTEM_PROMPT = """You are a code exploration and implementation assistant. You orchestrate focused sub-agents for exploration while maintaining the high-level view.
+_BASE_PROMPT = """You are a code exploration and implementation assistant. You orchestrate focused sub-agents for exploration while maintaining the high-level view.
 
 {tools_section}
-""" + WORKFLOW_PATTERNS + PARALLEL_EXECUTION + EXPLORATION_STRATEGY + OUTPUT_GUIDELINES
+"""
+
+# Main agent system prompt - same for both code and plan modes
+# Main agent is always an orchestrator that delegates to subagents
+BASE_SYSTEM_PROMPT = _BASE_PROMPT + WORKFLOW_PATTERNS + PARALLEL_EXECUTION + EXPLORATION_STRATEGY + TODO_LIST_GUIDANCE + OUTPUT_GUIDELINES
+
+# Legacy aliases
+CODE_MODE_PROMPT = BASE_SYSTEM_PROMPT
+PLAN_MODE_PROMPT = BASE_SYSTEM_PROMPT
 
 
 def build_system_prompt(toolkit) -> str:
@@ -28,9 +37,21 @@ def build_system_prompt(toolkit) -> str:
         Complete system prompt string
     """
     tools_section = build_tools_section(toolkit)
+    agents_section = build_agents_section(toolkit)
     skills_section = build_skills_section()
+    rules_section = build_rules_section()
+
+    # Main agent always uses the same prompt - it orchestrates and delegates
     prompt = BASE_SYSTEM_PROMPT.format(tools_section=tools_section)
 
+    # Add agents section so main agent knows what agents are available
+    if agents_section:
+        prompt += "\n" + agents_section
+
+    # Add rules section if there are rules defined
+    if rules_section:
+        prompt += "\n" + rules_section
+
     # Add skills section if there are skills available
     if skills_section:
         prompt += "\n" + skills_section
@@ -38,6 +59,20 @@
     return prompt
 
 
+def build_rules_section() -> str:
+    """Build the rules section of the system prompt.
+
+    Loads rules from .emdash/rules/*.md files.
+
+    Returns:
+        Formatted string with project rules, or empty string if none
+    """
+    from ..rules import load_rules, format_rules_for_prompt
+
+    rules = load_rules()
+    return format_rules_for_prompt(rules)
+
+
 def build_skills_section() -> str:
     """Build the skills section of the system prompt.
 
@@ -50,6 +85,36 @@ def build_skills_section() -> str:
     return registry.get_skills_for_prompt()
 
 
+def build_agents_section(toolkit) -> str:
+    """Build the agents section describing available sub-agents.
+
+    Args:
+        toolkit: The agent toolkit (to access repo_root)
+
+    Returns:
+        Formatted string with agent descriptions, or empty string if none
+    """
+    from ..toolkits import get_agents_with_descriptions
+
+    repo_root = getattr(toolkit, '_repo_root', None)
+    agents = get_agents_with_descriptions(repo_root)
+
+    if not agents:
+        return ""
+
+    lines = [
+        "## Available Agents",
+        "",
+        "Use the `task` tool to delegate work to these specialized agents:",
+        "",
+    ]
+
+    for agent in agents:
+        lines.append(f"- **{agent['name']}**: {agent['description']}")
+
+    return "\n".join(lines)
+
+
 def build_tools_section(toolkit) -> str:
     """Build the tools section of the system prompt from registered tools.
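For reference, the main_agent.py hunks above fix the order in which the system prompt is assembled. A hedged sketch of invoking the builder; the use of get_toolkit here, the "Explore" agent type, and the path are illustrative assumptions, since this diff does not show how the main agent's toolkit is actually constructed.

from pathlib import Path

from emdash_core.agent.toolkits import get_toolkit
from emdash_core.agent.prompts import build_system_prompt

# Assumed wiring: get_toolkit(subagent_type, repo_root) appears in this diff,
# but whether its result is what build_system_prompt expects is an assumption.
toolkit = get_toolkit("Explore", Path("/path/to/repo"))
prompt = build_system_prompt(toolkit)

# Per the hunks above, the assembled prompt layers, in order:
#   BASE_SYSTEM_PROMPT with {tools_section} filled in
#   the "## Available Agents" block from build_agents_section (if any agents exist)
#   project rules loaded from .emdash/rules/*.md via build_rules_section (if any)
#   the skills section from build_skills_section (if any skills are available)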