emdash_core-0.1.7-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/__init__.py +3 -0
- emdash_core/agent/__init__.py +37 -0
- emdash_core/agent/agents.py +225 -0
- emdash_core/agent/code_reviewer.py +476 -0
- emdash_core/agent/compaction.py +143 -0
- emdash_core/agent/context_manager.py +140 -0
- emdash_core/agent/events.py +338 -0
- emdash_core/agent/handlers.py +224 -0
- emdash_core/agent/inprocess_subagent.py +377 -0
- emdash_core/agent/mcp/__init__.py +50 -0
- emdash_core/agent/mcp/client.py +346 -0
- emdash_core/agent/mcp/config.py +302 -0
- emdash_core/agent/mcp/manager.py +496 -0
- emdash_core/agent/mcp/tool_factory.py +213 -0
- emdash_core/agent/prompts/__init__.py +38 -0
- emdash_core/agent/prompts/main_agent.py +104 -0
- emdash_core/agent/prompts/subagents.py +131 -0
- emdash_core/agent/prompts/workflow.py +136 -0
- emdash_core/agent/providers/__init__.py +34 -0
- emdash_core/agent/providers/base.py +143 -0
- emdash_core/agent/providers/factory.py +80 -0
- emdash_core/agent/providers/models.py +220 -0
- emdash_core/agent/providers/openai_provider.py +463 -0
- emdash_core/agent/providers/transformers_provider.py +217 -0
- emdash_core/agent/research/__init__.py +81 -0
- emdash_core/agent/research/agent.py +143 -0
- emdash_core/agent/research/controller.py +254 -0
- emdash_core/agent/research/critic.py +428 -0
- emdash_core/agent/research/macros.py +469 -0
- emdash_core/agent/research/planner.py +449 -0
- emdash_core/agent/research/researcher.py +436 -0
- emdash_core/agent/research/state.py +523 -0
- emdash_core/agent/research/synthesizer.py +594 -0
- emdash_core/agent/reviewer_profile.py +475 -0
- emdash_core/agent/rules.py +123 -0
- emdash_core/agent/runner.py +601 -0
- emdash_core/agent/session.py +262 -0
- emdash_core/agent/spec_schema.py +66 -0
- emdash_core/agent/specification.py +479 -0
- emdash_core/agent/subagent.py +397 -0
- emdash_core/agent/subagent_prompts.py +13 -0
- emdash_core/agent/toolkit.py +482 -0
- emdash_core/agent/toolkits/__init__.py +64 -0
- emdash_core/agent/toolkits/base.py +96 -0
- emdash_core/agent/toolkits/explore.py +47 -0
- emdash_core/agent/toolkits/plan.py +55 -0
- emdash_core/agent/tools/__init__.py +141 -0
- emdash_core/agent/tools/analytics.py +436 -0
- emdash_core/agent/tools/base.py +131 -0
- emdash_core/agent/tools/coding.py +484 -0
- emdash_core/agent/tools/github_mcp.py +592 -0
- emdash_core/agent/tools/history.py +13 -0
- emdash_core/agent/tools/modes.py +153 -0
- emdash_core/agent/tools/plan.py +206 -0
- emdash_core/agent/tools/plan_write.py +135 -0
- emdash_core/agent/tools/search.py +412 -0
- emdash_core/agent/tools/spec.py +341 -0
- emdash_core/agent/tools/task.py +262 -0
- emdash_core/agent/tools/task_output.py +204 -0
- emdash_core/agent/tools/tasks.py +454 -0
- emdash_core/agent/tools/traversal.py +588 -0
- emdash_core/agent/tools/web.py +179 -0
- emdash_core/analytics/__init__.py +5 -0
- emdash_core/analytics/engine.py +1286 -0
- emdash_core/api/__init__.py +5 -0
- emdash_core/api/agent.py +308 -0
- emdash_core/api/agents.py +154 -0
- emdash_core/api/analyze.py +264 -0
- emdash_core/api/auth.py +173 -0
- emdash_core/api/context.py +77 -0
- emdash_core/api/db.py +121 -0
- emdash_core/api/embed.py +131 -0
- emdash_core/api/feature.py +143 -0
- emdash_core/api/health.py +93 -0
- emdash_core/api/index.py +162 -0
- emdash_core/api/plan.py +110 -0
- emdash_core/api/projectmd.py +210 -0
- emdash_core/api/query.py +320 -0
- emdash_core/api/research.py +122 -0
- emdash_core/api/review.py +161 -0
- emdash_core/api/router.py +76 -0
- emdash_core/api/rules.py +116 -0
- emdash_core/api/search.py +119 -0
- emdash_core/api/spec.py +99 -0
- emdash_core/api/swarm.py +223 -0
- emdash_core/api/tasks.py +109 -0
- emdash_core/api/team.py +120 -0
- emdash_core/auth/__init__.py +17 -0
- emdash_core/auth/github.py +389 -0
- emdash_core/config.py +74 -0
- emdash_core/context/__init__.py +52 -0
- emdash_core/context/models.py +50 -0
- emdash_core/context/providers/__init__.py +11 -0
- emdash_core/context/providers/base.py +74 -0
- emdash_core/context/providers/explored_areas.py +183 -0
- emdash_core/context/providers/touched_areas.py +360 -0
- emdash_core/context/registry.py +73 -0
- emdash_core/context/reranker.py +199 -0
- emdash_core/context/service.py +260 -0
- emdash_core/context/session.py +352 -0
- emdash_core/core/__init__.py +104 -0
- emdash_core/core/config.py +454 -0
- emdash_core/core/exceptions.py +55 -0
- emdash_core/core/models.py +265 -0
- emdash_core/core/review_config.py +57 -0
- emdash_core/db/__init__.py +67 -0
- emdash_core/db/auth.py +134 -0
- emdash_core/db/models.py +91 -0
- emdash_core/db/provider.py +222 -0
- emdash_core/db/providers/__init__.py +5 -0
- emdash_core/db/providers/supabase.py +452 -0
- emdash_core/embeddings/__init__.py +24 -0
- emdash_core/embeddings/indexer.py +534 -0
- emdash_core/embeddings/models.py +192 -0
- emdash_core/embeddings/providers/__init__.py +7 -0
- emdash_core/embeddings/providers/base.py +112 -0
- emdash_core/embeddings/providers/fireworks.py +141 -0
- emdash_core/embeddings/providers/openai.py +104 -0
- emdash_core/embeddings/registry.py +146 -0
- emdash_core/embeddings/service.py +215 -0
- emdash_core/graph/__init__.py +26 -0
- emdash_core/graph/builder.py +134 -0
- emdash_core/graph/connection.py +692 -0
- emdash_core/graph/schema.py +416 -0
- emdash_core/graph/writer.py +667 -0
- emdash_core/ingestion/__init__.py +7 -0
- emdash_core/ingestion/change_detector.py +150 -0
- emdash_core/ingestion/git/__init__.py +5 -0
- emdash_core/ingestion/git/commit_analyzer.py +196 -0
- emdash_core/ingestion/github/__init__.py +6 -0
- emdash_core/ingestion/github/pr_fetcher.py +296 -0
- emdash_core/ingestion/github/task_extractor.py +100 -0
- emdash_core/ingestion/orchestrator.py +540 -0
- emdash_core/ingestion/parsers/__init__.py +10 -0
- emdash_core/ingestion/parsers/base_parser.py +66 -0
- emdash_core/ingestion/parsers/call_graph_builder.py +121 -0
- emdash_core/ingestion/parsers/class_extractor.py +154 -0
- emdash_core/ingestion/parsers/function_extractor.py +202 -0
- emdash_core/ingestion/parsers/import_analyzer.py +119 -0
- emdash_core/ingestion/parsers/python_parser.py +123 -0
- emdash_core/ingestion/parsers/registry.py +72 -0
- emdash_core/ingestion/parsers/ts_ast_parser.js +313 -0
- emdash_core/ingestion/parsers/typescript_parser.py +278 -0
- emdash_core/ingestion/repository.py +346 -0
- emdash_core/models/__init__.py +38 -0
- emdash_core/models/agent.py +68 -0
- emdash_core/models/index.py +77 -0
- emdash_core/models/query.py +113 -0
- emdash_core/planning/__init__.py +7 -0
- emdash_core/planning/agent_api.py +413 -0
- emdash_core/planning/context_builder.py +265 -0
- emdash_core/planning/feature_context.py +232 -0
- emdash_core/planning/feature_expander.py +646 -0
- emdash_core/planning/llm_explainer.py +198 -0
- emdash_core/planning/similarity.py +509 -0
- emdash_core/planning/team_focus.py +821 -0
- emdash_core/server.py +153 -0
- emdash_core/sse/__init__.py +5 -0
- emdash_core/sse/stream.py +196 -0
- emdash_core/swarm/__init__.py +17 -0
- emdash_core/swarm/merge_agent.py +383 -0
- emdash_core/swarm/session_manager.py +274 -0
- emdash_core/swarm/swarm_runner.py +226 -0
- emdash_core/swarm/task_definition.py +137 -0
- emdash_core/swarm/worker_spawner.py +319 -0
- emdash_core/swarm/worktree_manager.py +278 -0
- emdash_core/templates/__init__.py +10 -0
- emdash_core/templates/defaults/agent-builder.md.template +82 -0
- emdash_core/templates/defaults/focus.md.template +115 -0
- emdash_core/templates/defaults/pr-review-enhanced.md.template +309 -0
- emdash_core/templates/defaults/pr-review.md.template +80 -0
- emdash_core/templates/defaults/project.md.template +85 -0
- emdash_core/templates/defaults/research_critic.md.template +112 -0
- emdash_core/templates/defaults/research_planner.md.template +85 -0
- emdash_core/templates/defaults/research_synthesizer.md.template +128 -0
- emdash_core/templates/defaults/reviewer.md.template +81 -0
- emdash_core/templates/defaults/spec.md.template +41 -0
- emdash_core/templates/defaults/tasks.md.template +78 -0
- emdash_core/templates/loader.py +296 -0
- emdash_core/utils/__init__.py +45 -0
- emdash_core/utils/git.py +84 -0
- emdash_core/utils/image.py +502 -0
- emdash_core/utils/logger.py +51 -0
- emdash_core-0.1.7.dist-info/METADATA +35 -0
- emdash_core-0.1.7.dist-info/RECORD +187 -0
- emdash_core-0.1.7.dist-info/WHEEL +4 -0
- emdash_core-0.1.7.dist-info/entry_points.txt +3 -0
emdash_core/agent/specification.py

@@ -0,0 +1,479 @@

```python
"""Specification agent for generating detailed feature specs."""

import json
import os
import re
from pathlib import Path
from typing import Optional

from rich.console import Console
from rich.panel import Panel
from rich.prompt import Prompt, Confirm

from .toolkit import AgentToolkit
from .runner import SafeJSONEncoder
from .providers import get_provider
from .providers.factory import DEFAULT_MODEL
from ..templates import load_template_for_agent
from .spec_schema import Spec, SPEC_TEMPLATE
from .events import AgentEventEmitter, EventType, NullEmitter


# Tool schema for asking clarification questions (OpenAI function calling format)
ASK_CLARIFICATION_TOOL = {
    "type": "function",
    "function": {
        "name": "ask_clarification",
        "description": "Ask the user a clarification question when you need more information to write the spec. Use this instead of outputting JSON questions.",
        "parameters": {
            "type": "object",
            "properties": {
                "question": {
                    "type": "string",
                    "description": "The question to ask the user",
                },
                "context": {
                    "type": "string",
                    "description": "Brief context explaining why you're asking",
                },
                "options": {
                    "type": "array",
                    "items": {"type": "string"},
                    "description": "Optional suggested answers to help the user",
                },
            },
            "required": ["question"],
        },
    },
}


SUBMIT_SPEC_TOOL = {
    "type": "function",
    "function": {
        "name": "submit_spec",
        "description": "Submit the final specification in markdown format.",
        "parameters": {
            "type": "object",
            "properties": {
                "title": {
                    "type": "string",
                    "description": "Feature name/title",
                },
                "content": {
                    "type": "string",
                    "description": "Markdown content of the spec including problem, solution, implementation steps, related files, edge cases, etc.",
                },
            },
            "required": ["title", "content"],
        },
    },
}


class SpecificationAgent:
    """Agent that generates detailed feature specifications."""

    MAX_TOOL_RESULT_SIZE = 8000
    MAX_CLARIFICATION_ROUNDS = 10

    def __init__(
        self,
        model: str = DEFAULT_MODEL,
        verbose: bool = True,
        max_iterations: int = 30,
        project_md_path: Optional[str] = None,
        show_tool_results: bool = False,
        emitter: Optional[AgentEventEmitter] = None,
        interactive: bool = True,
    ):
        """Initialize the specification agent.

        Args:
            model: LLM model to use (claude-* for Anthropic, gpt-* for OpenAI)
            verbose: Whether to print progress
            max_iterations: Maximum tool call iterations
            project_md_path: Path to PROJECT.md file
            show_tool_results: Whether to print full tool results (--verbose)
            emitter: Event emitter for unified message stream
            interactive: Whether to allow interactive prompts (False for JSON mode)
        """
        self.provider = get_provider(model)
        self.toolkit = AgentToolkit(enable_session=True)
        self.model = model
        self.verbose = verbose
        self.show_tool_results = show_tool_results
        self.max_iterations = max_iterations
        self.context_limit = self.provider.get_context_limit()
        self.console = Console()
        self.messages: list[dict] = []
        self.emitter = emitter or NullEmitter(agent_name="SpecificationAgent")
        self.interactive = interactive
        self.project_context = self._load_project_md(project_md_path)

    def _load_project_md(self, path: Optional[str] = None) -> str:
        """Load PROJECT.md if it exists."""
        search_paths = [
            path,
            "PROJECT.md",
            "./PROJECT.md",
            "../PROJECT.md",
        ]

        for p in search_paths:
            if p and os.path.exists(p):
                with open(p, "r") as f:
                    content = f.read()
                if self.verbose:
                    self.console.print(f"[dim]Loaded project context from {p}[/dim]")
                return content

        return ""

    def generate_spec(self, feature_description: str) -> Spec:
        """Generate a specification for a feature.

        Args:
            feature_description: Description of the feature to spec

        Returns:
            The generated specification
        """
        # Emit session start
        self.emitter.emit(EventType.SESSION_START, {
            "agent_name": "Specification Agent",
            "model": self.model,
            "feature": feature_description,
        })

        if self.verbose:
            self.console.print(
                Panel(
                    f"[cyan]Generating specification for:[/cyan]\n{feature_description}",
                    title="[bold]Specification Agent[/bold]",
                    border_style="cyan",
                )
            )

        # Build initial context
        spec_template = load_template_for_agent("spec")
        system_content = f"""{spec_template}

## Spec Format
Write your spec as free-form markdown. Include:

{SPEC_TEMPLATE}
"""
        if self.project_context:
            system_content = f"""## PROJECT.md - READ THIS FIRST

This is the project's constitution. Use this vocabulary and these concepts in your spec.

{self.project_context}

---

{system_content}"""
        else:
            if self.verbose:
                self.console.print("[yellow]Warning: No PROJECT.md found. Spec may not use project-native terminology.[/yellow]")

        self.messages = [
            {"role": "system", "content": system_content},
            {"role": "user", "content": f"Create a specification for this feature:\n\n{feature_description}\n\nMANDATORY SEQUENCE:\n1. Call plan_exploration with the feature goal and use_case=\"spec\"\n2. Follow the recommended tools in order - do NOT skip steps\n3. Use ask_clarification if you need to ask me any questions\n4. Submit the final spec using submit_spec with title and markdown content"},
        ]

        # Add ask_clarification tool to the toolkit tools
        # Filter out write tools - spec agent should only explore, not modify files
        WRITE_TOOLS = {'write_to_file', 'apply_diff', 'delete_file', 'execute_command'}
        read_only_schemas = [
            schema for schema in self.toolkit.get_all_schemas()
            if schema.get('function', {}).get('name') not in WRITE_TOOLS
        ]
        tools = read_only_schemas + [ASK_CLARIFICATION_TOOL, SUBMIT_SPEC_TOOL]
        clarification_rounds = 0

        # Main loop: explore, clarify, generate
        iterations = 0
        while iterations < self.max_iterations:
            iterations += 1

            response = self.provider.chat(self.messages, tools=tools)
            self.messages.append(self.provider.format_assistant_message(response))

            if response.tool_calls:
                # Execute tool calls
                for tool_call in response.tool_calls:
                    # Handle ask_clarification specially
                    if tool_call.name == "ask_clarification":
                        clarification_rounds += 1
                        if clarification_rounds > self.MAX_CLARIFICATION_ROUNDS:
                            result = {"answer": "No more questions needed. Please generate the specification with the information you have."}
                        else:
                            result = self._handle_clarification_tool(tool_call)
                        result_json = json.dumps(result, cls=SafeJSONEncoder)
                        self.messages.append(
                            self.provider.format_tool_result(tool_call.id, result_json)
                        )
                    elif tool_call.name == "submit_spec":
                        try:
                            args = json.loads(tool_call.arguments)
                        except json.JSONDecodeError:
                            args = {}

                        title = args.get("title", "Untitled Spec")
                        content = args.get("content", "")

                        if not content:
                            error_result = {"success": False, "error": "Content is required"}
                            result_json = json.dumps(error_result, cls=SafeJSONEncoder)
                            self.messages.append(
                                self.provider.format_tool_result(tool_call.id, result_json)
                            )
                            continue

                        spec = Spec(title=title, content=content)
                        success_result = {"success": True}
                        result_json = json.dumps(success_result, cls=SafeJSONEncoder)
                        self.messages.append(
                            self.provider.format_tool_result(tool_call.id, result_json)
                        )
                        self.emitter.emit_response(spec.to_markdown())
                        self.emitter.emit(EventType.SESSION_END, {"success": True})
                        return spec
                    else:
                        result = self._execute_tool_call(tool_call)
                        result_json = json.dumps(result, cls=SafeJSONEncoder)
                        self.messages.append(
                            self.provider.format_tool_result(tool_call.id, result_json)
                        )
            else:
                # Check if response was truncated due to max_tokens
                if response.stop_reason == "max_tokens":
                    if self.verbose:
                        self.console.print("[yellow]Response truncated (max_tokens). Requesting continuation...[/yellow]")
                    self.messages.append({
                        "role": "user",
                        "content": "Your response was cut off. Please continue generating the specification.",
                    })
                    continue

                content = (response.content or "").strip()
                if content:
                    # Try to parse as markdown spec
                    spec = Spec.from_markdown(content)
                    if spec.content:
                        self.emitter.emit_response(spec.to_markdown())
                        self.emitter.emit(EventType.SESSION_END, {"success": True})
                        return spec

                # Response doesn't match required format
                self.messages.append({
                    "role": "user",
                    "content": "Please submit the spec using submit_spec with a title and markdown content.",
                })
                continue

        # Max iterations - generate what we have
        if self.verbose:
            self.console.print("[yellow]Max iterations reached, generating specification...[/yellow]")

        self.messages.append({
            "role": "user",
            "content": "Generate the specification NOW. Submit it using submit_spec with title and content.",
        })

        response = self.provider.chat(self.messages, tools=[SUBMIT_SPEC_TOOL])
        if response.tool_calls:
            for tool_call in response.tool_calls:
                if tool_call.name == "submit_spec":
                    try:
                        args = json.loads(tool_call.arguments)
                        title = args.get("title", "Untitled Spec")
                        content = args.get("content", "")
                        spec = Spec(title=title, content=content)
                        success_result = {"success": True}
                        result_json = json.dumps(success_result, cls=SafeJSONEncoder)
                        self.messages.append(
                            self.provider.format_tool_result(tool_call.id, result_json)
                        )
                        self.emitter.emit_response(spec.to_markdown())
                        self.emitter.emit(EventType.SESSION_END, {"success": True})
                        return spec
                    except Exception:
                        error_result = {"success": False, "error": "Invalid spec."}
                        result_json = json.dumps(error_result, cls=SafeJSONEncoder)
                        self.messages.append(
                            self.provider.format_tool_result(tool_call.id, result_json)
                        )
                    break

        raise ValueError("Failed to generate a valid specification.")

    def _handle_clarification_tool(self, tool_call) -> dict:
        """Handle the ask_clarification tool call by prompting the user."""
        try:
            args = json.loads(tool_call.arguments)
        except json.JSONDecodeError:
            args = {}

        question = args.get("question", "What would you like to clarify?")
        context = args.get("context", "")
        options = args.get("options", [])

        # Emit clarification event
        self.emitter.emit_clarification(question, context, options)

        # In non-interactive mode, auto-select first option or provide default
        if not self.interactive:
            if options:
                answer = options[0]  # Use first suggested option
            else:
                answer = "Please proceed with your best judgment based on the codebase analysis."

            # Emit clarification response
            self.emitter.emit(EventType.CLARIFICATION_RESPONSE, {"answer": answer, "auto": True})

            return {
                "answer": answer,
                "note": "Auto-selected in non-interactive mode",
                "instruction": "Now submit the specification using submit_spec.",
            }

        # Display the question nicely (interactive mode)
        self.console.print()
        self.console.print(Panel(
            f"[bold]{question}[/bold]" +
            (f"\n\n[dim]{context}[/dim]" if context else ""),
            title="[yellow]Clarification Needed[/yellow]",
            border_style="yellow",
        ))

        # Show options if available
        if options:
            self.console.print("[dim]Suggested options:[/dim]")
            for i, opt in enumerate(options, 1):
                self.console.print(f" [cyan]{i}.[/cyan] {opt}")
            self.console.print()

        # Get answer from user
        answer = Prompt.ask("[bold green]Your answer[/bold green]")

        # Emit clarification response
        self.emitter.emit(EventType.CLARIFICATION_RESPONSE, {"answer": answer})

        return {
            "answer": answer,
            "instruction": "Now submit the specification using submit_spec.",
        }

    def _truncate_data(self, data: dict) -> dict:
        """Truncate data to fit within size limits."""
        serialized = json.dumps(data, cls=SafeJSONEncoder)
        if len(serialized) <= self.MAX_TOOL_RESULT_SIZE:
            return data

        truncated = {}
        for key, value in data.items():
            if isinstance(value, list) and len(value) > 10:
                truncated[key] = value[:10]
                truncated[f"{key}_truncated"] = True
                truncated[f"{key}_total"] = len(value)
            elif isinstance(value, dict):
                truncated[key] = self._truncate_data(value)
            else:
                truncated[key] = value

        return truncated

    def _execute_tool_call(self, tool_call) -> dict:
        """Execute a tool call and return the result."""
        name = tool_call.name
        try:
            args = json.loads(tool_call.arguments)
        except json.JSONDecodeError:
            args = {}

        # Emit tool start event
        self.emitter.emit_tool_start(name, args)

        result = self.toolkit.execute(name, **args)

        # Build summary for the event
        summary = None
        if result.success and result.data:
            if "results" in result.data:
                summary = f"{len(result.data['results'])} results"
            elif "summary" in result.data:
                s = result.data["summary"]
                summary = f"{s.get('function_count', 0)} functions, {s.get('class_count', 0)} classes"
        elif not result.success:
            summary = result.error

        # Emit tool result event
        self.emitter.emit_tool_result(
            name=name,
            success=result.success,
            summary=summary,
            data=result.data if self.show_tool_results else None,
        )

        if self.verbose:
            self._print_tool_call(name, args, result)

        if result.success:
            data = self._truncate_data(result.data)
            return {
                "success": True,
                "data": data,
                "suggestions": result.suggestions,
            }
        else:
            return {
                "success": False,
                "error": result.error,
                "suggestions": result.suggestions,
            }

    def _print_tool_call(self, name: str, args: dict, result):
        """Print concise tool call info."""
        status = "[green]✓[/green]" if result.success else "[red]✗[/red]"

        args_str = ""
        if args:
            key_args = []
            for k, v in list(args.items())[:2]:
                if isinstance(v, str) and len(v) > 30:
                    v = v[:30] + "..."
                key_args.append(f"{k}={v}")
            args_str = f" ({', '.join(key_args)})"

        result_str = ""
        if result.success and result.data:
            if "results" in result.data:
                result_str = f" → {len(result.data['results'])} results"
            elif "summary" in result.data:
                s = result.data["summary"]
                result_str = f" → {s.get('function_count', 0)} functions, {s.get('class_count', 0)} classes"

        self.console.print(f" {status} [cyan]{name}[/cyan]{args_str}{result_str}")

        # Print full results if --verbose flag is set
        if self.show_tool_results and result.success and result.data:
            self.console.print()
            self.console.print(f" [dim]─── {name} result ───[/dim]")
            result_json = json.dumps(result.data, indent=2, default=str)
            # Truncate very long results
            if len(result_json) > 3000:
                result_json = result_json[:3000] + "\n... (truncated)"
            self.console.print(f" [dim]{result_json}[/dim]")
            self.console.print()


def slugify(text: str) -> str:
    """Convert text to a slug for directory names."""
    # Lowercase and replace spaces with hyphens
    slug = text.lower().strip()
    slug = re.sub(r'[^\w\s-]', '', slug)
    slug = re.sub(r'[-\s]+', '-', slug)
    return slug[:50]  # Limit length
```