emdash_core-0.1.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/__init__.py +3 -0
- emdash_core/agent/__init__.py +37 -0
- emdash_core/agent/agents.py +225 -0
- emdash_core/agent/code_reviewer.py +476 -0
- emdash_core/agent/compaction.py +143 -0
- emdash_core/agent/context_manager.py +140 -0
- emdash_core/agent/events.py +338 -0
- emdash_core/agent/handlers.py +224 -0
- emdash_core/agent/inprocess_subagent.py +377 -0
- emdash_core/agent/mcp/__init__.py +50 -0
- emdash_core/agent/mcp/client.py +346 -0
- emdash_core/agent/mcp/config.py +302 -0
- emdash_core/agent/mcp/manager.py +496 -0
- emdash_core/agent/mcp/tool_factory.py +213 -0
- emdash_core/agent/prompts/__init__.py +38 -0
- emdash_core/agent/prompts/main_agent.py +104 -0
- emdash_core/agent/prompts/subagents.py +131 -0
- emdash_core/agent/prompts/workflow.py +136 -0
- emdash_core/agent/providers/__init__.py +34 -0
- emdash_core/agent/providers/base.py +143 -0
- emdash_core/agent/providers/factory.py +80 -0
- emdash_core/agent/providers/models.py +220 -0
- emdash_core/agent/providers/openai_provider.py +463 -0
- emdash_core/agent/providers/transformers_provider.py +217 -0
- emdash_core/agent/research/__init__.py +81 -0
- emdash_core/agent/research/agent.py +143 -0
- emdash_core/agent/research/controller.py +254 -0
- emdash_core/agent/research/critic.py +428 -0
- emdash_core/agent/research/macros.py +469 -0
- emdash_core/agent/research/planner.py +449 -0
- emdash_core/agent/research/researcher.py +436 -0
- emdash_core/agent/research/state.py +523 -0
- emdash_core/agent/research/synthesizer.py +594 -0
- emdash_core/agent/reviewer_profile.py +475 -0
- emdash_core/agent/rules.py +123 -0
- emdash_core/agent/runner.py +601 -0
- emdash_core/agent/session.py +262 -0
- emdash_core/agent/spec_schema.py +66 -0
- emdash_core/agent/specification.py +479 -0
- emdash_core/agent/subagent.py +397 -0
- emdash_core/agent/subagent_prompts.py +13 -0
- emdash_core/agent/toolkit.py +482 -0
- emdash_core/agent/toolkits/__init__.py +64 -0
- emdash_core/agent/toolkits/base.py +96 -0
- emdash_core/agent/toolkits/explore.py +47 -0
- emdash_core/agent/toolkits/plan.py +55 -0
- emdash_core/agent/tools/__init__.py +141 -0
- emdash_core/agent/tools/analytics.py +436 -0
- emdash_core/agent/tools/base.py +131 -0
- emdash_core/agent/tools/coding.py +484 -0
- emdash_core/agent/tools/github_mcp.py +592 -0
- emdash_core/agent/tools/history.py +13 -0
- emdash_core/agent/tools/modes.py +153 -0
- emdash_core/agent/tools/plan.py +206 -0
- emdash_core/agent/tools/plan_write.py +135 -0
- emdash_core/agent/tools/search.py +412 -0
- emdash_core/agent/tools/spec.py +341 -0
- emdash_core/agent/tools/task.py +262 -0
- emdash_core/agent/tools/task_output.py +204 -0
- emdash_core/agent/tools/tasks.py +454 -0
- emdash_core/agent/tools/traversal.py +588 -0
- emdash_core/agent/tools/web.py +179 -0
- emdash_core/analytics/__init__.py +5 -0
- emdash_core/analytics/engine.py +1286 -0
- emdash_core/api/__init__.py +5 -0
- emdash_core/api/agent.py +308 -0
- emdash_core/api/agents.py +154 -0
- emdash_core/api/analyze.py +264 -0
- emdash_core/api/auth.py +173 -0
- emdash_core/api/context.py +77 -0
- emdash_core/api/db.py +121 -0
- emdash_core/api/embed.py +131 -0
- emdash_core/api/feature.py +143 -0
- emdash_core/api/health.py +93 -0
- emdash_core/api/index.py +162 -0
- emdash_core/api/plan.py +110 -0
- emdash_core/api/projectmd.py +210 -0
- emdash_core/api/query.py +320 -0
- emdash_core/api/research.py +122 -0
- emdash_core/api/review.py +161 -0
- emdash_core/api/router.py +76 -0
- emdash_core/api/rules.py +116 -0
- emdash_core/api/search.py +119 -0
- emdash_core/api/spec.py +99 -0
- emdash_core/api/swarm.py +223 -0
- emdash_core/api/tasks.py +109 -0
- emdash_core/api/team.py +120 -0
- emdash_core/auth/__init__.py +17 -0
- emdash_core/auth/github.py +389 -0
- emdash_core/config.py +74 -0
- emdash_core/context/__init__.py +52 -0
- emdash_core/context/models.py +50 -0
- emdash_core/context/providers/__init__.py +11 -0
- emdash_core/context/providers/base.py +74 -0
- emdash_core/context/providers/explored_areas.py +183 -0
- emdash_core/context/providers/touched_areas.py +360 -0
- emdash_core/context/registry.py +73 -0
- emdash_core/context/reranker.py +199 -0
- emdash_core/context/service.py +260 -0
- emdash_core/context/session.py +352 -0
- emdash_core/core/__init__.py +104 -0
- emdash_core/core/config.py +454 -0
- emdash_core/core/exceptions.py +55 -0
- emdash_core/core/models.py +265 -0
- emdash_core/core/review_config.py +57 -0
- emdash_core/db/__init__.py +67 -0
- emdash_core/db/auth.py +134 -0
- emdash_core/db/models.py +91 -0
- emdash_core/db/provider.py +222 -0
- emdash_core/db/providers/__init__.py +5 -0
- emdash_core/db/providers/supabase.py +452 -0
- emdash_core/embeddings/__init__.py +24 -0
- emdash_core/embeddings/indexer.py +534 -0
- emdash_core/embeddings/models.py +192 -0
- emdash_core/embeddings/providers/__init__.py +7 -0
- emdash_core/embeddings/providers/base.py +112 -0
- emdash_core/embeddings/providers/fireworks.py +141 -0
- emdash_core/embeddings/providers/openai.py +104 -0
- emdash_core/embeddings/registry.py +146 -0
- emdash_core/embeddings/service.py +215 -0
- emdash_core/graph/__init__.py +26 -0
- emdash_core/graph/builder.py +134 -0
- emdash_core/graph/connection.py +692 -0
- emdash_core/graph/schema.py +416 -0
- emdash_core/graph/writer.py +667 -0
- emdash_core/ingestion/__init__.py +7 -0
- emdash_core/ingestion/change_detector.py +150 -0
- emdash_core/ingestion/git/__init__.py +5 -0
- emdash_core/ingestion/git/commit_analyzer.py +196 -0
- emdash_core/ingestion/github/__init__.py +6 -0
- emdash_core/ingestion/github/pr_fetcher.py +296 -0
- emdash_core/ingestion/github/task_extractor.py +100 -0
- emdash_core/ingestion/orchestrator.py +540 -0
- emdash_core/ingestion/parsers/__init__.py +10 -0
- emdash_core/ingestion/parsers/base_parser.py +66 -0
- emdash_core/ingestion/parsers/call_graph_builder.py +121 -0
- emdash_core/ingestion/parsers/class_extractor.py +154 -0
- emdash_core/ingestion/parsers/function_extractor.py +202 -0
- emdash_core/ingestion/parsers/import_analyzer.py +119 -0
- emdash_core/ingestion/parsers/python_parser.py +123 -0
- emdash_core/ingestion/parsers/registry.py +72 -0
- emdash_core/ingestion/parsers/ts_ast_parser.js +313 -0
- emdash_core/ingestion/parsers/typescript_parser.py +278 -0
- emdash_core/ingestion/repository.py +346 -0
- emdash_core/models/__init__.py +38 -0
- emdash_core/models/agent.py +68 -0
- emdash_core/models/index.py +77 -0
- emdash_core/models/query.py +113 -0
- emdash_core/planning/__init__.py +7 -0
- emdash_core/planning/agent_api.py +413 -0
- emdash_core/planning/context_builder.py +265 -0
- emdash_core/planning/feature_context.py +232 -0
- emdash_core/planning/feature_expander.py +646 -0
- emdash_core/planning/llm_explainer.py +198 -0
- emdash_core/planning/similarity.py +509 -0
- emdash_core/planning/team_focus.py +821 -0
- emdash_core/server.py +153 -0
- emdash_core/sse/__init__.py +5 -0
- emdash_core/sse/stream.py +196 -0
- emdash_core/swarm/__init__.py +17 -0
- emdash_core/swarm/merge_agent.py +383 -0
- emdash_core/swarm/session_manager.py +274 -0
- emdash_core/swarm/swarm_runner.py +226 -0
- emdash_core/swarm/task_definition.py +137 -0
- emdash_core/swarm/worker_spawner.py +319 -0
- emdash_core/swarm/worktree_manager.py +278 -0
- emdash_core/templates/__init__.py +10 -0
- emdash_core/templates/defaults/agent-builder.md.template +82 -0
- emdash_core/templates/defaults/focus.md.template +115 -0
- emdash_core/templates/defaults/pr-review-enhanced.md.template +309 -0
- emdash_core/templates/defaults/pr-review.md.template +80 -0
- emdash_core/templates/defaults/project.md.template +85 -0
- emdash_core/templates/defaults/research_critic.md.template +112 -0
- emdash_core/templates/defaults/research_planner.md.template +85 -0
- emdash_core/templates/defaults/research_synthesizer.md.template +128 -0
- emdash_core/templates/defaults/reviewer.md.template +81 -0
- emdash_core/templates/defaults/spec.md.template +41 -0
- emdash_core/templates/defaults/tasks.md.template +78 -0
- emdash_core/templates/loader.py +296 -0
- emdash_core/utils/__init__.py +45 -0
- emdash_core/utils/git.py +84 -0
- emdash_core/utils/image.py +502 -0
- emdash_core/utils/logger.py +51 -0
- emdash_core-0.1.7.dist-info/METADATA +35 -0
- emdash_core-0.1.7.dist-info/RECORD +187 -0
- emdash_core-0.1.7.dist-info/WHEEL +4 -0
- emdash_core-0.1.7.dist-info/entry_points.txt +3 -0
emdash_core/agent/research/synthesizer.py

@@ -0,0 +1,594 @@

"""Synthesizer agent for generating final research reports.

The Synthesizer produces team-usable final reports that include:
1. Findings (Fact-only) - claims with evidence IDs
2. Evidence Coverage Matrix - questions vs evidence
3. Design/Spec Implications
4. Risks & Unknowns - gaps + impact
5. Recommended Tasks - actionable items
6. Execution-Ready Work Packages - sprint grouping + DoD + acceptance tests
7. Capacity & Sizing - T-shirt sizes and capacity notes
8. Reviewer Checklist - what to verify in PRs
9. Tooling Summary - macros, calls, budgets

Hard rules:
- No claim appears without evidence IDs
- Unknowns must be explicit, not hidden
"""

import json
from typing import Optional

from rich.console import Console

from ..providers import get_provider
from ..providers.factory import DEFAULT_MODEL
from ..compaction import LLMCompactor
from .state import (
    ResearchState,
    ResearchPlan,
    IterationResult,
    EvidenceItem,
    Claim,
    Gap,
    Contradiction,
)


SYNTHESIZER_SYSTEM_PROMPT = """You are a research synthesizer that produces team-usable reports.

Your job is to combine research findings into a structured report that helps the team take action.

REQUIRED SECTIONS (in order):

## Findings
- List claims with evidence IDs: "**C7** [E12, E13]: Statement here"
- Group by topic/question
- Only include claims that have evidence

## Evidence Coverage Matrix
- Show which questions are covered by which evidence
- Format as markdown table
- Highlight gaps

## Design/Spec Implications
- What must be true for implementation
- Design constraints discovered
- Patterns to follow

## Risks & Unknowns
- List all gaps
- Impact of unknowns
- How to close gaps

## Recommended Tasks
- Actionable items
- Map each task to evidence
- Include owner placeholders: "**Owner TBD**"

## Execution-Ready Work Packages
- Group Phase 1 tasks into sprints
- Each task includes: T-shirt size (XS/S/M/L/XL), Definition of Done, Acceptance Tests
- Keep dependencies explicit

## Capacity & Sizing
- Roll up T-shirt sizing per sprint
- Note owner gaps and capacity risks

## Reviewer Checklist
- What to verify in PRs
- Critical paths to check
- Tests to ensure

## Tooling Summary
- Macro runs
- Tool calls made
- Budget used

## Planning Artifacts
- Recommend JSON task export to Jira/Linear for execution tracking

CRITICAL RULES:
- Every claim must show evidence IDs
- No ungrounded statements
- Gaps must be explicit
- Use team vocabulary"""


class SynthesizerAgent:
    """Generates final report with required sections.

    The Synthesizer transforms raw research data into a
    structured, team-usable report.
    """

    def __init__(
        self,
        model: str = DEFAULT_MODEL,
        verbose: bool = True,
    ):
        """Initialize the synthesizer agent.

        Args:
            model: LLM model to use
            verbose: Whether to print progress
        """
        self.provider = get_provider(model)
        self.model = model
        self.verbose = verbose
        self.console = Console()
        self.compactor = LLMCompactor(self.provider)

    def write(
        self,
        plan: ResearchPlan,
        history: list[IterationResult],
    ) -> str:
        """Generate final report from research state.

        Args:
            plan: Research plan
            history: All iteration results

        Returns:
            Markdown report string
        """
        if self.verbose:
            self.console.print("[cyan]Synthesizing final report...[/cyan]")

        # Gather all data
        all_evidence = []
        all_claims = []
        all_gaps = []
        all_contradictions = []

        for result in history:
            all_evidence.extend(result.evidence)
            all_claims.extend(result.claims)
            all_gaps.extend(result.gaps)
            all_contradictions.extend(result.critique.contradictions)

        # Try LLM-based synthesis
        try:
            report = self._llm_synthesize(plan, all_evidence, all_claims, all_gaps, all_contradictions, history)
            if report and len(report) > 500:
                if self.verbose:
                    self.console.print("[green]Report synthesized[/green]")
                return report
        except Exception as e:
            if self.verbose:
                self.console.print(f"[yellow]LLM synthesis failed: {e}. Using template.[/yellow]")

        # Fallback to template-based synthesis
        return self._template_synthesize(plan, all_evidence, all_claims, all_gaps, all_contradictions, history)

    def _llm_synthesize(
        self,
        plan: ResearchPlan,
        evidence: list[EvidenceItem],
        claims: list[Claim],
        gaps: list[Gap],
        contradictions: list[Contradiction],
        history: list[IterationResult],
    ) -> Optional[str]:
        """Synthesize using LLM.

        Args:
            plan: Research plan
            evidence: All evidence
            claims: All claims
            gaps: All gaps
            contradictions: All contradictions
            history: Iteration history

        Returns:
            Report string or None
        """
        # Build context for LLM
        claims_text = "\n".join([
            f"- {c.id} (conf={c.confidence}, evidence={c.evidence_ids}): {c.statement}"
            for c in claims
        ]) or "No claims"

        evidence_text = "\n".join([
            f"- {e.id}: {e.tool} -> {e.summary}"
            for e in evidence[:30]  # Limit
        ]) or "No evidence"

        gaps_text = "\n".join([
            f"- {g.question} (reason: {g.reason})"
            for g in gaps
        ]) or "No gaps"

        contradictions_text = "\n".join([
            f"- {c.claim_a} vs {c.claim_b}: {c.note}"
            for c in contradictions
        ]) or "No contradictions"

        questions_text = "\n".join([
            f"- [{q.priority}] {q.qid}: {q.question}"
            for q in plan.questions
        ])

        # Calculate stats
        total_tool_calls = len(evidence)
        iterations = len(history)

        payload = {
            "questions": questions_text,
            "claims": claims_text,
            "evidence": evidence_text,
            "gaps": gaps_text,
            "contradictions": contradictions_text,
        }
        compacted = self.compactor.compact_payload(payload, plan.goal)
        questions_text = compacted.get("questions", questions_text)
        claims_text = compacted.get("claims", claims_text)
        evidence_text = compacted.get("evidence", evidence_text)
        gaps_text = compacted.get("gaps", gaps_text)
        contradictions_text = compacted.get("contradictions", contradictions_text)

        user_message = f"""Synthesize a research report from this data.

RESEARCH GOAL: {plan.goal}

QUESTIONS INVESTIGATED:
{questions_text}

CLAIMS (with evidence):
{claims_text}

EVIDENCE COLLECTED:
{evidence_text}

GAPS (unanswered):
{gaps_text}

CONTRADICTIONS:
{contradictions_text}

STATS:
- Iterations: {iterations}
- Tool calls: {total_tool_calls}
- Claims: {len(claims)}
- Gaps: {len(gaps)}

Generate a complete research report with all required sections.
Start with "# Research Report: [topic]"
"""

        messages = [
            {"role": "user", "content": user_message},
        ]

        response = self.provider.chat(messages, system=SYNTHESIZER_SYSTEM_PROMPT)
        return response.content

    def _template_synthesize(
        self,
        plan: ResearchPlan,
        evidence: list[EvidenceItem],
        claims: list[Claim],
        gaps: list[Gap],
        contradictions: list[Contradiction],
        history: list[IterationResult],
    ) -> str:
        """Synthesize using template.

        Fallback method for structured report generation.
        """
        sections = []

        # Title
        sections.append(f"# Research Report: {plan.goal}\n")

        # Summary
        sections.append("## Executive Summary\n")
        sections.append(f"This report summarizes research into: **{plan.goal}**\n")
        sections.append(f"- **{len(claims)}** claims established")
        sections.append(f"- **{len(evidence)}** evidence items collected")
        sections.append(f"- **{len(gaps)}** gaps identified")
        sections.append(f"- **{len(history)}** research iterations\n")

        # Findings
        sections.append("## Findings\n")
        sections.append(self._format_findings(claims))

        # Evidence Coverage Matrix
        sections.append("## Evidence Coverage Matrix\n")
        sections.append(self._build_coverage_matrix(plan, claims, evidence))

        # Design/Spec Implications
        sections.append("## Design/Spec Implications\n")
        sections.append(self._format_implications(claims))

        # Risks & Unknowns
        sections.append("## Risks & Unknowns\n")
        sections.append(self._format_risks(gaps, contradictions))

        # Recommended Tasks
        sections.append("## Recommended Tasks\n")
        sections.append(self._format_tasks(claims, gaps))

        # Execution-Ready Work Packages
        sections.append("## Execution-Ready Work Packages\n")
        sections.append(
            "### Sprint 1 (Phase 1)\n"
            "- Task: [Owner TBD] (Size: M)\n"
            " - Definition of Done: Documented artifacts delivered\n"
            " - Acceptance Tests: Evidence-backed review checklist complete\n\n"
            "### Sprint 2 (Phase 1)\n"
            "- Task: [Owner TBD] (Size: M)\n"
            " - Definition of Done: Dependencies mapped\n"
            " - Acceptance Tests: Dependency graph reviewed\n\n"
            "### Sprint 3 (Phase 1)\n"
            "- Task: [Owner TBD] (Size: S)\n"
            " - Definition of Done: Test inventory documented\n"
            " - Acceptance Tests: Coverage gaps recorded\n"
        )

        # Capacity & Sizing
        sections.append("## Capacity & Sizing\n")
        sections.append(
            "- Sprint 1: M\n"
            "- Sprint 2: M\n"
            "- Sprint 3: S\n"
            "- Owners: TBD (capacity risk until ownership assigned)\n"
        )

        # Reviewer Checklist
        sections.append("## Reviewer Checklist\n")
        sections.append(self._format_checklist(claims, evidence))

        # Tooling Summary
        sections.append("## Tooling Summary\n")
        sections.append(self._format_tooling(evidence, history, plan))

        # Planning Artifacts
        sections.append("## Planning Artifacts\n")
        sections.append("- Export tasks as JSON for Jira/Linear indexing\n")

        return "\n".join(sections)

    def _format_findings(self, claims: list[Claim]) -> str:
        """Format claims as findings."""
        if not claims:
            return "_No findings established._\n"

        lines = []

        # Group by confidence
        high_conf = [c for c in claims if c.confidence >= 2]
        low_conf = [c for c in claims if c.confidence < 2]

        if high_conf:
            lines.append("### High Confidence\n")
            for claim in high_conf:
                evidence_str = ", ".join(claim.evidence_ids)
                lines.append(f"- **{claim.id}** [{evidence_str}]: {claim.statement}")
            lines.append("")

        if low_conf:
            lines.append("### Lower Confidence\n")
            for claim in low_conf:
                evidence_str = ", ".join(claim.evidence_ids)
                assumptions_str = ""
                if claim.assumptions:
                    assumptions_str = f" _(Assumptions: {', '.join(claim.assumptions)})_"
                lines.append(f"- **{claim.id}** [{evidence_str}]: {claim.statement}{assumptions_str}")
            lines.append("")

        return "\n".join(lines)

    def _build_coverage_matrix(
        self,
        plan: ResearchPlan,
        claims: list[Claim],
        evidence: list[EvidenceItem],
    ) -> str:
        """Build evidence coverage matrix."""
        lines = []

        # Header
        lines.append("| Question | Priority | Evidence | Claims | Status |")
        lines.append("|----------|----------|----------|--------|--------|")

        for question in plan.questions:
            # Find related claims and evidence
            q_keywords = set(question.question.lower().split())

            related_claims = []
            related_evidence = set()

            for claim in claims:
                c_keywords = set(claim.statement.lower().split())
                if len(q_keywords & c_keywords) >= 2:
                    related_claims.append(claim.id)
                    related_evidence.update(claim.evidence_ids)

            # Status
            if related_claims:
                if any(c.confidence >= 2 for c in claims if c.id in related_claims):
                    status = "Answered"
                else:
                    status = "Partial"
            else:
                status = "Gap"

            evidence_str = ", ".join(list(related_evidence)[:3])
            if len(related_evidence) > 3:
                evidence_str += "..."

            claims_str = ", ".join(related_claims[:3])
            if len(related_claims) > 3:
                claims_str += "..."

            lines.append(
                f"| {question.question[:40]}... | {question.priority} | "
                f"{evidence_str or '-'} | {claims_str or '-'} | {status} |"
            )

        lines.append("")
        return "\n".join(lines)

    def _format_implications(self, claims: list[Claim]) -> str:
        """Format design/spec implications."""
        if not claims:
            return "_No implications identified._\n"

        lines = []

        # Extract implications from high-confidence claims
        high_conf = [c for c in claims if c.confidence >= 2]

        if high_conf:
            lines.append("Based on the research findings:\n")
            for i, claim in enumerate(high_conf[:10], 1):
                lines.append(f"{i}. {claim.statement} (from {', '.join(claim.evidence_ids)})")
            lines.append("")
        else:
            lines.append("_Limited high-confidence findings. More research needed._\n")

        return "\n".join(lines)

    def _format_risks(
        self,
        gaps: list[Gap],
        contradictions: list[Contradiction],
    ) -> str:
        """Format risks and unknowns."""
        lines = []

        if gaps:
            lines.append("### Unanswered Questions\n")
            for gap in gaps:
                tools_str = ", ".join(gap.suggested_tools) if gap.suggested_tools else "manual investigation"
                lines.append(f"- **{gap.question}**")
                lines.append(f" - Reason: {gap.reason}")
                lines.append(f" - Suggested: {tools_str}")
            lines.append("")

        if contradictions:
            lines.append("### Contradictions (Unresolved)\n")
            for c in contradictions:
                lines.append(f"- **{c.claim_a}** vs **{c.claim_b}**: {c.note}")
            lines.append("")

        if not gaps and not contradictions:
            lines.append("_No significant risks or unknowns identified._\n")

        return "\n".join(lines)

    def _format_tasks(
        self,
        claims: list[Claim],
        gaps: list[Gap],
    ) -> str:
        """Format recommended tasks."""
        lines = []
        task_num = 1

        # Tasks from high-confidence findings
        high_conf = [c for c in claims if c.confidence >= 2]
        if high_conf:
            lines.append("### Based on Findings\n")
            for claim in high_conf[:5]:
                lines.append(f"{task_num}. Implement based on {claim.id}: {claim.statement[:60]}...")
                lines.append(f" - **Owner**: TBD")
                lines.append(f" - **Evidence**: {', '.join(claim.evidence_ids)}")
                task_num += 1
            lines.append("")

        # Tasks from gaps
        if gaps:
            lines.append("### To Close Gaps\n")
            for gap in gaps[:5]:
                lines.append(f"{task_num}. Investigate: {gap.question}")
                lines.append(f" - **Owner**: TBD")
                lines.append(f" - **Tools**: {', '.join(gap.suggested_tools)}")
                task_num += 1
            lines.append("")

        if not high_conf and not gaps:
            lines.append("_No specific tasks recommended. More research needed._\n")

        return "\n".join(lines)

    def _format_checklist(
        self,
        claims: list[Claim],
        evidence: list[EvidenceItem],
    ) -> str:
        """Format reviewer checklist."""
        lines = []

        # Extract file paths from evidence
        files = set()
        for e in evidence:
            for entity in e.entities:
                if "/" in entity or entity.endswith(".py") or entity.endswith(".ts"):
                    files.add(entity)

        if files:
            lines.append("### Files to Review\n")
            for f in list(files)[:15]:
                lines.append(f"- [ ] `{f}`")
            lines.append("")

        # Checklist items from claims
        if claims:
            lines.append("### Verification Points\n")
            for claim in claims[:10]:
                lines.append(f"- [ ] Verify: {claim.statement[:60]}... ({claim.id})")
            lines.append("")

        # General checklist
        lines.append("### General Checks\n")
        lines.append("- [ ] Tests pass")
        lines.append("- [ ] No regressions in affected areas")
        lines.append("- [ ] Documentation updated if needed")
        lines.append("- [ ] Code follows project patterns")
        lines.append("")

        return "\n".join(lines)

    def _format_tooling(
        self,
        evidence: list[EvidenceItem],
        history: list[IterationResult],
        plan: ResearchPlan,
    ) -> str:
        """Format tooling summary."""
        lines = []

        # Tool usage stats
        tool_counts: dict[str, int] = {}
        for e in evidence:
            tool_counts[e.tool] = tool_counts.get(e.tool, 0) + 1

        lines.append("### Tool Usage\n")
        lines.append("| Tool | Calls |")
        lines.append("|------|-------|")
        for tool, count in sorted(tool_counts.items(), key=lambda x: -x[1]):
            lines.append(f"| {tool} | {count} |")
        lines.append("")

        # Iteration summary
        lines.append("### Iteration History\n")
        for result in history:
            decision = result.critique.decision
            lines.append(
                f"- Iteration {result.iteration + 1}: "
                f"{len(result.evidence)} evidence, {len(result.claims)} claims -> {decision}"
            )
        lines.append("")

        # Budget summary
        lines.append("### Budget\n")
        total_calls = len(evidence)
        budget = plan.budgets.get("tool_calls", 50)
        lines.append(f"- Tool calls: {total_calls}/{budget} ({total_calls/budget*100:.0f}%)")
        lines.append(f"- Iterations: {len(history)}/{plan.max_iterations}")
        lines.append("")

        return "\n".join(lines)