emdash_core-0.1.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- emdash_core/__init__.py +3 -0
- emdash_core/agent/__init__.py +37 -0
- emdash_core/agent/agents.py +225 -0
- emdash_core/agent/code_reviewer.py +476 -0
- emdash_core/agent/compaction.py +143 -0
- emdash_core/agent/context_manager.py +140 -0
- emdash_core/agent/events.py +338 -0
- emdash_core/agent/handlers.py +224 -0
- emdash_core/agent/inprocess_subagent.py +377 -0
- emdash_core/agent/mcp/__init__.py +50 -0
- emdash_core/agent/mcp/client.py +346 -0
- emdash_core/agent/mcp/config.py +302 -0
- emdash_core/agent/mcp/manager.py +496 -0
- emdash_core/agent/mcp/tool_factory.py +213 -0
- emdash_core/agent/prompts/__init__.py +38 -0
- emdash_core/agent/prompts/main_agent.py +104 -0
- emdash_core/agent/prompts/subagents.py +131 -0
- emdash_core/agent/prompts/workflow.py +136 -0
- emdash_core/agent/providers/__init__.py +34 -0
- emdash_core/agent/providers/base.py +143 -0
- emdash_core/agent/providers/factory.py +80 -0
- emdash_core/agent/providers/models.py +220 -0
- emdash_core/agent/providers/openai_provider.py +463 -0
- emdash_core/agent/providers/transformers_provider.py +217 -0
- emdash_core/agent/research/__init__.py +81 -0
- emdash_core/agent/research/agent.py +143 -0
- emdash_core/agent/research/controller.py +254 -0
- emdash_core/agent/research/critic.py +428 -0
- emdash_core/agent/research/macros.py +469 -0
- emdash_core/agent/research/planner.py +449 -0
- emdash_core/agent/research/researcher.py +436 -0
- emdash_core/agent/research/state.py +523 -0
- emdash_core/agent/research/synthesizer.py +594 -0
- emdash_core/agent/reviewer_profile.py +475 -0
- emdash_core/agent/rules.py +123 -0
- emdash_core/agent/runner.py +601 -0
- emdash_core/agent/session.py +262 -0
- emdash_core/agent/spec_schema.py +66 -0
- emdash_core/agent/specification.py +479 -0
- emdash_core/agent/subagent.py +397 -0
- emdash_core/agent/subagent_prompts.py +13 -0
- emdash_core/agent/toolkit.py +482 -0
- emdash_core/agent/toolkits/__init__.py +64 -0
- emdash_core/agent/toolkits/base.py +96 -0
- emdash_core/agent/toolkits/explore.py +47 -0
- emdash_core/agent/toolkits/plan.py +55 -0
- emdash_core/agent/tools/__init__.py +141 -0
- emdash_core/agent/tools/analytics.py +436 -0
- emdash_core/agent/tools/base.py +131 -0
- emdash_core/agent/tools/coding.py +484 -0
- emdash_core/agent/tools/github_mcp.py +592 -0
- emdash_core/agent/tools/history.py +13 -0
- emdash_core/agent/tools/modes.py +153 -0
- emdash_core/agent/tools/plan.py +206 -0
- emdash_core/agent/tools/plan_write.py +135 -0
- emdash_core/agent/tools/search.py +412 -0
- emdash_core/agent/tools/spec.py +341 -0
- emdash_core/agent/tools/task.py +262 -0
- emdash_core/agent/tools/task_output.py +204 -0
- emdash_core/agent/tools/tasks.py +454 -0
- emdash_core/agent/tools/traversal.py +588 -0
- emdash_core/agent/tools/web.py +179 -0
- emdash_core/analytics/__init__.py +5 -0
- emdash_core/analytics/engine.py +1286 -0
- emdash_core/api/__init__.py +5 -0
- emdash_core/api/agent.py +308 -0
- emdash_core/api/agents.py +154 -0
- emdash_core/api/analyze.py +264 -0
- emdash_core/api/auth.py +173 -0
- emdash_core/api/context.py +77 -0
- emdash_core/api/db.py +121 -0
- emdash_core/api/embed.py +131 -0
- emdash_core/api/feature.py +143 -0
- emdash_core/api/health.py +93 -0
- emdash_core/api/index.py +162 -0
- emdash_core/api/plan.py +110 -0
- emdash_core/api/projectmd.py +210 -0
- emdash_core/api/query.py +320 -0
- emdash_core/api/research.py +122 -0
- emdash_core/api/review.py +161 -0
- emdash_core/api/router.py +76 -0
- emdash_core/api/rules.py +116 -0
- emdash_core/api/search.py +119 -0
- emdash_core/api/spec.py +99 -0
- emdash_core/api/swarm.py +223 -0
- emdash_core/api/tasks.py +109 -0
- emdash_core/api/team.py +120 -0
- emdash_core/auth/__init__.py +17 -0
- emdash_core/auth/github.py +389 -0
- emdash_core/config.py +74 -0
- emdash_core/context/__init__.py +52 -0
- emdash_core/context/models.py +50 -0
- emdash_core/context/providers/__init__.py +11 -0
- emdash_core/context/providers/base.py +74 -0
- emdash_core/context/providers/explored_areas.py +183 -0
- emdash_core/context/providers/touched_areas.py +360 -0
- emdash_core/context/registry.py +73 -0
- emdash_core/context/reranker.py +199 -0
- emdash_core/context/service.py +260 -0
- emdash_core/context/session.py +352 -0
- emdash_core/core/__init__.py +104 -0
- emdash_core/core/config.py +454 -0
- emdash_core/core/exceptions.py +55 -0
- emdash_core/core/models.py +265 -0
- emdash_core/core/review_config.py +57 -0
- emdash_core/db/__init__.py +67 -0
- emdash_core/db/auth.py +134 -0
- emdash_core/db/models.py +91 -0
- emdash_core/db/provider.py +222 -0
- emdash_core/db/providers/__init__.py +5 -0
- emdash_core/db/providers/supabase.py +452 -0
- emdash_core/embeddings/__init__.py +24 -0
- emdash_core/embeddings/indexer.py +534 -0
- emdash_core/embeddings/models.py +192 -0
- emdash_core/embeddings/providers/__init__.py +7 -0
- emdash_core/embeddings/providers/base.py +112 -0
- emdash_core/embeddings/providers/fireworks.py +141 -0
- emdash_core/embeddings/providers/openai.py +104 -0
- emdash_core/embeddings/registry.py +146 -0
- emdash_core/embeddings/service.py +215 -0
- emdash_core/graph/__init__.py +26 -0
- emdash_core/graph/builder.py +134 -0
- emdash_core/graph/connection.py +692 -0
- emdash_core/graph/schema.py +416 -0
- emdash_core/graph/writer.py +667 -0
- emdash_core/ingestion/__init__.py +7 -0
- emdash_core/ingestion/change_detector.py +150 -0
- emdash_core/ingestion/git/__init__.py +5 -0
- emdash_core/ingestion/git/commit_analyzer.py +196 -0
- emdash_core/ingestion/github/__init__.py +6 -0
- emdash_core/ingestion/github/pr_fetcher.py +296 -0
- emdash_core/ingestion/github/task_extractor.py +100 -0
- emdash_core/ingestion/orchestrator.py +540 -0
- emdash_core/ingestion/parsers/__init__.py +10 -0
- emdash_core/ingestion/parsers/base_parser.py +66 -0
- emdash_core/ingestion/parsers/call_graph_builder.py +121 -0
- emdash_core/ingestion/parsers/class_extractor.py +154 -0
- emdash_core/ingestion/parsers/function_extractor.py +202 -0
- emdash_core/ingestion/parsers/import_analyzer.py +119 -0
- emdash_core/ingestion/parsers/python_parser.py +123 -0
- emdash_core/ingestion/parsers/registry.py +72 -0
- emdash_core/ingestion/parsers/ts_ast_parser.js +313 -0
- emdash_core/ingestion/parsers/typescript_parser.py +278 -0
- emdash_core/ingestion/repository.py +346 -0
- emdash_core/models/__init__.py +38 -0
- emdash_core/models/agent.py +68 -0
- emdash_core/models/index.py +77 -0
- emdash_core/models/query.py +113 -0
- emdash_core/planning/__init__.py +7 -0
- emdash_core/planning/agent_api.py +413 -0
- emdash_core/planning/context_builder.py +265 -0
- emdash_core/planning/feature_context.py +232 -0
- emdash_core/planning/feature_expander.py +646 -0
- emdash_core/planning/llm_explainer.py +198 -0
- emdash_core/planning/similarity.py +509 -0
- emdash_core/planning/team_focus.py +821 -0
- emdash_core/server.py +153 -0
- emdash_core/sse/__init__.py +5 -0
- emdash_core/sse/stream.py +196 -0
- emdash_core/swarm/__init__.py +17 -0
- emdash_core/swarm/merge_agent.py +383 -0
- emdash_core/swarm/session_manager.py +274 -0
- emdash_core/swarm/swarm_runner.py +226 -0
- emdash_core/swarm/task_definition.py +137 -0
- emdash_core/swarm/worker_spawner.py +319 -0
- emdash_core/swarm/worktree_manager.py +278 -0
- emdash_core/templates/__init__.py +10 -0
- emdash_core/templates/defaults/agent-builder.md.template +82 -0
- emdash_core/templates/defaults/focus.md.template +115 -0
- emdash_core/templates/defaults/pr-review-enhanced.md.template +309 -0
- emdash_core/templates/defaults/pr-review.md.template +80 -0
- emdash_core/templates/defaults/project.md.template +85 -0
- emdash_core/templates/defaults/research_critic.md.template +112 -0
- emdash_core/templates/defaults/research_planner.md.template +85 -0
- emdash_core/templates/defaults/research_synthesizer.md.template +128 -0
- emdash_core/templates/defaults/reviewer.md.template +81 -0
- emdash_core/templates/defaults/spec.md.template +41 -0
- emdash_core/templates/defaults/tasks.md.template +78 -0
- emdash_core/templates/loader.py +296 -0
- emdash_core/utils/__init__.py +45 -0
- emdash_core/utils/git.py +84 -0
- emdash_core/utils/image.py +502 -0
- emdash_core/utils/logger.py +51 -0
- emdash_core-0.1.7.dist-info/METADATA +35 -0
- emdash_core-0.1.7.dist-info/RECORD +187 -0
- emdash_core-0.1.7.dist-info/WHEEL +4 -0
- emdash_core-0.1.7.dist-info/entry_points.txt +3 -0
emdash_core/agent/session.py
@@ -0,0 +1,262 @@
+"""Agent session state management."""
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Any, Optional
+
+
+@dataclass
+class ExplorationStep:
+    """Record of a single exploration step."""
+
+    tool: str
+    params: dict
+    result_summary: str
+    timestamp: str = field(default_factory=lambda: datetime.now().isoformat())
+    entities_found: list[str] = field(default_factory=list)
+    content_preview: Optional[str] = None  # For read_file, grep - first few lines of content
+    token_count: int = 0  # Estimated token count for this step's content
+
+
+class AgentSession:
+    """Manages exploration session state.
+
+    Tracks the history of tool calls and their results to provide
+    context for subsequent explorations.
+
+    Example:
+        session = AgentSession()
+
+        # Record an action
+        result = toolkit.execute("semantic_search", query="auth")
+        session.record_action("semantic_search", {"query": "auth"}, result)
+
+        # Get context for next action
+        context = session.get_context_summary()
+    """
+
+    def __init__(self, max_steps: int = 100):
+        """Initialize the session.
+
+        Args:
+            max_steps: Maximum steps to retain in history
+        """
+        self.max_steps = max_steps
+        self.steps: list[ExplorationStep] = []
+        self._visited_entities: set[str] = set()
+
+    def record_action(
+        self,
+        tool_name: str,
+        params: dict,
+        result: Any,
+    ) -> None:
+        """Record an exploration action.
+
+        Args:
+            tool_name: Name of the tool executed
+            params: Parameters passed to the tool
+            result: ToolResult from execution
+        """
+        # Extract entities from result
+        entities = self._extract_entities(result)
+
+        # Extract content preview for file reads and greps
+        content_preview = self._extract_content_preview(tool_name, result)
+
+        # Estimate token count from result content
+        token_count = self._estimate_token_count(tool_name, result)
+
+        # Create step record
+        step = ExplorationStep(
+            tool=tool_name,
+            params=params,
+            result_summary=self._summarize_result(result),
+            entities_found=entities,
+            content_preview=content_preview,
+            token_count=token_count,
+        )
+
+        self.steps.append(step)
+        self._visited_entities.update(entities)
+
+        # Trim if needed
+        if len(self.steps) > self.max_steps:
+            self.steps = self.steps[-self.max_steps :]
+
+    def get_context_summary(self) -> dict:
+        """Get a summary of the exploration context.
+
+        Returns:
+            Dict with context information for the agent
+        """
+        recent_steps = self.steps[-10:] if self.steps else []
+
+        return {
+            "total_steps": len(self.steps),
+            "entities_visited": list(self._visited_entities)[-20:],
+            "recent_actions": [
+                {
+                    "tool": s.tool,
+                    "params": s.params,
+                    "summary": s.result_summary,
+                }
+                for s in recent_steps
+            ],
+        }
+
+    def reset(self) -> None:
+        """Reset the session state."""
+        self.steps.clear()
+        self._visited_entities.clear()
+
+    def _extract_entities(self, result: Any) -> list[str]:
+        """Extract entity identifiers from a result."""
+        entities = []
+
+        if not hasattr(result, "data") or not result.data:
+            return entities
+
+        data = result.data
+
+        # Extract from results list
+        if "results" in data:
+            for item in data["results"][:10]:
+                if isinstance(item, dict):
+                    for key in ["qualified_name", "file_path", "identifier"]:
+                        if key in item:
+                            entities.append(str(item[key]))
+                            break
+
+        # Extract from root_node
+        if "root_node" in data:
+            root = data["root_node"]
+            for key in ["qualified_name", "file_path"]:
+                if key in root:
+                    entities.append(str(root[key]))
+
+        return entities
+
+    def _summarize_result(self, result: Any) -> str:
+        """Create a brief summary of a result."""
+        if not hasattr(result, "success"):
+            return "Unknown result"
+
+        if not result.success:
+            return f"Error: {result.error}"
+
+        if not result.data:
+            return "Empty result"
+
+        data = result.data
+
+        if "results" in data:
+            return f"Found {len(data['results'])} results"
+        elif "root_node" in data:
+            node = data["root_node"]
+            name = node.get("qualified_name") or node.get("file_path", "unknown")
+            return f"Expanded: {name}"
+        elif "callers" in data:
+            return f"Found {len(data['callers'])} callers"
+        elif "callees" in data:
+            return f"Found {len(data['callees'])} callees"
+        elif "prs" in data:
+            return f"Found {len(data['prs'])} PRs"
+        else:
+            return "Completed"
+
+    def _extract_content_preview(
+        self,
+        tool_name: str,
+        result: Any,
+        max_lines: int = 5,
+        max_chars: int = 300,
+    ) -> Optional[str]:
+        """Extract a content preview from tool results.
+
+        Args:
+            tool_name: Name of the tool
+            result: ToolResult from execution
+            max_lines: Maximum lines to include
+            max_chars: Maximum characters to include
+
+        Returns:
+            Content preview string or None
+        """
+        if not hasattr(result, "success") or not result.success:
+            return None
+
+        if not hasattr(result, "data") or not result.data:
+            return None
+
+        data = result.data
+
+        # read_file - show first few lines
+        if tool_name == "read_file" and "content" in data:
+            content = data["content"]
+            lines = content.split("\n")[:max_lines]
+            preview = "\n".join(lines)
+            if len(preview) > max_chars:
+                preview = preview[:max_chars] + "..."
+            elif len(content.split("\n")) > max_lines:
+                preview += "\n..."
+            return preview
+
+        # grep - show first few matches
+        if tool_name == "grep" and "results" in data:
+            matches = data["results"][:max_lines]
+            lines = [f"{m.get('file', '')}:{m.get('line_number', '')}: {m.get('line', '')}"
+                     for m in matches if isinstance(m, dict)]
+            preview = "\n".join(lines)
+            if len(preview) > max_chars:
+                preview = preview[:max_chars] + "..."
+            return preview if lines else None
+
+        # semantic_search - show first few result names
+        if tool_name == "semantic_search" and "results" in data:
+            results = data["results"][:max_lines]
+            lines = [f"{r.get('type', '')}: {r.get('qualified_name', r.get('identifier', ''))}"
+                     for r in results if isinstance(r, dict)]
+            preview = "\n".join(lines)
+            return preview if lines else None
+
+        return None
+
+    def _estimate_token_count(self, tool_name: str, result: Any) -> int:
+        """Estimate token count from tool result content.
+
+        Uses ~4 characters per token as rough estimate.
+
+        Args:
+            tool_name: Name of the tool
+            result: ToolResult from execution
+
+        Returns:
+            Estimated token count
+        """
+        if not hasattr(result, "success") or not result.success:
+            return 0
+
+        if not hasattr(result, "data") or not result.data:
+            return 0
+
+        data = result.data
+
+        # read_file - estimate from content
+        if tool_name == "read_file" and "content" in data:
+            content = data["content"]
+            return len(content) // 4 if content else 0
+
+        # grep - estimate from all matched lines
+        if tool_name == "grep" and "results" in data:
+            total_chars = 0
+            for match in data["results"]:
+                if isinstance(match, dict) and "line" in match:
+                    total_chars += len(match["line"])
+            return total_chars // 4
+
+        # semantic_search - minimal tokens (just metadata)
+        if tool_name == "semantic_search" and "results" in data:
+            return len(data["results"]) * 20  # ~20 tokens per result metadata
+
+        return 0
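As a quick illustration, here is a minimal usage sketch for the AgentSession above. The package's real ToolResult type lives in another module of the wheel; FakeToolResult below is a hypothetical stand-in that only exposes the attributes session.py duck-types against (.success, .data, .error), and the file/param names are made up for the example.

from dataclasses import dataclass
from typing import Optional

from emdash_core.agent.session import AgentSession

@dataclass
class FakeToolResult:
    # Hypothetical stand-in, not the package's ToolResult.
    success: bool = True
    data: Optional[dict] = None
    error: Optional[str] = None

session = AgentSession(max_steps=50)

# A semantic_search-style result: entities are pulled from "results" items,
# and the token estimate is ~20 per result.
search = FakeToolResult(data={"results": [
    {"type": "function", "qualified_name": "auth.login"},
    {"type": "function", "qualified_name": "auth.logout"},
]})
session.record_action("semantic_search", {"query": "auth"}, search)

# A read_file-style result: the preview keeps the first few lines and the
# token estimate is roughly len(content) // 4.
read = FakeToolResult(data={"content": "def login(user):\n    ...\n"})
session.record_action("read_file", {"path": "auth/login.py"}, read)

summary = session.get_context_summary()
# summary["total_steps"] == 2
# summary["entities_visited"] contains "auth.login" and "auth.logout"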
emdash_core/agent/spec_schema.py
@@ -0,0 +1,66 @@
+"""Specification schema - free-form markdown."""
+
+from __future__ import annotations
+
+from typing import Optional
+from dataclasses import dataclass
+
+
+@dataclass
+class Spec:
+    """A feature specification in markdown format."""
+
+    title: str
+    content: str  # Free-form markdown
+
+    def to_markdown(self) -> str:
+        """Return the spec as markdown."""
+        return f"# {self.title}\n\n{self.content}"
+
+    @classmethod
+    def from_markdown(cls, markdown: str) -> "Spec":
+        """Parse a markdown spec.
+
+        Extracts title from first # heading, rest is content.
+        """
+        lines = markdown.strip().split("\n")
+        title = "Untitled Spec"
+        content_start = 0
+
+        for i, line in enumerate(lines):
+            if line.startswith("# "):
+                title = line[2:].strip()
+                content_start = i + 1
+                break
+
+        content = "\n".join(lines[content_start:]).strip()
+        return cls(title=title, content=content)
+
+
+# Template for suggested spec structure (shown in system prompt)
+SPEC_TEMPLATE = """# Feature Title
+
+> One-sentence summary
+
+## Problem
+What is broken or missing?
+
+## Solution
+What are we building?
+
+## Implementation
+- Step 1
+- Step 2
+- Step 3
+
+## Related Files
+- `path/to/file.py` - reason
+- `path/to/other.py` - reason
+
+## Edge Cases
+- Case 1: expected behavior
+- Case 2: expected behavior
+
+## Open Questions
+- Any unresolved questions?
+"""
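A short round-trip sketch of the Spec schema above; the import path is taken from the file listing (emdash_core/agent/spec_schema.py), and the spec text is invented for the example.

from emdash_core.agent.spec_schema import SPEC_TEMPLATE, Spec

spec = Spec.from_markdown("# Dark mode\n\nAdd a theme toggle to settings.")
assert spec.title == "Dark mode"
assert spec.content == "Add a theme toggle to settings."
assert spec.to_markdown() == "# Dark mode\n\nAdd a theme toggle to settings."

# Input without a leading "# " heading falls back to the default title.
assert Spec.from_markdown("Just some notes.").title == "Untitled Spec"

# The suggested template itself parses cleanly.
assert Spec.from_markdown(SPEC_TEMPLATE).title == "Feature Title"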