doit-toolkit-cli 0.1.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of doit-toolkit-cli might be problematic. Click here for more details.
- doit_cli/__init__.py +1356 -0
- doit_cli/cli/__init__.py +26 -0
- doit_cli/cli/analytics_command.py +616 -0
- doit_cli/cli/context_command.py +213 -0
- doit_cli/cli/diagram_command.py +304 -0
- doit_cli/cli/fixit_command.py +641 -0
- doit_cli/cli/hooks_command.py +211 -0
- doit_cli/cli/init_command.py +613 -0
- doit_cli/cli/memory_command.py +293 -0
- doit_cli/cli/roadmapit_command.py +10 -0
- doit_cli/cli/status_command.py +117 -0
- doit_cli/cli/sync_prompts_command.py +248 -0
- doit_cli/cli/validate_command.py +196 -0
- doit_cli/cli/verify_command.py +204 -0
- doit_cli/cli/workflow_mixin.py +224 -0
- doit_cli/cli/xref_command.py +555 -0
- doit_cli/formatters/__init__.py +8 -0
- doit_cli/formatters/base.py +38 -0
- doit_cli/formatters/json_formatter.py +126 -0
- doit_cli/formatters/markdown_formatter.py +97 -0
- doit_cli/formatters/rich_formatter.py +257 -0
- doit_cli/main.py +51 -0
- doit_cli/models/__init__.py +139 -0
- doit_cli/models/agent.py +74 -0
- doit_cli/models/analytics_models.py +384 -0
- doit_cli/models/context_config.py +464 -0
- doit_cli/models/crossref_models.py +182 -0
- doit_cli/models/diagram_models.py +363 -0
- doit_cli/models/fixit_models.py +355 -0
- doit_cli/models/hook_config.py +125 -0
- doit_cli/models/project.py +91 -0
- doit_cli/models/results.py +121 -0
- doit_cli/models/search_models.py +228 -0
- doit_cli/models/status_models.py +195 -0
- doit_cli/models/sync_models.py +146 -0
- doit_cli/models/template.py +77 -0
- doit_cli/models/validation_models.py +175 -0
- doit_cli/models/workflow_models.py +319 -0
- doit_cli/prompts/__init__.py +5 -0
- doit_cli/prompts/fixit_prompts.py +344 -0
- doit_cli/prompts/interactive.py +390 -0
- doit_cli/rules/__init__.py +5 -0
- doit_cli/rules/builtin_rules.py +160 -0
- doit_cli/services/__init__.py +79 -0
- doit_cli/services/agent_detector.py +168 -0
- doit_cli/services/analytics_service.py +218 -0
- doit_cli/services/architecture_generator.py +290 -0
- doit_cli/services/backup_service.py +204 -0
- doit_cli/services/config_loader.py +113 -0
- doit_cli/services/context_loader.py +1123 -0
- doit_cli/services/coverage_calculator.py +142 -0
- doit_cli/services/crossref_service.py +237 -0
- doit_cli/services/cycle_time_calculator.py +134 -0
- doit_cli/services/date_inferrer.py +349 -0
- doit_cli/services/diagram_service.py +337 -0
- doit_cli/services/drift_detector.py +109 -0
- doit_cli/services/entity_parser.py +301 -0
- doit_cli/services/er_diagram_generator.py +197 -0
- doit_cli/services/fixit_service.py +699 -0
- doit_cli/services/github_service.py +192 -0
- doit_cli/services/hook_manager.py +258 -0
- doit_cli/services/hook_validator.py +528 -0
- doit_cli/services/input_validator.py +322 -0
- doit_cli/services/memory_search.py +527 -0
- doit_cli/services/mermaid_validator.py +334 -0
- doit_cli/services/prompt_transformer.py +91 -0
- doit_cli/services/prompt_writer.py +133 -0
- doit_cli/services/query_interpreter.py +428 -0
- doit_cli/services/report_exporter.py +219 -0
- doit_cli/services/report_generator.py +256 -0
- doit_cli/services/requirement_parser.py +112 -0
- doit_cli/services/roadmap_summarizer.py +209 -0
- doit_cli/services/rule_engine.py +443 -0
- doit_cli/services/scaffolder.py +215 -0
- doit_cli/services/score_calculator.py +172 -0
- doit_cli/services/section_parser.py +204 -0
- doit_cli/services/spec_scanner.py +327 -0
- doit_cli/services/state_manager.py +355 -0
- doit_cli/services/status_reporter.py +143 -0
- doit_cli/services/task_parser.py +347 -0
- doit_cli/services/template_manager.py +710 -0
- doit_cli/services/template_reader.py +158 -0
- doit_cli/services/user_journey_generator.py +214 -0
- doit_cli/services/user_story_parser.py +232 -0
- doit_cli/services/validation_service.py +188 -0
- doit_cli/services/validator.py +232 -0
- doit_cli/services/velocity_tracker.py +173 -0
- doit_cli/services/workflow_engine.py +405 -0
- doit_cli/templates/agent-file-template.md +28 -0
- doit_cli/templates/checklist-template.md +39 -0
- doit_cli/templates/commands/doit.checkin.md +363 -0
- doit_cli/templates/commands/doit.constitution.md +187 -0
- doit_cli/templates/commands/doit.documentit.md +485 -0
- doit_cli/templates/commands/doit.fixit.md +181 -0
- doit_cli/templates/commands/doit.implementit.md +265 -0
- doit_cli/templates/commands/doit.planit.md +262 -0
- doit_cli/templates/commands/doit.reviewit.md +355 -0
- doit_cli/templates/commands/doit.roadmapit.md +389 -0
- doit_cli/templates/commands/doit.scaffoldit.md +458 -0
- doit_cli/templates/commands/doit.specit.md +521 -0
- doit_cli/templates/commands/doit.taskit.md +304 -0
- doit_cli/templates/commands/doit.testit.md +277 -0
- doit_cli/templates/config/context.yaml +134 -0
- doit_cli/templates/config/hooks.yaml +93 -0
- doit_cli/templates/config/validation-rules.yaml +64 -0
- doit_cli/templates/github-issue-templates/epic.yml +78 -0
- doit_cli/templates/github-issue-templates/feature.yml +116 -0
- doit_cli/templates/github-issue-templates/task.yml +129 -0
- doit_cli/templates/hooks/.gitkeep +0 -0
- doit_cli/templates/hooks/post-commit.sh +25 -0
- doit_cli/templates/hooks/post-merge.sh +75 -0
- doit_cli/templates/hooks/pre-commit.sh +17 -0
- doit_cli/templates/hooks/pre-push.sh +18 -0
- doit_cli/templates/memory/completed_roadmap.md +50 -0
- doit_cli/templates/memory/constitution.md +125 -0
- doit_cli/templates/memory/roadmap.md +61 -0
- doit_cli/templates/plan-template.md +146 -0
- doit_cli/templates/scripts/bash/check-prerequisites.sh +166 -0
- doit_cli/templates/scripts/bash/common.sh +156 -0
- doit_cli/templates/scripts/bash/create-new-feature.sh +297 -0
- doit_cli/templates/scripts/bash/setup-plan.sh +61 -0
- doit_cli/templates/scripts/bash/update-agent-context.sh +675 -0
- doit_cli/templates/scripts/powershell/check-prerequisites.ps1 +148 -0
- doit_cli/templates/scripts/powershell/common.ps1 +137 -0
- doit_cli/templates/scripts/powershell/create-new-feature.ps1 +283 -0
- doit_cli/templates/scripts/powershell/setup-plan.ps1 +61 -0
- doit_cli/templates/scripts/powershell/update-agent-context.ps1 +406 -0
- doit_cli/templates/spec-template.md +159 -0
- doit_cli/templates/tasks-template.md +313 -0
- doit_cli/templates/vscode-settings.json +14 -0
- doit_toolkit_cli-0.1.10.dist-info/METADATA +324 -0
- doit_toolkit_cli-0.1.10.dist-info/RECORD +135 -0
- doit_toolkit_cli-0.1.10.dist-info/WHEEL +4 -0
- doit_toolkit_cli-0.1.10.dist-info/entry_points.txt +2 -0
- doit_toolkit_cli-0.1.10.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,428 @@
|
|
|
1
|
+
"""Natural language query interpretation for memory search.
|
|
2
|
+
|
|
3
|
+
This module provides the QueryInterpreter class that transforms natural
|
|
4
|
+
language questions into search queries by:
|
|
5
|
+
1. Classifying question types (what, why, how, where)
|
|
6
|
+
2. Extracting keywords by removing stop words
|
|
7
|
+
3. Identifying section hints for targeted searching
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import re
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from enum import Enum
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class QuestionType(str, Enum):
    """Classification of natural language question types.

    Subclasses ``str`` so each member compares equal to, and serializes
    as, its plain string value (e.g. ``QuestionType.WHAT == "what"``).
    ``UNKNOWN`` is the fallback used when no classification pattern
    matches a query.
    """

    WHAT = "what"  # Definitional questions ("what is X?")
    WHY = "why"  # Rationale/reasoning questions
    HOW = "how"  # Procedural/process questions
    WHERE = "where"  # Location/reference questions
    WHEN = "when"  # Temporal questions
    WHO = "who"  # Entity/ownership questions
    WHICH = "which"  # Selection/choice questions
    UNKNOWN = "unknown"  # Unclassified questions (no pattern matched)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
@dataclass
class InterpretedQuery:
    """Structured interpretation of a natural language question.

    Attributes:
        original_query: The question exactly as the caller supplied it.
        question_type: Classified type of question.
        keywords: Keywords remaining after stop-word removal.
        section_hints: Section names suggested for prioritized searching.
        search_terms: Final (possibly augmented) terms to search with.
        confidence: Interpretation confidence in the range 0.0-1.0.
    """

    original_query: str
    question_type: QuestionType = QuestionType.UNKNOWN
    keywords: list[str] = field(default_factory=list)
    section_hints: list[str] = field(default_factory=list)
    search_terms: list[str] = field(default_factory=list)
    confidence: float = 0.5

    def get_search_query(self) -> str:
        """Build the search query string from the interpreted terms.

        Prefers the refined ``search_terms``; falls back to the raw
        ``keywords`` when no search terms were generated.

        Returns:
            Space-separated search terms.
        """
        terms = self.search_terms or self.keywords
        return " ".join(terms)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class QueryInterpreter:
    """Interprets natural language queries for memory search.

    Transforms a natural language question into a structured search query
    by classifying the question type, extracting keywords (stop-word
    removal), and identifying section hints for targeted searching.

    The class attributes hold shared defaults; ``__init__`` copies them
    into per-instance state so that ``add_stop_word``,
    ``remove_stop_word``, and ``add_section_mapping`` customize one
    interpreter without leaking into other instances.

    Attributes:
        stop_words: Per-instance set of common words to filter out.
        section_mappings: Per-instance mapping of topic keywords to
            section names.
    """

    # Common English stop words to filter out of queries.
    STOP_WORDS = {
        "a", "an", "and", "are", "as", "at", "be", "by", "can", "do",
        "does", "for", "from", "has", "have", "how", "i", "if", "in",
        "is", "it", "its", "me", "my", "no", "not", "of", "on", "or",
        "our", "should", "so", "than", "that", "the", "their", "them",
        "then", "there", "these", "they", "this", "to", "was", "we",
        "were", "what", "when", "where", "which", "who", "why", "will",
        "with", "would", "you", "your",
    }

    # Regex patterns for question classification. Patterns anchored with
    # "^" (question word at the start of the query) are treated as a
    # stronger signal than mid-sentence keyword patterns.
    QUESTION_PATTERNS = {
        QuestionType.WHAT: [
            r"^what\s+",
            r"^what's\s+",
            r"^define\s+",
            r"^describe\s+",
            r"^explain\s+what",
        ],
        QuestionType.WHY: [
            r"^why\s+",
            r"^why's\s+",
            r"\breason\s+for\b",
            r"\brationale\b",
            r"\bpurpose\s+of\b",
        ],
        QuestionType.HOW: [
            r"^how\s+",
            r"^how's\s+",
            r"\bprocess\s+for\b",
            r"\bsteps\s+to\b",
            r"\bprocedure\b",
        ],
        QuestionType.WHERE: [
            r"^where\s+",
            r"^where's\s+",
            r"\blocated\b",
            r"\blocation\s+of\b",
            r"\bfind\s+the\b",
        ],
        QuestionType.WHEN: [
            r"^when\s+",
            r"^when's\s+",
            r"\btimeline\b",
            r"\bdeadline\b",
            r"\bschedule\b",
        ],
        QuestionType.WHO: [
            r"^who\s+",
            r"^who's\s+",
            r"\bresponsible\s+for\b",
            r"\bowner\s+of\b",
            r"\bauthor\b",
        ],
        QuestionType.WHICH: [
            r"^which\s+",
            r"\bchoose\s+between\b",
            r"\bselect\b",
            r"\bcompare\b",
        ],
    }

    # Default mappings from topic keywords to section names.
    SECTION_MAPPINGS = {
        # Vision-related keywords
        "vision": ["Vision", "Project Vision", "Overview"],
        "goal": ["Vision", "Goals", "Objectives"],
        "objective": ["Vision", "Goals", "Objectives"],
        "purpose": ["Vision", "Purpose", "Overview"],
        # Principles-related keywords
        "principle": ["Principles", "Guiding Principles", "Core Principles"],
        "value": ["Principles", "Values", "Core Values"],
        "standard": ["Standards", "Coding Standards", "Principles"],
        # Requirements-related keywords
        "requirement": ["Requirements", "Functional Requirements", "Non-Functional Requirements"],
        "feature": ["Features", "Requirements", "User Stories"],
        "user story": ["User Stories", "Requirements", "Features"],
        # Technical keywords
        "architecture": ["Architecture", "Technical Architecture", "System Design"],
        "technology": ["Technology Stack", "Tech Stack", "Technologies"],
        "stack": ["Technology Stack", "Tech Stack"],
        "api": ["API", "Endpoints", "Contracts"],
        "database": ["Database", "Data Model", "Schema"],
        # Process keywords
        "workflow": ["Workflow", "Process", "Procedures"],
        "process": ["Process", "Workflow", "Procedures"],
        "procedure": ["Procedures", "Process", "Steps"],
        # Roadmap keywords
        "priority": ["Priority", "Priorities", "Roadmap"],
        "roadmap": ["Roadmap", "Timeline", "Priorities"],
        "milestone": ["Milestones", "Roadmap", "Timeline"],
        "phase": ["Phases", "Timeline", "Roadmap"],
    }

    def __init__(self) -> None:
        """Initialize the interpreter with per-instance copies of the
        class-level defaults."""
        self.stop_words = self.STOP_WORDS.copy()
        # Bug fix: copy the mapping so add_section_mapping() mutates only
        # this instance. Previously it wrote into the shared class-level
        # SECTION_MAPPINGS dict, leaking custom mappings across every
        # QueryInterpreter instance (inconsistent with stop_words, which
        # was already copied per-instance).
        self.section_mappings: dict[str, list[str]] = dict(self.SECTION_MAPPINGS)

    def interpret(self, query: str) -> InterpretedQuery:
        """Interpret a natural language query.

        Args:
            query: The natural language question or query.

        Returns:
            InterpretedQuery with classification and extracted terms.
            An empty/whitespace-only query yields an UNKNOWN result with
            zero confidence.
        """
        if not query or not query.strip():
            return InterpretedQuery(
                original_query=query or "",
                question_type=QuestionType.UNKNOWN,
                confidence=0.0,
            )

        query = query.strip()
        query_lower = query.lower()

        # Classify question type (what/why/how/...).
        question_type, type_confidence = self._classify_question(query_lower)

        # Extract keywords by removing stop words and short tokens.
        keywords = self._extract_keywords(query_lower)

        # Identify sections worth prioritizing based on the keywords.
        section_hints = self._identify_section_hints(keywords)

        # Generate final search terms (keywords plus one type booster).
        search_terms = self._generate_search_terms(keywords, question_type)

        # Combine signals into an overall confidence score.
        confidence = self._calculate_confidence(
            keywords, section_hints, type_confidence
        )

        return InterpretedQuery(
            original_query=query,
            question_type=question_type,
            keywords=keywords,
            section_hints=section_hints,
            search_terms=search_terms,
            confidence=confidence,
        )

    def _classify_question(self, query: str) -> tuple[QuestionType, float]:
        """Classify the type of question.

        Args:
            query: Lowercase query string.

        Returns:
            Tuple of (QuestionType, confidence score). Confidence is 0.9
            for start-anchored pattern matches, 0.7 for mid-sentence
            matches, and 0.3 when nothing matches (UNKNOWN).
        """
        for q_type, patterns in self.QUESTION_PATTERNS.items():
            for pattern in patterns:
                if re.search(pattern, query, re.IGNORECASE):
                    # Higher confidence for start-of-query matches.
                    confidence = 0.9 if pattern.startswith("^") else 0.7
                    return q_type, confidence

        return QuestionType.UNKNOWN, 0.3

    def _extract_keywords(self, query: str) -> list[str]:
        """Extract keywords by removing stop words.

        Args:
            query: Lowercase query string.

        Returns:
            De-duplicated keywords in first-occurrence order; tokens of
            length <= 2 and stop words are dropped.
        """
        # Remove punctuation except hyphens in compound words.
        cleaned = re.sub(r"[^\w\s-]", " ", query)

        words = cleaned.split()

        # Filter stop words and short words.
        keywords = [
            word
            for word in words
            if word not in self.stop_words and len(word) > 2
        ]

        # Preserve order but remove duplicates.
        seen: set[str] = set()
        unique_keywords: list[str] = []
        for kw in keywords:
            if kw not in seen:
                seen.add(kw)
                unique_keywords.append(kw)

        return unique_keywords

    def _identify_section_hints(self, keywords: list[str]) -> list[str]:
        """Identify relevant sections based on keywords.

        Args:
            keywords: List of extracted keywords.

        Returns:
            Sorted list of section names to prioritize (sorted so the
            result is deterministic; previously the arbitrary set order
            leaked into the output).
        """
        hints: set[str] = set()

        for keyword in keywords:
            keyword_lower = keyword.lower()
            # Bidirectional substring matching subsumes exact matches
            # (e.g. "road" -> "roadmap", "requirements" -> "requirement"),
            # so a separate exact-match lookup is unnecessary.
            for mapped_keyword, sections in self.section_mappings.items():
                if mapped_keyword in keyword_lower or keyword_lower in mapped_keyword:
                    hints.update(sections)

        return sorted(hints)

    def _generate_search_terms(
        self, keywords: list[str], question_type: QuestionType
    ) -> list[str]:
        """Generate optimized search terms from keywords.

        Args:
            keywords: Extracted keywords.
            question_type: Classified question type.

        Returns:
            Keywords plus at most one "booster" synonym chosen from the
            question type's related terms.
        """
        if not keywords:
            return []

        search_terms = keywords.copy()

        # Synonyms/related terms keyed by question type.
        type_boosters = {
            QuestionType.WHAT: ["definition", "description", "overview"],
            QuestionType.WHY: ["reason", "rationale", "purpose", "because"],
            QuestionType.HOW: ["steps", "process", "procedure", "workflow"],
            QuestionType.WHERE: ["location", "file", "section", "path"],
            QuestionType.WHEN: ["timeline", "date", "schedule", "deadline"],
            QuestionType.WHO: ["owner", "responsible", "team", "author"],
            QuestionType.WHICH: ["compare", "options", "choose", "select"],
        }

        # Add the first booster term not already present, then stop.
        if question_type in type_boosters:
            for booster in type_boosters[question_type]:
                if booster not in search_terms:
                    search_terms.append(booster)
                    break

        return search_terms

    def _calculate_confidence(
        self,
        keywords: list[str],
        section_hints: list[str],
        type_confidence: float,
    ) -> float:
        """Calculate overall interpretation confidence.

        Args:
            keywords: Extracted keywords.
            section_hints: Identified section hints.
            type_confidence: Confidence from question classification.

        Returns:
            Overall confidence score (0.0-1.0): weighted blend of type
            confidence (40%), keyword count (40%, capped), and a flat
            bonus when section hints were found.
        """
        if not keywords:
            return 0.1

        # Base confidence from keyword extraction, capped at 0.5.
        keyword_confidence = min(len(keywords) * 0.15, 0.5)

        # Bonus for finding section hints.
        hint_bonus = 0.2 if section_hints else 0.0

        total = (type_confidence * 0.4) + (keyword_confidence * 0.4) + hint_bonus

        return min(total, 1.0)

    def add_stop_word(self, word: str) -> None:
        """Add a custom stop word to this interpreter instance.

        Args:
            word: Word to add to the stop words set (lowercased).
        """
        self.stop_words.add(word.lower())

    def remove_stop_word(self, word: str) -> None:
        """Remove a word from this instance's stop words.

        Args:
            word: Word to remove from the stop words set (lowercased);
                missing words are ignored.
        """
        self.stop_words.discard(word.lower())

    def add_section_mapping(self, keyword: str, sections: list[str]) -> None:
        """Add a custom section mapping to this interpreter instance.

        Args:
            keyword: Keyword to map (lowercased).
            sections: List of section names to associate.
        """
        # Writes to the per-instance copy, not the class-level default.
        self.section_mappings[keyword.lower()] = sections
|
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
"""Report exporter service for spec analytics.
|
|
2
|
+
|
|
3
|
+
Provides export functionality for analytics reports in
|
|
4
|
+
Markdown and JSON formats.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Optional
|
|
11
|
+
|
|
12
|
+
from ..models.analytics_models import AnalyticsReport
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ReportExporter:
    """Exporter for analytics reports.

    Generates formatted reports in various output formats (Markdown or
    JSON) suitable for sharing or archival. All data comes from the
    wrapped ``AnalyticsReport``; this class only formats and writes it.
    """

    def __init__(self, report: AnalyticsReport) -> None:
        """Initialize exporter with a report.

        Args:
            report: The AnalyticsReport to export
        """
        self.report = report

    def export_markdown(self) -> str:
        """Export report as Markdown.

        Builds the document as a list of lines (header, summary, status
        table, optional cycle-time/velocity/recent-completion sections,
        footer) and joins them at the end.

        Returns:
            Markdown formatted report string
        """
        # Header and summary. NOTE(review): generated_at is formatted
        # naively — presumably a local-time datetime; confirm against
        # AnalyticsReport.
        lines = [
            f"# Analytics Report - {self.report.project_root.name}",
            "",
            f"**Generated**: {self.report.generated_at.strftime('%Y-%m-%d %H:%M:%S')}",
            f"**Report ID**: {self.report.report_id}",
            "",
            "---",
            "",
            "## Summary",
            "",
            f"- **Total Specs**: {self.report.total_specs}",
            f"- **Completion Rate**: {self.report.completion_pct}%",
        ]

        # Cycle stats are optional; only summarize when present.
        if self.report.cycle_stats:
            lines.append(
                f"- **Average Cycle Time**: {self.report.cycle_stats.average_days} days"
            )

        # Status breakdown table, highest count first (sorted is stable,
        # so equal counts keep the dict's insertion order).
        lines.extend([
            "",
            "## Status Breakdown",
            "",
            "| Status | Count | Percentage |",
            "|--------|-------|------------|",
        ])

        total = self.report.total_specs
        for status, count in sorted(
            self.report.by_status.items(), key=lambda x: x[1], reverse=True
        ):
            # Guard against division by zero for an empty report.
            pct = (count / total * 100) if total > 0 else 0
            lines.append(f"| {status.display_name} | {count} | {pct:.1f}% |")

        # Cycle time statistics (optional section).
        if self.report.cycle_stats:
            stats = self.report.cycle_stats
            lines.extend([
                "",
                "## Cycle Time Statistics",
                "",
                f"Based on {stats.sample_count} completed specifications.",
                "",
                "| Metric | Value |",
                "|--------|-------|",
                f"| Average | {stats.average_days} days |",
                f"| Median | {stats.median_days} days |",
                f"| Minimum | {stats.min_days} days |",
                f"| Maximum | {stats.max_days} days |",
                f"| Std Deviation | {stats.std_dev_days} days |",
            ])

        # Velocity trends: at most 12 weeks shown, but the average below
        # is computed over ALL velocity entries, not just the shown ones.
        if self.report.velocity:
            lines.extend([
                "",
                "## Velocity Trends",
                "",
                "| Week | Completed | Specs |",
                "|------|-----------|-------|",
            ])

            for v in self.report.velocity[:12]:
                # Show up to three spec names, then a "+N more" suffix.
                specs_list = ", ".join(v.spec_names[:3])
                if len(v.spec_names) > 3:
                    specs_list += f" (+{len(v.spec_names) - 3} more)"
                lines.append(f"| {v.week_key} | {v.specs_completed} | {specs_list} |")

            # Calculate average (the else-guard is redundant inside this
            # truthy branch but harmless).
            total_completed = sum(v.specs_completed for v in self.report.velocity)
            avg = total_completed / len(self.report.velocity) if self.report.velocity else 0
            lines.extend([
                "",
                f"**Average velocity**: {avg:.1f} specs/week",
            ])

        # Individual specs: the 10 most recently completed.
        if self.report.specs:
            completed_specs = [
                s for s in self.report.specs if s.completed_at
            ]
            # Sorting assumes completed_at values are mutually comparable
            # (all dates or all datetimes) — TODO confirm in models.
            completed_specs.sort(key=lambda s: s.completed_at, reverse=True)

            if completed_specs:
                lines.extend([
                    "",
                    "## Recent Completions",
                    "",
                    "| Spec | Completed | Cycle Time |",
                    "|------|-----------|------------|",
                ])

                for spec in completed_specs[:10]:
                    # "is not None" so a legitimate 0-day cycle still prints.
                    cycle = (
                        f"{spec.cycle_time_days} days"
                        if spec.cycle_time_days is not None
                        else "-"
                    )
                    lines.append(
                        f"| {spec.name} | {spec.completed_at} | {cycle} |"
                    )

        # Footer
        lines.extend([
            "",
            "---",
            "",
            f"*Report generated by doit analytics on {self.report.generated_at.strftime('%Y-%m-%d')}*",
        ])

        return "\n".join(lines)

    def export_json(self) -> str:
        """Export report as JSON.

        Delegates serialization to the report's own ``to_dict``.

        Returns:
            JSON formatted report string
        """
        return json.dumps(self.report.to_dict(), indent=2)

    def save(
        self,
        output_path: Optional[Path] = None,
        format_type: str = "markdown",
    ) -> Path:
        """Save report to file.

        Args:
            output_path: Path to save to (auto-generates if None)
            format_type: Export format (markdown or json)

        Returns:
            Path where report was saved

        Raises:
            ValueError: If format_type is not supported
            OSError: If file cannot be written
        """
        # Validate format before doing any filesystem work.
        if format_type not in ("markdown", "json"):
            raise ValueError(f"Unsupported format: {format_type}")

        # Generate default path under <project>/.doit/reports if not
        # provided. Date-only timestamp means a second run on the same
        # day overwrites the earlier report.
        if output_path is None:
            reports_dir = self.report.project_root / ".doit" / "reports"
            reports_dir.mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime("%Y-%m-%d")
            ext = "json" if format_type == "json" else "md"
            output_path = reports_dir / f"analytics-{timestamp}.{ext}"

        # Generate content in the requested format.
        if format_type == "json":
            content = self.export_json()
        else:
            content = self.export_markdown()

        # Ensure parent directory exists (covers caller-supplied paths;
        # redundant for the auto-generated one above).
        output_path.parent.mkdir(parents=True, exist_ok=True)

        # Write file
        output_path.write_text(content, encoding="utf-8")

        return output_path

    @classmethod
    def export_to_file(
        cls,
        report: AnalyticsReport,
        output_path: Optional[Path] = None,
        format_type: str = "markdown",
    ) -> Path:
        """Convenience method to export report directly.

        Equivalent to ``ReportExporter(report).save(...)``.

        Args:
            report: The AnalyticsReport to export
            output_path: Path to save to (auto-generates if None)
            format_type: Export format (markdown or json)

        Returns:
            Path where report was saved
        """
        exporter = cls(report)
        return exporter.save(output_path, format_type)
|