specfact-cli 0.4.0 (specfact_cli-0.4.0-py3-none-any.whl)
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Potentially problematic release.
This version of specfact-cli might be problematic.
- specfact_cli/__init__.py +14 -0
- specfact_cli/agents/__init__.py +23 -0
- specfact_cli/agents/analyze_agent.py +392 -0
- specfact_cli/agents/base.py +95 -0
- specfact_cli/agents/plan_agent.py +202 -0
- specfact_cli/agents/registry.py +176 -0
- specfact_cli/agents/sync_agent.py +133 -0
- specfact_cli/analyzers/__init__.py +10 -0
- specfact_cli/analyzers/code_analyzer.py +775 -0
- specfact_cli/cli.py +397 -0
- specfact_cli/commands/__init__.py +7 -0
- specfact_cli/commands/enforce.py +87 -0
- specfact_cli/commands/import_cmd.py +355 -0
- specfact_cli/commands/init.py +119 -0
- specfact_cli/commands/plan.py +1090 -0
- specfact_cli/commands/repro.py +172 -0
- specfact_cli/commands/sync.py +408 -0
- specfact_cli/common/__init__.py +24 -0
- specfact_cli/common/logger_setup.py +673 -0
- specfact_cli/common/logging_utils.py +41 -0
- specfact_cli/common/text_utils.py +52 -0
- specfact_cli/common/utils.py +48 -0
- specfact_cli/comparators/__init__.py +10 -0
- specfact_cli/comparators/plan_comparator.py +391 -0
- specfact_cli/generators/__init__.py +13 -0
- specfact_cli/generators/plan_generator.py +105 -0
- specfact_cli/generators/protocol_generator.py +115 -0
- specfact_cli/generators/report_generator.py +200 -0
- specfact_cli/generators/workflow_generator.py +111 -0
- specfact_cli/importers/__init__.py +6 -0
- specfact_cli/importers/speckit_converter.py +773 -0
- specfact_cli/importers/speckit_scanner.py +704 -0
- specfact_cli/models/__init__.py +32 -0
- specfact_cli/models/deviation.py +105 -0
- specfact_cli/models/enforcement.py +150 -0
- specfact_cli/models/plan.py +97 -0
- specfact_cli/models/protocol.py +28 -0
- specfact_cli/modes/__init__.py +18 -0
- specfact_cli/modes/detector.py +126 -0
- specfact_cli/modes/router.py +153 -0
- specfact_cli/sync/__init__.py +11 -0
- specfact_cli/sync/repository_sync.py +279 -0
- specfact_cli/sync/speckit_sync.py +388 -0
- specfact_cli/utils/__init__.py +57 -0
- specfact_cli/utils/console.py +69 -0
- specfact_cli/utils/feature_keys.py +213 -0
- specfact_cli/utils/git.py +241 -0
- specfact_cli/utils/ide_setup.py +381 -0
- specfact_cli/utils/prompts.py +179 -0
- specfact_cli/utils/structure.py +496 -0
- specfact_cli/utils/yaml_utils.py +200 -0
- specfact_cli/validators/__init__.py +19 -0
- specfact_cli/validators/fsm.py +260 -0
- specfact_cli/validators/repro_checker.py +320 -0
- specfact_cli/validators/schema.py +200 -0
- specfact_cli-0.4.0.dist-info/METADATA +332 -0
- specfact_cli-0.4.0.dist-info/RECORD +60 -0
- specfact_cli-0.4.0.dist-info/WHEEL +4 -0
- specfact_cli-0.4.0.dist-info/entry_points.txt +2 -0
- specfact_cli-0.4.0.dist-info/licenses/LICENSE.md +55 -0
specfact_cli/__init__.py
ADDED
@@ -0,0 +1,14 @@
"""
SpecFact CLI - Spec→Contract→Sentinel tool for contract-driven development.

This package provides command-line tools for:
- Importing Spec-Kit projects
- Analyzing brownfield codebases
- Managing development plans
- Enforcing contract validation
- Validating reproducibility
"""

__version__ = "0.4.0"

__all__ = ["__version__"]
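The `__init__.py` docstring above lists the command-line tools this wheel provides. Below is a minimal, hypothetical sketch (not part of the package) of driving one of those tools from Python; it assumes the installed console script is named `specfact`, since the prompts in `analyze_agent.py` further down reference `specfact import from-code`, and it omits option flags because none are visible in this diff.

```python
# Hypothetical usage sketch, not shipped with specfact-cli. Assumes the wheel
# installs a `specfact` console script (referenced by the prompts in
# analyze_agent.py); option flags are omitted since they are not shown here.
import subprocess

result = subprocess.run(
    ["specfact", "import", "from-code"],
    capture_output=True,
    text=True,
    check=False,
)
print(result.returncode)
print(result.stdout)
```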
specfact_cli/agents/__init__.py
ADDED
@@ -0,0 +1,23 @@
"""
Agent Modes - Enhanced prompts and routing for CoPilot-enabled mode.

This package provides agent mode framework for generating enhanced prompts
and routing commands with context injection for CoPilot integration.
"""

from __future__ import annotations

from specfact_cli.agents.analyze_agent import AnalyzeAgent
from specfact_cli.agents.base import AgentMode
from specfact_cli.agents.plan_agent import PlanAgent
from specfact_cli.agents.registry import AgentRegistry, get_agent
from specfact_cli.agents.sync_agent import SyncAgent

__all__ = [
    "AgentMode",
    "AgentRegistry",
    "AnalyzeAgent",
    "PlanAgent",
    "SyncAgent",
    "get_agent",
]
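A minimal sketch of how the exported agents might be used, based on the doctest examples that ship in the modules below; `AgentRegistry` and `get_agent` are re-exported here but their signatures are not shown in this diff, so only `AnalyzeAgent` is exercised.

```python
# Sketch based on the doctest examples in the agent modules below; AgentRegistry
# and get_agent are not exercised because their signatures do not appear in this diff.
from specfact_cli.agents import AnalyzeAgent

agent = AnalyzeAgent()

# Enhanced prompt intended for an AI IDE (Cursor, CoPilot, etc.).
prompt = agent.generate_prompt("import from-code", {"current_file": "src/main.py"})

# Structured result containing the prompt, args, and context.
result = agent.execute("import from-code", {"repo": "."}, {"current_file": "src/main.py"})
print(result["type"], result["enhanced"])  # "analysis" True
```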
specfact_cli/agents/analyze_agent.py
ADDED
@@ -0,0 +1,392 @@
"""
Analyze Agent - AI-first brownfield analysis with semantic understanding.

This module provides the AnalyzeAgent for brownfield code analysis using
AI (LLM) to understand codebase semantics and generate Spec-Kit/SpecFact
compatible artifacts. This replaces the AST-based approach for better
multi-language support and semantic understanding.
"""

from __future__ import annotations

from pathlib import Path
from typing import Any

from beartype import beartype
from icontract import ensure, require

from specfact_cli.agents.base import AgentMode
from specfact_cli.models.plan import Idea, Metadata, PlanBundle, Product


class AnalyzeAgent(AgentMode):
    """
    AI-first brownfield analysis agent with semantic understanding.

    Provides enhanced prompts for brownfield analysis operations using
    AI (LLM) to understand codebase semantics and generate Spec-Kit/SpecFact
    compatible artifacts. This approach enables:
    - Multi-language support (Python, TypeScript, JavaScript, PowerShell, etc.)
    - Semantic understanding (priorities, constraints, unknowns, scenarios)
    - High-quality Spec-Kit artifact generation
    - Proper bidirectional sync with semantic preservation

    Falls back to AST-based analysis in CI/CD mode when LLM is unavailable.
    """

    @beartype
    @require(lambda command: bool(command), "Command must be non-empty")
    @ensure(lambda result: isinstance(result, str) and bool(result), "Prompt must be non-empty string")
    def generate_prompt(self, command: str, context: dict[str, Any] | None = None) -> str:
        """
        Generate enhanced prompt for brownfield analysis.

        This prompt instructs the AI IDE's LLM to:
        1. Understand the codebase semantically
        2. Call the SpecFact CLI for structured analysis
        3. Enhance results with semantic understanding

        Args:
            command: CLI command being executed (e.g., "import from-code")
            context: Context dictionary with current file, selection, workspace

        Returns:
            Enhanced prompt optimized for AI IDE (Cursor, CoPilot, etc.)

        Examples:
            >>> agent = AnalyzeAgent()
            >>> prompt = agent.generate_prompt("import from-code", {"current_file": "src/main.py"})
            >>> "specfact import from-code" in prompt.lower()
            True
        """
        if context is None:
            context = {}

        current_file = context.get("current_file", "")
        selection = context.get("selection", "")
        workspace = context.get("workspace", "")

        # Load codebase context for AI analysis
        repo_path = Path(workspace) if workspace else Path(".")
        codebase_context = self._load_codebase_context(repo_path)

        prompt = f"""
You are helping analyze a codebase and generate a SpecFact plan bundle using AI-first semantic understanding.

## Repository Context

- **Directory structure**: {codebase_context.get("structure", "N/A")}
- **Code files**: {len(codebase_context.get("files", []))} files analyzed
- **Languages detected**: {", ".join({f.suffix for f in [Path(f) for f in codebase_context.get("files", [])[:20]]})}
- **Dependencies**: {", ".join(codebase_context.get("dependencies", [])[:10])}
- **Current file**: {current_file or "None"}
- **Selection**: {selection or "None"}

## Your Task

### Step 1: Semantic Understanding (Use Your AI Capabilities)

Use your AI capabilities to understand the codebase:

1. **Read and understand** the repository structure and codebase
2. **Identify features** from business logic (not just class structure)
3. **Extract user stories** from code intent (not just method patterns)
4. **Infer priorities** from code context (comments, docs, structure, usage patterns)
5. **Identify constraints** from code/docs (technical limitations, requirements)
6. **Identify unknowns** from code analysis (missing information, unclear decisions)
7. **Generate scenarios** from acceptance criteria (Primary, Alternate, Exception, Recovery)
8. **Extract technology stack** from dependencies and imports

### Step 2: Generate Plan Bundle Directly

**Generate a PlanBundle structure directly** using your semantic understanding:

1. **Create PlanBundle structure** (as a Python dict matching the Pydantic model):
   - `version: "1.0"`
   - `idea` with `title` set to the provided plan name (from `--name` argument) instead of "Unknown Project"
   - `product` with `themes: []` and `releases: []`
   - `features: []` with Feature objects containing:
     - `key`, `title`, `outcomes`, `acceptance`, `constraints`
     - `confidence`, `draft`, `stories: []`
   - `metadata` with `stage: "draft"`

2. **Convert to YAML** using proper YAML formatting (2-space indentation, no flow style)

3. **Write to file**: `.specfact/plans/<name>-<timestamp>.bundle.yaml`
   - If no name provided, ask user for a meaningful plan name (e.g., "API Client v2", "User Authentication", "Payment Processing")
   - Name will be automatically sanitized (lowercased, spaces/special chars removed) for filesystem persistence
   - Use ISO 8601 timestamp format: `YYYY-MM-DDTHH-MM-SS`
   - Ensure directory exists: `.specfact/plans/`
   - Example: `.specfact/plans/api-client-v2.2025-11-04T22-17-22.bundle.yaml`

### Step 3: Present Results

**Present the generated plan bundle** to the user:

- Plan bundle location and summary
- Feature/story counts with confidence scores
- Semantic insights and recommendations

## Key Principles

- **Semantic understanding first**: Use AI to understand business logic and intent
- **Direct generation**: Generate the plan bundle directly as YAML, don't call the CLI
- **Multi-language support**: Works with Python, TypeScript, JavaScript, PowerShell, etc.
- **Spec-Kit compatibility**: Generate artifacts that work with `/speckit.analyze`, `/speckit.implement`, `/speckit.checklist`

Focus on semantic understanding, not just structural parsing. Generate the plan bundle directly using your AI capabilities.
"""
        return prompt.strip()

    @beartype
    @require(lambda command: bool(command), "Command must be non-empty")
    @ensure(lambda result: isinstance(result, dict), "Result must be a dictionary")
    def execute(
        self, command: str, args: dict[str, Any] | None = None, context: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """
        Execute brownfield analysis with enhanced prompts.

        Args:
            command: CLI command being executed (e.g., "import from-code")
            args: Command arguments (e.g., {"repo": ".", "confidence": 0.7})
            context: Context dictionary with current file, selection, workspace

        Returns:
            Command result with enhanced output

        Examples:
            >>> agent = AnalyzeAgent()
            >>> result = agent.execute("import from-code", {"repo": "."}, {"current_file": "src/main.py"})
            >>> isinstance(result, dict)
            True
        """
        if args is None:
            args = {}
        if context is None:
            context = {}

        # Generate enhanced prompt
        prompt = self.generate_prompt(command, context)

        # For Phase 4.1, return structured result with prompt
        # In Phase 4.2+, this will route to actual command execution with agent mode
        return {
            "type": "analysis",
            "command": command,
            "prompt": prompt,
            "args": args,
            "context": context,
            "enhanced": True,
        }

    @beartype
    @ensure(lambda result: isinstance(result, dict), "Result must be a dictionary")
    def inject_context(self, context: dict[str, Any] | None = None) -> dict[str, Any]:
        """
        Inject context information specific to analysis operations.

        Args:
            context: Basic context dictionary (can be None)

        Returns:
            Enhanced context with analysis-specific information

        Examples:
            >>> agent = AnalyzeAgent()
            >>> enhanced = agent.inject_context({"current_file": "src/main.py"})
            >>> isinstance(enhanced, dict)
            True
        """
        enhanced = super().inject_context(context)

        # Add workspace structure if workspace is available
        if enhanced.get("workspace"):
            workspace_path = Path(enhanced["workspace"])
            if workspace_path.exists() and workspace_path.is_dir():
                # Add workspace structure information
                src_dirs = list(workspace_path.glob("src/**"))
                test_dirs = list(workspace_path.glob("tests/**"))
                enhanced["workspace_structure"] = {
                    "src_dirs": [str(d) for d in src_dirs[:10]],  # Limit to first 10
                    "test_dirs": [str(d) for d in test_dirs[:10]],
                }

        return enhanced

    @beartype
    @require(lambda repo_path: repo_path.exists() and repo_path.is_dir(), "Repo path must exist and be directory")
    @ensure(lambda result: isinstance(result, dict), "Result must be a dictionary")
    def _load_codebase_context(self, repo_path: Path) -> dict[str, Any]:
        """
        Load codebase context for AI analysis.

        Args:
            repo_path: Path to repository root

        Returns:
            Dictionary with codebase context (structure, files, dependencies, summary)
        """
        context: dict[str, Any] = {
            "structure": [],
            "files": [],
            "dependencies": [],
            "summary": "",
        }

        # Load directory structure
        try:
            src_dirs = list(repo_path.glob("src/**")) if (repo_path / "src").exists() else []
            test_dirs = list(repo_path.glob("tests/**")) if (repo_path / "tests").exists() else []
            context["structure"] = {
                "src_dirs": [str(d.relative_to(repo_path)) for d in src_dirs[:20]],
                "test_dirs": [str(d.relative_to(repo_path)) for d in test_dirs[:20]],
            }
        except Exception:
            context["structure"] = {}

        # Load code files (all languages)
        code_extensions = {".py", ".ts", ".tsx", ".js", ".jsx", ".ps1", ".psm1", ".go", ".rs", ".java", ".kt"}
        code_files: list[Path] = []
        for ext in code_extensions:
            code_files.extend(list(repo_path.rglob(f"*{ext}")))

        # Filter out common ignore patterns
        ignore_patterns = {
            "__pycache__",
            ".git",
            "venv",
            ".venv",
            "node_modules",
            ".pytest_cache",
            "dist",
            "build",
            ".eggs",
        }

        filtered_files = [
            f
            for f in code_files[:100]  # Limit to first 100 files
            if not any(pattern in str(f) for pattern in ignore_patterns)
        ]

        context["files"] = [str(f.relative_to(repo_path)) for f in filtered_files]

        # Load dependencies
        dependency_files = [
            repo_path / "requirements.txt",
            repo_path / "package.json",
            repo_path / "pom.xml",
            repo_path / "go.mod",
            repo_path / "Cargo.toml",
            repo_path / "pyproject.toml",
        ]

        dependencies: list[str] = []
        for dep_file in dependency_files:
            if dep_file.exists():
                try:
                    content = dep_file.read_text(encoding="utf-8")[:500]  # First 500 chars
                    dependencies.append(f"{dep_file.name}: {content[:100]}...")
                except Exception:
                    pass

        context["dependencies"] = dependencies

        # Generate summary
        context[
            "summary"
        ] = f"""
Repository: {repo_path.name}
Total code files: {len(filtered_files)}
Languages detected: {", ".join({f.suffix for f in filtered_files[:20]})}
Dependencies: {len(dependencies)} dependency files found
"""

        return context

    @beartype
    @require(lambda repo_path: repo_path.exists() and repo_path.is_dir(), "Repo path must exist and be directory")
    @require(lambda confidence: 0.0 <= confidence <= 1.0, "Confidence must be 0.0-1.0")
    @require(lambda plan_name: plan_name is None or isinstance(plan_name, str), "Plan name must be None or str")
    @ensure(lambda result: isinstance(result, PlanBundle), "Result must be PlanBundle")
    def analyze_codebase(self, repo_path: Path, confidence: float = 0.5, plan_name: str | None = None) -> PlanBundle:
        """
        Analyze codebase using AI-first approach with semantic understanding.

        **Pragmatic Approach**: This method is designed for AI IDE integration (Cursor, CoPilot, etc.).
        The AI IDE's native LLM will:
        1. Understand the codebase semantically (using the prompt from `generate_prompt()`)
        2. Call the SpecFact CLI (`specfact import from-code`) for structured analysis
        3. Enhance results with semantic understanding

        This avoids the need for:
        - Separate LLM API setup (langchain, OpenAI API keys, etc.)
        - Additional API costs
        - Complex integration code

        The CLI handles:
        - File I/O (reading code, writing YAML/Markdown)
        - Structured data generation (plan bundle format)
        - Validation (schema checking, error handling)
        - Mode detection (AI-first vs AST-based fallback)

        Args:
            repo_path: Path to repository root
            confidence: Minimum confidence score (0.0-1.0)
            plan_name: Custom plan name (will be used for idea.title, optional)

        Returns:
            PlanBundle with semantic understanding

        Note:
            In CoPilot mode, the AI IDE will execute the CLI and parse results.
            In CI/CD mode, the command falls back to AST-based CodeAnalyzer.
        """
        # Load codebase context for AI prompt generation
        _context = self._load_codebase_context(repo_path)

        # Generate AI analysis prompt (instructs AI IDE to use CLI)
        agent_context = {
            "workspace": str(repo_path),
            "current_file": None,
            "selection": None,
        }
        enhanced_context = self.inject_context(agent_context)
        _prompt = self.generate_prompt("import from-code", enhanced_context)

        # In AI IDE mode, the AI will:
        # 1. Use the prompt to understand the codebase semantically
        # 2. Call `specfact import from-code` with appropriate arguments
        # 3. Parse the CLI output and enhance with semantic understanding
        # 4. Present results to the user

        # For now, return a placeholder plan bundle
        # The actual analysis will be done by the AI IDE calling the CLI
        # Use plan name if provided, otherwise use repo name, otherwise fallback
        if plan_name:
            # Use the plan name (already sanitized, but humanize for title)
            title = plan_name.replace("_", " ").replace("-", " ").title()
        else:
            repo_name = repo_path.name or "Unknown Project"
            title = repo_name.replace("_", " ").replace("-", " ").title()

        idea = Idea(
            title=title,
            narrative=f"Auto-derived plan from brownfield analysis of {title}",
            metrics=None,
        )

        product = Product(
            themes=["Core"],
            releases=[],
        )

        return PlanBundle(
            version="1.0",
            idea=idea,
            business=None,
            product=product,
            features=[],
            metadata=Metadata(stage="draft", promoted_at=None, promoted_by=None),
        )
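The prompt generated above instructs the AI IDE to build a PlanBundle as a Python dict and serialize it to YAML under `.specfact/plans/`. A rough sketch of such a dict follows, limited to the fields the prompt enumerates; the example values and any structure beyond those named fields are assumptions, and PyYAML is assumed to be available for the serialization step.

```python
# Hypothetical plan-bundle dict shaped after the fields enumerated in
# generate_prompt(); example values and the feature key format are assumptions.
plan_bundle = {
    "version": "1.0",
    "idea": {"title": "API Client v2"},  # taken from --name when provided
    "product": {"themes": [], "releases": []},
    "features": [
        {
            "key": "api-client",  # hypothetical feature key
            "title": "Example feature",
            "outcomes": [],
            "acceptance": [],
            "constraints": [],
            "confidence": 0.7,
            "draft": True,
            "stories": [],
        }
    ],
    "metadata": {"stage": "draft"},
}

# The prompt then asks for YAML output (2-space indentation, no flow style)
# written to .specfact/plans/<name>-<timestamp>.bundle.yaml.
import yaml  # PyYAML, assumed available in the consuming environment

print(yaml.safe_dump(plan_bundle, sort_keys=False, default_flow_style=False, indent=2))
```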
specfact_cli/agents/base.py
ADDED
@@ -0,0 +1,95 @@
"""
Base Agent Mode - Abstract interface for agent modes.

This module provides the base class for agent modes that generate enhanced
prompts and route commands with context injection for CoPilot integration.
"""

from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any

from beartype import beartype
from icontract import ensure, require


class AgentMode(ABC):
    """
    Base class for agent modes.

    Agent modes provide enhanced prompts optimized for CoPilot and route
    commands with context injection.
    """

    @beartype
    @require(lambda command: bool(command), "Command must be non-empty")
    @ensure(lambda result: isinstance(result, str) and bool(result), "Prompt must be non-empty string")
    @abstractmethod
    def generate_prompt(self, command: str, context: dict[str, Any] | None = None) -> str:
        """
        Generate enhanced prompt for CoPilot.

        Args:
            command: CLI command being executed (e.g., "import from-code")
            context: Context dictionary with current file, selection, workspace, etc.

        Returns:
            Enhanced prompt optimized for CoPilot execution

        Examples:
            >>> agent = AnalyzeAgent()
            >>> prompt = agent.generate_prompt("import from-code", {"current_file": "src/main.py"})
            >>> isinstance(prompt, str)
            True
        """

    @beartype
    @require(lambda command: bool(command), "Command must be non-empty")
    @ensure(lambda result: isinstance(result, dict), "Result must be a dictionary")
    @abstractmethod
    def execute(
        self, command: str, args: dict[str, Any] | None = None, context: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        """
        Execute command with agent mode routing.

        Args:
            command: CLI command being executed (e.g., "import from-code")
            args: Command arguments (e.g., {"repo": ".", "confidence": 0.7})
            context: Context dictionary with current file, selection, workspace, etc.

        Returns:
            Command result dictionary with enhanced output

        Examples:
            >>> agent = AnalyzeAgent()
            >>> result = agent.execute("import from-code", {"repo": "."}, {"current_file": "src/main.py"})
            >>> isinstance(result, dict)
            True
        """

    @beartype
    @ensure(lambda result: isinstance(result, dict), "Result must be a dictionary")
    def inject_context(self, context: dict[str, Any] | None = None) -> dict[str, Any]:
        """
        Inject context information for CoPilot.

        This method can be overridden by specialized agents to add
        context-specific information.

        Args:
            context: Basic context dictionary (can be None)

        Returns:
            Enhanced context dictionary with additional information

        Examples:
            >>> agent = AnalyzeAgent()
            >>> enhanced = agent.inject_context({"current_file": "src/main.py"})
            >>> isinstance(enhanced, dict)
            True
        """
        if context is None:
            return {}
        return context.copy()
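To illustrate the contract that `AgentMode` imposes on concrete agents such as the `AnalyzeAgent` above, here is a minimal hypothetical subclass; it is not part of the package and only exercises the abstract interface defined in `base.py`.

```python
# Hypothetical minimal AgentMode subclass, not shipped with specfact-cli;
# it only demonstrates the abstract interface defined in base.py.
from typing import Any

from specfact_cli.agents.base import AgentMode


class EchoAgent(AgentMode):
    def generate_prompt(self, command: str, context: dict[str, Any] | None = None) -> str:
        # Concrete agents must return a non-empty prompt string.
        ctx = self.inject_context(context)
        return f"Run `{command}` with context keys: {sorted(ctx)}"

    def execute(
        self, command: str, args: dict[str, Any] | None = None, context: dict[str, Any] | None = None
    ) -> dict[str, Any]:
        # Concrete agents must return a result dictionary.
        return {
            "command": command,
            "prompt": self.generate_prompt(command, context),
            "args": args or {},
            "context": self.inject_context(context),
        }


agent = EchoAgent()
print(agent.execute("import from-code", {"repo": "."}, {"current_file": "src/main.py"}))
```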