foundry-mcp 0.3.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- foundry_mcp/__init__.py +7 -0
- foundry_mcp/cli/__init__.py +80 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +633 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +652 -0
- foundry_mcp/cli/commands/session.py +479 -0
- foundry_mcp/cli/commands/specs.py +856 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +259 -0
- foundry_mcp/cli/flags.py +266 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +850 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1636 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/feature_flags.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/journal.py +694 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1350 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +123 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +317 -0
- foundry_mcp/core/prometheus.py +577 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +546 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
- foundry_mcp/core/prompts/plan_review.py +623 -0
- foundry_mcp/core/providers/__init__.py +225 -0
- foundry_mcp/core/providers/base.py +476 -0
- foundry_mcp/core/providers/claude.py +460 -0
- foundry_mcp/core/providers/codex.py +619 -0
- foundry_mcp/core/providers/cursor_agent.py +642 -0
- foundry_mcp/core/providers/detectors.py +488 -0
- foundry_mcp/core/providers/gemini.py +405 -0
- foundry_mcp/core/providers/opencode.py +616 -0
- foundry_mcp/core/providers/opencode_wrapper.js +302 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +729 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +934 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +1650 -0
- foundry_mcp/core/task.py +1289 -0
- foundry_mcp/core/testing.py +450 -0
- foundry_mcp/core/validation.py +2081 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +234 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +289 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +174 -0
- foundry_mcp/dashboard/views/overview.py +160 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/sdd-spec-schema.json +386 -0
- foundry_mcp/server.py +164 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +71 -0
- foundry_mcp/tools/unified/authoring.py +1487 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +198 -0
- foundry_mcp/tools/unified/environment.py +939 -0
- foundry_mcp/tools/unified/error.py +462 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +632 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +745 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +629 -0
- foundry_mcp/tools/unified/review.py +685 -0
- foundry_mcp/tools/unified/review_helpers.py +299 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +580 -0
- foundry_mcp/tools/unified/spec.py +808 -0
- foundry_mcp/tools/unified/task.py +2202 -0
- foundry_mcp/tools/unified/test.py +370 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.3.3.dist-info/METADATA +337 -0
- foundry_mcp-0.3.3.dist-info/RECORD +135 -0
- foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
- foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,546 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Prompt templates for fidelity review workflow.
|
|
3
|
+
|
|
4
|
+
This module provides prompts for comparing implementation against
|
|
5
|
+
specifications, identifying deviations, and assessing compliance
|
|
6
|
+
with documented requirements.
|
|
7
|
+
|
|
8
|
+
Prompt IDs (PromptTemplate-based):
|
|
9
|
+
- FIDELITY_REVIEW_V1: Main 6-section fidelity review prompt
|
|
10
|
+
- FIDELITY_DEVIATION_ANALYSIS_V1: Analyze identified deviations
|
|
11
|
+
- FIDELITY_COMPLIANCE_SUMMARY_V1: Generate compliance summary
|
|
12
|
+
|
|
13
|
+
Legacy Prompt IDs (string templates for backward compatibility):
|
|
14
|
+
- review_task: Compare task implementation against spec requirements
|
|
15
|
+
- review_phase: Review entire phase for fidelity to spec
|
|
16
|
+
- compare_files: Compare specific files against spec expectations
|
|
17
|
+
- deviation_analysis: Analyze identified deviations for impact
|
|
18
|
+
- compliance_summary: Generate compliance summary report
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from __future__ import annotations
|
|
22
|
+
|
|
23
|
+
from typing import Any, Dict, List
|
|
24
|
+
|
|
25
|
+
from foundry_mcp.core.prompts import PromptBuilder, PromptTemplate
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# =============================================================================
|
|
29
|
+
# Response Schema
|
|
30
|
+
# =============================================================================
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# JSON response schema for fidelity reviews - structured format for AI response
|
|
34
|
+
FIDELITY_RESPONSE_SCHEMA = """{
|
|
35
|
+
"verdict": "pass|fail|partial|unknown",
|
|
36
|
+
"summary": "Overall findings (any length).",
|
|
37
|
+
"requirement_alignment": {
|
|
38
|
+
"answer": "yes|no|partial",
|
|
39
|
+
"details": "Explain how implementation aligns or diverges."
|
|
40
|
+
},
|
|
41
|
+
"success_criteria": {
|
|
42
|
+
"met": "yes|no|partial",
|
|
43
|
+
"details": "Call out verification steps passed or missing."
|
|
44
|
+
},
|
|
45
|
+
"deviations": [
|
|
46
|
+
{
|
|
47
|
+
"description": "Describe deviation from the spec.",
|
|
48
|
+
"justification": "Optional rationale or evidence.",
|
|
49
|
+
"severity": "critical|high|medium|low"
|
|
50
|
+
}
|
|
51
|
+
],
|
|
52
|
+
"test_coverage": {
|
|
53
|
+
"status": "sufficient|insufficient|not_applicable",
|
|
54
|
+
"details": "Summarise test evidence or gaps."
|
|
55
|
+
},
|
|
56
|
+
"code_quality": {
|
|
57
|
+
"issues": ["Describe each notable quality concern."],
|
|
58
|
+
"details": "Optional supporting commentary."
|
|
59
|
+
},
|
|
60
|
+
"documentation": {
|
|
61
|
+
"status": "adequate|inadequate|not_applicable",
|
|
62
|
+
"details": "Note doc updates or omissions."
|
|
63
|
+
},
|
|
64
|
+
"issues": ["Concise list of primary issues for consensus logic."],
|
|
65
|
+
"recommendations": ["Actionable next steps to resolve findings."]
|
|
66
|
+
}"""
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# =============================================================================
|
|
70
|
+
# Severity Categorization Keywords
|
|
71
|
+
# =============================================================================
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
# Keyword lists used to bucket review findings into severity levels.
# All entries are lowercase; NOTE(review): the matching logic lives outside
# this module — presumably it lowercases findings before comparing. Confirm
# at the call sites of get_severity_keywords().

# CRITICAL: Security vulnerabilities, data loss, crashes
CRITICAL_KEYWORDS: List[str] = [
    "security",
    "vulnerability",
    "injection",
    "xss",
    "csrf",
    "authentication bypass",
    "unauthorized access",
    "data loss",
    "crash",
    "segfault",
    "memory leak",
    "remote code execution",
    "privilege escalation",
    "buffer overflow",
]

# HIGH: Incorrect behavior, spec violations, broken functionality
HIGH_KEYWORDS: List[str] = [
    "incorrect",
    "wrong",
    "broken",
    "fails",
    "failure",
    "spec violation",
    "requirement not met",
    "does not match",
    "missing required",
    "critical bug",
    "data corruption",
    "logic error",
    "incorrect behavior",
]

# MEDIUM: Performance issues, missing tests, code quality
MEDIUM_KEYWORDS: List[str] = [
    "performance",
    "slow",
    "inefficient",
    "optimization",
    "missing test",
    "no tests",
    "untested",
    "test coverage",
    "code quality",
    "maintainability",
    "complexity",
    "duplication",
    "refactor",
    "improvement needed",
]

# LOW: Style issues, documentation, minor improvements
LOW_KEYWORDS: List[str] = [
    "style",
    "formatting",
    "naming",
    "documentation",
    "comment",
    "typo",
    "whitespace",
    "minor",
    "suggestion",
    "consider",
    "could be better",
]

# All severity keywords organized by level; exposed to callers through
# FidelityReviewPromptBuilder.get_severity_keywords(level).
SEVERITY_KEYWORDS: Dict[str, List[str]] = {
    "critical": CRITICAL_KEYWORDS,
    "high": HIGH_KEYWORDS,
    "medium": MEDIUM_KEYWORDS,
    "low": LOW_KEYWORDS,
}
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
# =============================================================================
|
|
152
|
+
# PromptTemplate-based Prompts (New Format)
|
|
153
|
+
# =============================================================================
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
# Main fidelity review prompt - 6-section structure.
#
# Fix: the second CRITICAL CONSTRAINT previously read
# "- Execute code or commands - ANALYSIS ONLY", which (missing its negation)
# instructed the model to execute code — the opposite of the surrounding
# read-only constraints. It now reads "Do NOT execute code or commands".
FIDELITY_REVIEW_V1 = PromptTemplate(
    id="FIDELITY_REVIEW_V1",
    version="1.0",
    system_prompt="""You are an expert code reviewer performing implementation fidelity analysis.

Your role is to compare actual code implementation against specification requirements
and identify any deviations, issues, or concerns.

CRITICAL CONSTRAINTS:
- This is a READ-ONLY review - you MUST NOT write, create, or modify ANY files
- Do NOT execute code or commands - ANALYSIS ONLY
- Provide findings as structured JSON in your response

Focus on:
1. Requirement alignment - Does implementation match spec?
2. Success criteria - Are verification steps satisfied?
3. Deviations - Any divergences from specification?
4. Test coverage - Are tests comprehensive?
5. Code quality - Any maintainability concerns?
6. Documentation - Is implementation properly documented?""",
    # Placeholders ({spec_id}, {response_schema}, ...) are filled from the
    # context passed to PromptTemplate.render(); optional ones receive
    # defaults in FidelityReviewPromptBuilder.build().
    user_template="""# Implementation Fidelity Review

## 1. Context
**Spec ID:** {spec_id}
**Spec Title:** {spec_title}
{spec_description}
**Review Scope:** {review_scope}

## 2. Specification Requirements
{spec_requirements}

## 3. Implementation Artifacts
{implementation_artifacts}

## 4. Test Results
{test_results}

## 5. Journal Entries
{journal_entries}

## 6. Review Questions

Please evaluate the implementation against the specification:

1. **Requirement Alignment:** Does the implementation match the spec requirements?
2. **Success Criteria:** Are all verification steps satisfied?
3. **Deviations:** Are there any deviations from the spec? If so, are they justified?
4. **Test Coverage:** Are tests comprehensive and passing?
5. **Code Quality:** Are there any quality, maintainability, or security concerns?
6. **Documentation:** Is the implementation properly documented?

### Required Response Format

Respond **only** with valid JSON matching the schema below. Do not include Markdown, prose, or additional commentary outside the JSON object.

```json
{response_schema}
```

Rules:
- Use lowercase values shown for enumerated fields (e.g., `verdict`, status flags)
- Keep arrays as arrays (use `[]` when a section has nothing to report)
- Populate `issues` and `recommendations` with key takeaways
- Feel free to include additional keys if needed, but never omit the ones above
- Severity levels for deviations: critical, high, medium, low""",
    required_context=[
        "spec_id",
        "spec_title",
        "review_scope",
        "spec_requirements",
        "implementation_artifacts",
    ],
    optional_context=[
        "spec_description",
        "test_results",
        "journal_entries",
        "response_schema",
    ],
    metadata={
        "workflow": "fidelity_review",
        "author": "system",
        "category": "implementation",
        "sections": [
            "Context",
            "Specification Requirements",
            "Implementation Artifacts",
            "Test Results",
            "Journal Entries",
            "Review Questions",
        ],
        "output_format": "json",
        "severity_levels": ["critical", "high", "medium", "low"],
    },
)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
# Deviation analysis prompt - for deep-diving into identified deviations.
# Literal braces in the embedded JSON example are doubled ({{ / }}) so that
# rendering leaves single braces in the output while {spec_id}, {scope},
# {deviations_json} and {original_requirements} are substituted.
FIDELITY_DEVIATION_ANALYSIS_V1 = PromptTemplate(
    id="FIDELITY_DEVIATION_ANALYSIS_V1",
    version="1.0",
    system_prompt="""You are an expert software architect analyzing specification deviations.

Your role is to assess the impact of identified deviations between implementation
and specification, determine risks, and recommend remediation strategies.

Focus on:
- Impact assessment (functional, security, performance, maintenance)
- Risk analysis and downstream effects
- Remediation options with effort/risk tradeoffs
- Prioritized recommendations""",
    user_template="""# Deviation Analysis

## Context
**Spec ID:** {spec_id}
**Analysis Scope:** {scope}

## Identified Deviations
{deviations_json}

## Original Requirements
{original_requirements}

## Analysis Requirements

For each deviation, provide:
1. **Impact Assessment**: Functional, security, performance, maintenance impact
2. **Risk Analysis**: Risks introduced by this deviation
3. **Downstream Effects**: How does this affect dependent components?
4. **Remediation Options**: Ways to address with effort/risk tradeoffs
5. **Recommendation**: accept|fix_now|fix_later|needs_discussion

### Required Response Format

Respond with valid JSON:

```json
{{
  "analysis_scope": "{scope}",
  "total_deviations": 0,
  "critical_count": 0,
  "deviation_analysis": [
    {{
      "deviation_id": "index or identifier",
      "original_deviation": "brief description",
      "impact_assessment": {{
        "functional_impact": "none|minor|moderate|major",
        "security_impact": "none|minor|moderate|major",
        "performance_impact": "none|minor|moderate|major",
        "maintenance_impact": "none|minor|moderate|major"
      }},
      "affected_components": ["list of affected components"],
      "downstream_effects": ["list of downstream effects"],
      "remediation_options": [
        {{
          "option": "description",
          "effort": "low|medium|high",
          "risk": "low|medium|high"
        }}
      ],
      "recommendation": "accept|fix_now|fix_later|needs_discussion",
      "rationale": "explanation for recommendation"
    }}
  ],
  "overall_risk_level": "low|medium|high|critical",
  "recommended_actions": ["prioritized list of recommended actions"],
  "summary": "overall deviation analysis summary"
}}
```""",
    required_context=["spec_id", "scope", "deviations_json", "original_requirements"],
    optional_context=[],
    metadata={
        "workflow": "fidelity_review",
        "author": "system",
        "category": "analysis",
        "output_format": "json",
    },
)
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
# Compliance summary prompt - for generating overall compliance reports.
# As with the deviation-analysis prompt, literal braces in the JSON example
# are doubled ({{ / }}) so rendering preserves them while substituting
# {spec_id}, {spec_title}, {total_phases}, {total_tasks} and {review_data}.
FIDELITY_COMPLIANCE_SUMMARY_V1 = PromptTemplate(
    id="FIDELITY_COMPLIANCE_SUMMARY_V1",
    version="1.0",
    system_prompt="""You are an expert technical lead generating compliance reports.

Your role is to synthesize fidelity review findings into an executive summary
with clear compliance status, prioritized issues, and sign-off recommendations.

Focus on:
- Overall compliance score and status
- Phase-by-phase breakdown
- Critical blocking issues
- Sign-off readiness assessment""",
    user_template="""# Compliance Summary Report

## Specification
**Spec ID:** {spec_id}
**Title:** {spec_title}
**Total Phases:** {total_phases}
**Total Tasks:** {total_tasks}

## Fidelity Review Data
{review_data}

## Summary Requirements

Generate a compliance summary addressing:
1. **Overall Compliance**: What is the overall compliance level?
2. **Phase Breakdown**: Compliance by phase
3. **Critical Issues**: List critical compliance issues
4. **Recommendations**: Prioritized recommendations
5. **Sign-off Status**: Is the implementation ready for approval?

### Required Response Format

Respond with valid JSON:

```json
{{
  "spec_id": "{spec_id}",
  "spec_title": "{spec_title}",
  "overall_compliance": {{
    "score": 0-100,
    "status": "compliant|mostly_compliant|needs_work|non_compliant",
    "tasks_compliant": 0,
    "tasks_partial": 0,
    "tasks_non_compliant": 0
  }},
  "phase_breakdown": [
    {{
      "phase_id": "phase-id",
      "phase_title": "title",
      "compliance_score": 0-100,
      "status": "compliant|partial|non_compliant"
    }}
  ],
  "critical_issues": [
    {{
      "issue": "description",
      "location": "task or phase id",
      "priority": "p0|p1|p2",
      "remediation": "suggested fix"
    }}
  ],
  "recommendations": [
    {{
      "recommendation": "description",
      "priority": "critical|high|medium|low",
      "effort": "low|medium|high"
    }}
  ],
  "sign_off": {{
    "ready": true|false,
    "blocking_issues": ["list of issues that must be resolved"],
    "conditions": ["conditions for approval if any"]
  }},
  "summary": "executive summary of compliance status"
}}
```""",
    required_context=[
        "spec_id",
        "spec_title",
        "total_phases",
        "total_tasks",
        "review_data",
    ],
    optional_context=[],
    metadata={
        "workflow": "fidelity_review",
        "author": "system",
        "category": "reporting",
        "output_format": "json",
    },
)
|
|
431
|
+
|
|
432
|
+
|
|
433
|
+
# =============================================================================
|
|
434
|
+
# Template Registry (PromptTemplate-based)
|
|
435
|
+
# =============================================================================
|
|
436
|
+
|
|
437
|
+
|
|
438
|
+
# Registry mapping prompt IDs to PromptTemplate instances. Keys deliberately
# equal each template's `id` attribute; FidelityReviewPromptBuilder.build()
# resolves prompt_id against this dict.
FIDELITY_REVIEW_TEMPLATES: Dict[str, PromptTemplate] = {
    "FIDELITY_REVIEW_V1": FIDELITY_REVIEW_V1,
    "FIDELITY_DEVIATION_ANALYSIS_V1": FIDELITY_DEVIATION_ANALYSIS_V1,
    "FIDELITY_COMPLIANCE_SUMMARY_V1": FIDELITY_COMPLIANCE_SUMMARY_V1,
}
|
|
443
|
+
|
|
444
|
+
|
|
445
|
+
# =============================================================================
|
|
446
|
+
# Legacy Prompt Templates (String-based for backward compatibility)
|
|
447
|
+
# =============================================================================
|
|
448
|
+
|
|
449
|
+
# Legacy templates have been removed. Use FIDELITY_REVIEW_V1 and related prompts.
|
|
450
|
+
|
|
451
|
+
|
|
452
|
+
# =============================================================================
|
|
453
|
+
# Prompt Builder Implementation
|
|
454
|
+
# =============================================================================
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
class FidelityReviewPromptBuilder(PromptBuilder):
    """Prompt builder for the fidelity review workflow.

    Resolves prompt IDs against the module-level registry
    (FIDELITY_REVIEW_TEMPLATES) and renders the matching PromptTemplate,
    backfilling defaults for optional context fields.
    """

    def __init__(self) -> None:
        """Bind the builder to the module-level template registry."""
        self._prompt_templates = FIDELITY_REVIEW_TEMPLATES

    def build(self, prompt_id: str, context: Dict[str, Any]) -> str:
        """Render the fidelity review prompt identified by ``prompt_id``.

        Args:
            prompt_id: Template identifier. Supports the PromptTemplate IDs
                FIDELITY_REVIEW_V1, FIDELITY_DEVIATION_ANALYSIS_V1, and
                FIDELITY_COMPLIANCE_SUMMARY_V1.
            context: Template context variables.

        Returns:
            Rendered prompt string.

        Raises:
            ValueError: If ``prompt_id`` is not recognized.
        """
        template = self._prompt_templates.get(prompt_id)
        if template is None:
            # Unknown prompt_id - report the valid choices.
            available = ", ".join(sorted(self._prompt_templates))
            raise ValueError(f"Unknown prompt_id '{prompt_id}'. Available: {available}")

        # Copy so the caller's dict is never mutated, then backfill defaults
        # for any optional context the template may reference.
        render_context = dict(context)
        render_context.setdefault("response_schema", FIDELITY_RESPONSE_SCHEMA)
        render_context.setdefault("spec_description", "")
        render_context.setdefault("test_results", "*No test results available*")
        render_context.setdefault("journal_entries", "*No journal entries found*")
        return template.render(render_context)

    def list_prompts(self) -> List[str]:
        """Return the available prompt IDs, sorted alphabetically."""
        return sorted(self._prompt_templates)

    def get_severity_keywords(self, level: str) -> List[str]:
        """Return severity categorization keywords for ``level``.

        Args:
            level: Severity level name (critical, high, medium, low);
                matched case-insensitively.

        Returns:
            Keyword list for that level, or an empty list for unknown levels.
        """
        return SEVERITY_KEYWORDS.get(level.lower(), [])
|
|
527
|
+
|
|
528
|
+
|
|
529
|
+
# Explicit public API: the names exported by `from ... import *` and the
# module's supported surface for external callers.
__all__ = [
    # PromptTemplate instances
    "FIDELITY_REVIEW_V1",
    "FIDELITY_DEVIATION_ANALYSIS_V1",
    "FIDELITY_COMPLIANCE_SUMMARY_V1",
    # Template registries
    "FIDELITY_REVIEW_TEMPLATES",
    # Response schema
    "FIDELITY_RESPONSE_SCHEMA",
    # Severity keywords
    "SEVERITY_KEYWORDS",
    "CRITICAL_KEYWORDS",
    "HIGH_KEYWORDS",
    "MEDIUM_KEYWORDS",
    "LOW_KEYWORDS",
    # Builder
    "FidelityReviewPromptBuilder",
]
|