foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,98 @@
|
|
|
1
|
+
"""Context helpers shared by unified tool routers.
|
|
2
|
+
|
|
3
|
+
These helpers keep `server(action=...)` focused on routing/validation while
|
|
4
|
+
ensuring context/LLM status responses remain consistent and response-v2
|
|
5
|
+
compliant.
|
|
6
|
+
|
|
7
|
+
This module intentionally lives under `tools.unified` to avoid reintroducing
|
|
8
|
+
non-unified public tool surfaces.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import logging
|
|
14
|
+
from dataclasses import asdict
|
|
15
|
+
from typing import Any, Dict, Optional
|
|
16
|
+
|
|
17
|
+
from foundry_mcp.config import ServerConfig
|
|
18
|
+
from foundry_mcp.core.responses import (
|
|
19
|
+
ErrorCode,
|
|
20
|
+
ErrorType,
|
|
21
|
+
error_response,
|
|
22
|
+
success_response,
|
|
23
|
+
)
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def build_llm_status_response(*, request_id: Optional[str] = None) -> dict:
    """Build a response-v2 envelope describing the current LLM configuration.

    On success the envelope carries ``llm_status`` from
    :func:`foundry_mcp.core.review.get_llm_status`; any failure is logged and
    converted into an internal-error envelope with remediation guidance.
    """
    try:
        # Imported lazily so a broken review module degrades to an error
        # envelope instead of breaking module import.
        from foundry_mcp.core.review import get_llm_status

        return asdict(
            success_response(llm_status=get_llm_status(), request_id=request_id)
        )
    except Exception as exc:
        logger.exception("Failed to build llm_status response")
        failure = error_response(
            f"Failed to build llm_status response: {exc}",
            error_code=ErrorCode.INTERNAL_ERROR,
            error_type=ErrorType.INTERNAL,
            remediation="Check server logs.",
            request_id=request_id,
        )
        return asdict(failure)
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def build_server_context_response(
    config: ServerConfig,
    *,
    include_llm: bool = True,
    include_workflow: bool = True,
    include_workspace: bool = True,
    include_capabilities: bool = True,
    request_id: Optional[str] = None,
) -> dict:
    """Assemble the standardized server-context envelope.

    Server identity and configured paths are always reported; the keyword
    flags opt individual sections (workspace, workflow, llm_status,
    capabilities) in or out. Optional sections degrade to placeholder values
    on failure rather than failing the whole response.
    """
    specs_dir = str(config.specs_dir) if config.specs_dir else None
    journals_path = str(config.journals_path) if config.journals_path else None
    payload: Dict[str, Any] = {
        "server": {
            "name": config.server_name,
            "version": config.server_version,
            "log_level": config.log_level,
        },
        "paths": {"specs_dir": specs_dir, "journals_path": journals_path},
    }

    if include_workspace:
        payload["workspace"] = {"roots": [str(root) for root in config.workspace_roots]}

    if include_workflow:
        payload["workflow"] = {"git": asdict(config.git)}

    if include_llm:
        try:
            from foundry_mcp.core.review import get_llm_status

            payload["llm_status"] = get_llm_status()
        except Exception as exc:
            # Degrade gracefully: report an unconfigured status, keep the response.
            logger.debug("Failed to compute llm_status: %s", exc)
            payload["llm_status"] = {"configured": False, "error": "unavailable"}

    if include_capabilities:
        try:
            from foundry_mcp.core.discovery import get_capabilities

            payload["capabilities"] = get_capabilities()
        except Exception as exc:
            logger.debug("Failed to compute capabilities: %s", exc)
            payload["capabilities"] = {}

    return asdict(success_response(data=payload, request_id=request_id))
|
|
@@ -0,0 +1,268 @@
|
|
|
1
|
+
"""Helpers for building review context sections (implementation artifacts, requirements, etc)."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Dict, List, Optional
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
def _build_spec_requirements(
|
|
8
|
+
spec_data: Dict[str, Any], task_id: Optional[str], phase_id: Optional[str]
|
|
9
|
+
) -> str:
|
|
10
|
+
lines: list[str] = []
|
|
11
|
+
if task_id:
|
|
12
|
+
task = _find_task(spec_data, task_id)
|
|
13
|
+
if task:
|
|
14
|
+
lines.append(f"### Task: {task.get('title', task_id)}")
|
|
15
|
+
lines.append(f"- **Status:** {task.get('status', 'unknown')}")
|
|
16
|
+
if task.get("metadata", {}).get("details"):
|
|
17
|
+
lines.append("- **Details:**")
|
|
18
|
+
for detail in task["metadata"]["details"]:
|
|
19
|
+
lines.append(f" - {detail}")
|
|
20
|
+
if task.get("metadata", {}).get("file_path"):
|
|
21
|
+
lines.append(f"- **Expected file:** {task['metadata']['file_path']}")
|
|
22
|
+
elif phase_id:
|
|
23
|
+
phase = _find_phase(spec_data, phase_id)
|
|
24
|
+
if phase:
|
|
25
|
+
lines.append(f"### Phase: {phase.get('title', phase_id)}")
|
|
26
|
+
lines.append(f"- **Status:** {phase.get('status', 'unknown')}")
|
|
27
|
+
child_nodes = _get_child_nodes(spec_data, phase)
|
|
28
|
+
if child_nodes:
|
|
29
|
+
lines.append("- **Tasks:**")
|
|
30
|
+
for child in child_nodes:
|
|
31
|
+
lines.append(
|
|
32
|
+
f" - {child.get('id', 'unknown')}: {child.get('title', 'Unknown task')}"
|
|
33
|
+
)
|
|
34
|
+
else:
|
|
35
|
+
lines.append(f"### Specification: {spec_data.get('title', 'Unknown')}")
|
|
36
|
+
if spec_data.get("description"):
|
|
37
|
+
lines.append(f"- **Description:** {spec_data['description']}")
|
|
38
|
+
if spec_data.get("assumptions"):
|
|
39
|
+
lines.append("- **Assumptions:**")
|
|
40
|
+
for assumption in spec_data["assumptions"][:5]:
|
|
41
|
+
if isinstance(assumption, dict):
|
|
42
|
+
lines.append(f" - {assumption.get('text', str(assumption))}")
|
|
43
|
+
else:
|
|
44
|
+
lines.append(f" - {assumption}")
|
|
45
|
+
return "\n".join(lines) if lines else "*No requirements available*"
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _split_file_paths(value: Any) -> List[str]:
|
|
49
|
+
if value is None:
|
|
50
|
+
return []
|
|
51
|
+
if isinstance(value, list):
|
|
52
|
+
parts: List[str] = []
|
|
53
|
+
for item in value:
|
|
54
|
+
parts.extend(_split_file_paths(item))
|
|
55
|
+
return parts
|
|
56
|
+
if isinstance(value, str):
|
|
57
|
+
segments = [part.strip() for part in value.split(",")]
|
|
58
|
+
return [segment for segment in segments if segment]
|
|
59
|
+
return [str(value)]
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
def _normalize_for_comparison(path_value: str, workspace_root: Optional[Path]) -> str:
|
|
63
|
+
raw_path = Path(path_value)
|
|
64
|
+
if raw_path.is_absolute() and workspace_root:
|
|
65
|
+
try:
|
|
66
|
+
raw_path = raw_path.relative_to(workspace_root)
|
|
67
|
+
except ValueError:
|
|
68
|
+
pass
|
|
69
|
+
if workspace_root and raw_path.parts and raw_path.parts[0] == workspace_root.name:
|
|
70
|
+
raw_path = Path(*raw_path.parts[1:])
|
|
71
|
+
return raw_path.as_posix()
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _resolve_path(path_value: str, workspace_root: Optional[Path]) -> Path:
|
|
75
|
+
raw_path = Path(path_value)
|
|
76
|
+
candidates: List[Path] = []
|
|
77
|
+
if raw_path.is_absolute():
|
|
78
|
+
candidates.append(raw_path)
|
|
79
|
+
else:
|
|
80
|
+
candidates.append(raw_path)
|
|
81
|
+
if workspace_root:
|
|
82
|
+
candidates.append(workspace_root / raw_path)
|
|
83
|
+
if raw_path.parts and raw_path.parts[0] == workspace_root.name:
|
|
84
|
+
candidates.append(workspace_root / Path(*raw_path.parts[1:]))
|
|
85
|
+
for candidate in candidates:
|
|
86
|
+
if candidate.exists():
|
|
87
|
+
return candidate
|
|
88
|
+
return candidates[0] if candidates else raw_path
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def _build_implementation_artifacts(
    spec_data: Dict[str, Any],
    task_id: Optional[str],
    phase_id: Optional[str],
    files: Optional[List[str]],
    incremental: bool,
    base_branch: str,
    workspace_root: Optional[Path] = None,
) -> str:
    """Render the implementation artifacts (source files) for a review as markdown.

    File selection priority: explicit ``files`` > the single task's
    ``metadata.file_path`` > all children of the phase > every
    task/subtask/verify node in the spec hierarchy. When ``incremental`` is
    true the selection is intersected with ``git diff --name-only
    <base_branch>`` (or replaced by it when no spec paths were found). At
    most five files are embedded, each truncated to 10,000 characters.

    Returns a markdown string; missing/unreadable files are reported inline
    rather than raising.
    """
    lines: list[str] = []
    file_paths: list[str] = []
    # Callers may pass workspace_root as a plain string; coerce defensively.
    if workspace_root is not None and not isinstance(workspace_root, Path):
        workspace_root = Path(str(workspace_root))
    if files:
        file_paths = _split_file_paths(files)
    elif task_id:
        task = _find_task(spec_data, task_id)
        if task and task.get("metadata", {}).get("file_path"):
            file_paths = _split_file_paths(task["metadata"]["file_path"])
    elif phase_id:
        phase = _find_phase(spec_data, phase_id)
        if phase:
            for child in _get_child_nodes(spec_data, phase):
                if child.get("metadata", {}).get("file_path"):
                    file_paths.extend(_split_file_paths(child["metadata"]["file_path"]))
    else:
        # Full spec review - collect file_path from all tasks/subtasks/verify nodes
        hierarchy_nodes = _get_hierarchy_nodes(spec_data)
        for node in hierarchy_nodes.values():
            if node.get("type") in ("task", "subtask", "verify"):
                if node.get("metadata", {}).get("file_path"):
                    file_paths.extend(_split_file_paths(node["metadata"]["file_path"]))
    # Deduplicate while preserving first-seen order (sets alone would reorder).
    if file_paths:
        deduped: List[str] = []
        seen = set()
        for file_path in file_paths:
            if file_path not in seen:
                seen.add(file_path)
                deduped.append(file_path)
        file_paths = deduped
    if incremental:
        try:
            import subprocess

            # Best-effort: relies on cwd being inside the git repo.
            result = subprocess.run(
                ["git", "diff", "--name-only", base_branch],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode == 0:
                changed_files = (
                    result.stdout.strip().split("\n") if result.stdout else []
                )
                if file_paths:
                    # Intersect spec-declared paths with git's changed set,
                    # normalizing both sides so differing anchors still match.
                    changed_set = {
                        _normalize_for_comparison(path, workspace_root)
                        for path in changed_files
                        if path
                    }
                    file_paths = [
                        path
                        for path in file_paths
                        if _normalize_for_comparison(path, workspace_root) in changed_set
                    ]
                else:
                    # No spec paths collected: review whatever git says changed.
                    file_paths = [path for path in changed_files if path]
                lines.append(
                    f"*Incremental review: {len(file_paths)} changed files since {base_branch}*\n"
                )
        except Exception:
            # git missing, timeout, bad branch, etc. — note it and continue
            # with the unfiltered path list.
            lines.append(f"*Warning: Could not get git diff from {base_branch}*\n")
    # Embed at most five files to bound prompt/context size.
    for file_path in file_paths[:5]:
        path = _resolve_path(file_path, workspace_root)
        if path.exists():
            try:
                content = path.read_text(encoding="utf-8")
                if len(content) > 10_000:
                    content = content[:10_000] + "\n... [truncated] ..."
                # Use the file suffix as the fenced-code-block language hint.
                file_type = path.suffix.lstrip(".") or "text"
                lines.append(f"### File: `{file_path}`")
                lines.append(f"```{file_type}")
                lines.append(content)
                lines.append("```\n")
            except Exception as exc:
                lines.append(f"### File: `{file_path}`")
                lines.append(f"*Error reading file: {exc}*\n")
        else:
            lines.append(f"### File: `{file_path}`")
            lines.append("*File not found*\n")
    if not lines:
        lines.append("*No implementation artifacts available*")
    return "\n".join(lines)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _build_test_results(
|
|
187
|
+
spec_data: Dict[str, Any], task_id: Optional[str], phase_id: Optional[str]
|
|
188
|
+
) -> str:
|
|
189
|
+
journal = spec_data.get("journal", [])
|
|
190
|
+
test_entries = [
|
|
191
|
+
entry
|
|
192
|
+
for entry in journal
|
|
193
|
+
if "test" in entry.get("title", "").lower()
|
|
194
|
+
or "verify" in entry.get("title", "").lower()
|
|
195
|
+
]
|
|
196
|
+
if test_entries:
|
|
197
|
+
lines = ["*Recent test-related journal entries:*"]
|
|
198
|
+
for entry in test_entries[-3:]:
|
|
199
|
+
lines.append(
|
|
200
|
+
f"- **{entry.get('title', 'Unknown')}** ({entry.get('timestamp', 'unknown')})"
|
|
201
|
+
)
|
|
202
|
+
if entry.get("content"):
|
|
203
|
+
content = entry["content"][:500]
|
|
204
|
+
if len(entry["content"]) > 500:
|
|
205
|
+
content += "..."
|
|
206
|
+
lines.append(f" {content}")
|
|
207
|
+
return "\n".join(lines)
|
|
208
|
+
return "*No test results available*"
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def _build_journal_entries(
|
|
212
|
+
spec_data: Dict[str, Any], task_id: Optional[str], phase_id: Optional[str]
|
|
213
|
+
) -> str:
|
|
214
|
+
journal = spec_data.get("journal", [])
|
|
215
|
+
if task_id:
|
|
216
|
+
journal = [entry for entry in journal if entry.get("task_id") == task_id]
|
|
217
|
+
if journal:
|
|
218
|
+
lines = [f"*{len(journal)} journal entries found:*"]
|
|
219
|
+
for entry in journal[-5:]:
|
|
220
|
+
entry_type = entry.get("entry_type", "note")
|
|
221
|
+
timestamp = (
|
|
222
|
+
entry.get("timestamp", "unknown")[:10]
|
|
223
|
+
if entry.get("timestamp")
|
|
224
|
+
else "unknown"
|
|
225
|
+
)
|
|
226
|
+
lines.append(
|
|
227
|
+
f"- **[{entry_type}]** {entry.get('title', 'Untitled')} ({timestamp})"
|
|
228
|
+
)
|
|
229
|
+
return "\n".join(lines)
|
|
230
|
+
return "*No journal entries found*"
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def _find_task(spec_data: Dict[str, Any], task_id: str) -> Optional[Dict[str, Any]]:
    """Look up a task node by id in the spec hierarchy; ``None`` when absent."""
    return _get_hierarchy_nodes(spec_data).get(task_id)
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def _find_phase(spec_data: Dict[str, Any], phase_id: str) -> Optional[Dict[str, Any]]:
    """Look up a phase node by id in the spec hierarchy; ``None`` when absent."""
    return _get_hierarchy_nodes(spec_data).get(phase_id)
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def _get_hierarchy_nodes(spec_data: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
|
|
248
|
+
hierarchy = spec_data.get("hierarchy", {})
|
|
249
|
+
nodes: Dict[str, Dict[str, Any]] = {}
|
|
250
|
+
if isinstance(hierarchy, dict):
|
|
251
|
+
if all(isinstance(value, dict) for value in hierarchy.values()):
|
|
252
|
+
for node_id, node in hierarchy.items():
|
|
253
|
+
node_copy = dict(node)
|
|
254
|
+
node_copy.setdefault("id", node_id)
|
|
255
|
+
nodes[node_id] = node_copy
|
|
256
|
+
return nodes
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def _get_child_nodes(
    spec_data: Dict[str, Any], node: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Resolve *node*'s ``children`` ids to node dicts, skipping unknown ids."""
    lookup = _get_hierarchy_nodes(spec_data)
    resolved: List[Dict[str, Any]] = []
    for child_id in node.get("children", []):
        child = lookup.get(child_id)
        if child is not None:
            resolved.append(child)
    return resolved
|