tapps-agents 3.5.39__py3-none-any.whl → 3.5.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/enhancer/agent.py +2728 -2728
- tapps_agents/agents/implementer/agent.py +35 -13
- tapps_agents/agents/reviewer/agent.py +43 -10
- tapps_agents/agents/reviewer/scoring.py +59 -68
- tapps_agents/agents/reviewer/tools/__init__.py +24 -0
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
- tapps_agents/beads/__init__.py +11 -0
- tapps_agents/beads/hydration.py +213 -0
- tapps_agents/beads/specs.py +206 -0
- tapps_agents/cli/commands/health.py +19 -3
- tapps_agents/cli/commands/simple_mode.py +842 -676
- tapps_agents/cli/commands/task.py +219 -0
- tapps_agents/cli/commands/top_level.py +13 -0
- tapps_agents/cli/main.py +658 -651
- tapps_agents/cli/parsers/top_level.py +1978 -1881
- tapps_agents/core/config.py +1622 -1622
- tapps_agents/core/init_project.py +3012 -2897
- tapps_agents/epic/markdown_sync.py +105 -0
- tapps_agents/epic/orchestrator.py +1 -2
- tapps_agents/epic/parser.py +427 -423
- tapps_agents/experts/adaptive_domain_detector.py +0 -2
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +134 -46
- tapps_agents/health/orchestrator.py +12 -4
- tapps_agents/hooks/__init__.py +33 -0
- tapps_agents/hooks/config.py +140 -0
- tapps_agents/hooks/events.py +135 -0
- tapps_agents/hooks/executor.py +128 -0
- tapps_agents/hooks/manager.py +143 -0
- tapps_agents/session/__init__.py +19 -0
- tapps_agents/session/manager.py +256 -0
- tapps_agents/simple_mode/code_snippet_handler.py +382 -0
- tapps_agents/simple_mode/intent_parser.py +29 -4
- tapps_agents/simple_mode/orchestrators/base.py +185 -59
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +2 -2
- tapps_agents/simple_mode/workflow_suggester.py +37 -3
- tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
- tapps_agents/workflow/cursor_executor.py +2196 -2118
- tapps_agents/workflow/direct_execution_fallback.py +16 -3
- tapps_agents/workflow/message_formatter.py +2 -1
- tapps_agents/workflow/parallel_executor.py +43 -4
- tapps_agents/workflow/parser.py +375 -357
- tapps_agents/workflow/rules_generator.py +337 -337
- tapps_agents/workflow/skill_invoker.py +9 -3
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/METADATA +5 -1
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/RECORD +57 -53
- tapps_agents/agents/analyst/SKILL.md +0 -85
- tapps_agents/agents/architect/SKILL.md +0 -80
- tapps_agents/agents/debugger/SKILL.md +0 -66
- tapps_agents/agents/designer/SKILL.md +0 -78
- tapps_agents/agents/documenter/SKILL.md +0 -95
- tapps_agents/agents/enhancer/SKILL.md +0 -189
- tapps_agents/agents/implementer/SKILL.md +0 -117
- tapps_agents/agents/improver/SKILL.md +0 -55
- tapps_agents/agents/ops/SKILL.md +0 -64
- tapps_agents/agents/orchestrator/SKILL.md +0 -238
- tapps_agents/agents/planner/story_template.md +0 -37
- tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
- tapps_agents/agents/tester/SKILL.md +0 -71
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/licenses/LICENSE +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/top_level.txt +0 -0
tapps_agents/beads/hydration.py

@@ -0,0 +1,213 @@
+"""
+Hydration engine: create Beads issues from task specs, update specs from Beads.
+
+Hydrate: for specs without beads_issue, run bd create and save id; recreate
+dependency graph with bd dep add. Dehydrate: run bd list, update spec files
+with current status. Handles missing bd gracefully (log, no crash).
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass
+from pathlib import Path
+
+from .client import is_available, run_bd
+from .parse import parse_bd_id_from_stdout
+from .specs import TaskSpec, load_task_specs, save_task_spec
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class HydrationReport:
+    """Summary of a hydration run."""
+
+    created: int = 0
+    skipped: int = 0
+    failed: int = 0
+    deps_added: int = 0
+    bd_unavailable: bool = False
+    dry_run: bool = False
+
+
+def hydrate_to_beads(
+    project_root: Path | None = None,
+    *,
+    dry_run: bool = False,
+) -> HydrationReport:
+    """
+    Create Beads issues for task specs that don't have beads_issue; recreate deps.
+
+    For each spec in .tapps-agents/task-specs/ without beads_issue, runs
+    bd create and stores the returned id in the spec file. Then runs
+    bd dep add for each dependency. If bd is not available, logs and returns
+    without raising.
+
+    Args:
+        project_root: Project root (default: cwd).
+        dry_run: If True, do not run bd or write files; report what would be done.
+
+    Returns:
+        HydrationReport with created/skipped/failed/deps_added counts.
+    """
+    project_root = project_root or Path.cwd()
+    report = HydrationReport(dry_run=dry_run)
+
+    if not is_available(project_root):
+        logger.warning("Hydration skipped: bd not available")
+        report.bd_unavailable = True
+        return report
+
+    specs = load_task_specs(project_root)
+    if not specs:
+        return report
+
+    # Map spec.id -> beads_issue id after creation
+    spec_to_bd: dict[str, str] = {}
+    for spec in specs:
+        if spec.beads_issue:
+            spec_to_bd[spec.id] = spec.beads_issue
+            report.skipped += 1
+            continue
+        if dry_run:
+            report.created += 1
+            continue
+        title = (spec.title or spec.id)[:200].strip()
+        desc = (spec.description or "")[:500].replace("\n", " ").strip()
+        args = ["create", title]
+        if desc:
+            args.extend(["-d", desc])
+        try:
+            r = run_bd(project_root, args)
+            if r.returncode != 0:
+                logger.warning("Hydration: bd create failed for %s: %s", spec.id, r.stderr)
+                report.failed += 1
+                continue
+            bd_id = parse_bd_id_from_stdout(r.stdout)
+            if bd_id:
+                spec_to_bd[spec.id] = bd_id
+                spec.beads_issue = bd_id
+                save_task_spec(spec, project_root)
+                report.created += 1
+            else:
+                logger.warning("Hydration: could not parse bd id for %s", spec.id)
+                report.failed += 1
+        except Exception as e:
+            logger.warning("Hydration: bd create failed for %s: %s", spec.id, e)
+            report.failed += 1
+
+    # Recreate dependency graph: bd dep add child parent (parent blocks child)
+    if dry_run:
+        for spec in specs:
+            if spec.dependencies and spec_to_bd.get(spec.id):
+                report.deps_added += len([d for d in spec.dependencies if spec_to_bd.get(d)])
+        return report
+
+    for spec in specs:
+        child_bd = spec_to_bd.get(spec.id)
+        if not child_bd or not spec.dependencies:
+            continue
+        for dep_id in spec.dependencies:
+            parent_bd = spec_to_bd.get(dep_id)
+            if not parent_bd:
+                continue
+            try:
+                r = run_bd(project_root, ["dep", "add", child_bd, parent_bd])
+                if r.returncode == 0:
+                    report.deps_added += 1
+                else:
+                    logger.warning(
+                        "Hydration: bd dep add %s %s failed: %s",
+                        child_bd,
+                        parent_bd,
+                        r.stderr,
+                    )
+            except Exception as e:
+                logger.warning("Hydration: bd dep add %s %s failed: %s", child_bd, parent_bd, e)
+
+    return report
+
+
+def dehydrate_from_beads(project_root: Path | None = None) -> int:
+    """
+    Update task spec files with current status from Beads.
+
+    Runs bd list (or equivalent), maps beads_issue id to status, and updates
+    each spec file. If bd is not available, logs and returns 0.
+
+    Args:
+        project_root: Project root (default: cwd).
+
+    Returns:
+        Number of spec files updated.
+    """
+    project_root = project_root or Path.cwd()
+
+    if not is_available(project_root):
+        logger.warning("Dehydration skipped: bd not available")
+        return 0
+
+    # Try bd list --json; fallback to parsing stdout if format differs
+    try:
+        r = run_bd(project_root, ["list", "--json"])
+    except Exception as e:
+        logger.warning("Dehydration: bd list failed: %s", e)
+        return 0
+
+    if r.returncode != 0:
+        logger.warning("Dehydration: bd list failed: %s", r.stderr)
+        return 0
+
+    bd_status_by_id: dict[str, str] = {}
+    try:
+        raw = json.loads(r.stdout or "[]")
+        if isinstance(raw, list):
+            for item in raw:
+                if isinstance(item, dict):
+                    bid = item.get("id") or item.get("bd_id") or item.get("issue_id")
+                    status = item.get("status") or item.get("state") or "todo"
+                    if bid:
+                        bd_status_by_id[str(bid)] = str(status).lower()
+        elif isinstance(raw, dict):
+            for bid, info in raw.items():
+                if isinstance(info, dict):
+                    status = info.get("status") or info.get("state") or "todo"
+                else:
+                    status = str(info)
+                bd_status_by_id[str(bid)] = str(status).lower()
+    except json.JSONDecodeError:
+        # Fallback: parse line-based output for "id status" or "id\tstatus"
+        for line in (r.stdout or "").splitlines():
+            parts = line.strip().split()
+            if len(parts) >= 2:
+                bd_status_by_id[parts[0]] = parts[1].lower()
+            elif len(parts) == 1:
+                bd_status_by_id[parts[0]] = "todo"
+
+    specs = load_task_specs(project_root)
+    updated = 0
+    for spec in specs:
+        if not spec.beads_issue:
+            continue
+        new_status = bd_status_by_id.get(spec.beads_issue)
+        if not new_status:
+            continue
+        # Map bd status to our status if needed
+        if new_status in ("todo", "open", "pending"):
+            mapped = "todo"
+        elif new_status in ("in-progress", "in_progress", "wip"):
+            mapped = "in-progress"
+        elif new_status in ("done", "closed", "completed"):
+            mapped = "done"
+        elif new_status == "blocked":
+            mapped = "blocked"
+        else:
+            mapped = spec.status
+        if spec.status != mapped:
+            spec = spec.model_copy(update={"status": mapped})
+            save_task_spec(spec, project_root)
+            updated += 1
+
+    return updated
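For orientation, here is a minimal sketch (not part of the diff) of how this hydration module might be driven from a one-off script. The import path follows the package layout in the RECORD above; the surrounding wiring and the project root used are assumptions, not released code.

# Illustrative sketch only: assumes the package is installed and a project with
# .tapps-agents/task-specs/ exists at the given root, and that the bd CLI is on PATH.
from pathlib import Path

from tapps_agents.beads.hydration import dehydrate_from_beads, hydrate_to_beads

root = Path(".")

# Preview what would be created without invoking bd or writing spec files.
preview = hydrate_to_beads(root, dry_run=True)
print(f"would create {preview.created}, skip {preview.skipped}")

# Real run: creates Beads issues, writes ids back into the specs, recreates dependencies.
report = hydrate_to_beads(root)
print(f"created={report.created} deps_added={report.deps_added} failed={report.failed}")

# Later session: pull current Beads statuses back into the spec YAML files.
updated = dehydrate_from_beads(root)
print(f"{updated} spec file(s) updated from Beads")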
tapps_agents/beads/specs.py

@@ -0,0 +1,206 @@
+"""
+Task specification schema and loader.
+
+Loads and saves task specification YAML files from .tapps-agents/task-specs/
+with validation. Supports hydration/dehydration pattern for multi-session workflows.
+"""
+
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+from typing import Literal
+
+import yaml
+
+logger = logging.getLogger(__name__)
+from pydantic import BaseModel, Field, field_validator
+
+
+class TaskSpec(BaseModel):
+    """Task specification schema for .tapps-agents/task-specs/ YAML files."""
+
+    id: str = Field(..., min_length=1, description="Unique task ID (e.g. enh-002-s1)")
+    title: str = Field(..., min_length=1, description="Task title")
+    description: str = Field(default="", description="Task description")
+    type: Literal["story", "epic", "task"] = Field(
+        default="story",
+        description="Task type",
+    )
+    priority: int = Field(default=0, ge=0, description="Priority (0=highest)")
+    story_points: int | None = Field(default=None, ge=0, description="Story points estimate")
+    epic: str | None = Field(default=None, description="Epic ID this task belongs to")
+    dependencies: list[str] = Field(
+        default_factory=list,
+        description="IDs of tasks this depends on",
+    )
+    github_issue: str | int | None = Field(default=None, description="GitHub issue number or ID")
+    beads_issue: str | None = Field(default=None, description="Beads issue ID (populated after create)")
+    status: Literal["todo", "in-progress", "done", "blocked"] = Field(
+        default="todo",
+        description="Current status",
+    )
+    workflow: str | None = Field(
+        default=None,
+        description="Workflow to run (build, fix, review, test, full)",
+    )
+    files: list[str] = Field(default_factory=list, description="Files or paths affected")
+    tests: list[str] = Field(default_factory=list, description="Test paths")
+
+    model_config = {"extra": "forbid"}
+
+    @field_validator("id", mode="before")
+    @classmethod
+    def id_stripped(cls, v: object) -> str:
+        """Strip whitespace from id."""
+        if isinstance(v, str):
+            return v.strip()
+        return str(v)
+
+
+def _task_specs_dir(project_root: Path) -> Path:
+    """Return path to .tapps-agents/task-specs/."""
+    return project_root / ".tapps-agents" / "task-specs"
+
+
+def load_task_specs(project_root: Path | None = None) -> list[TaskSpec]:
+    """
+    Load all task specs from .tapps-agents/task-specs/.
+
+    Scans the directory for YAML files, parses valid ones, and returns a list
+    of validated TaskSpec. Validation errors are reported with file/field;
+    invalid files are skipped (not raised).
+
+    Args:
+        project_root: Project root (default: cwd).
+
+    Returns:
+        List of successfully loaded TaskSpec. Empty if directory missing or no valid files.
+    """
+    project_root = project_root or Path.cwd()
+    specs_dir = _task_specs_dir(project_root)
+    if not specs_dir.exists():
+        return []
+
+    result: list[TaskSpec] = []
+    for path in sorted(specs_dir.glob("*.yaml")):
+        try:
+            spec = _load_single_spec(path)
+            if spec:
+                result.append(spec)
+        except Exception as e:
+            logger.warning("Task spec validation failed %s: %s", path, e)
+            continue
+    return result
+
+
+def _load_single_spec(path: Path) -> TaskSpec | None:
+    """Load and validate a single task spec file."""
+    content = path.read_text(encoding="utf-8")
+    raw = yaml.safe_load(content)
+    if raw is None:
+        return None
+
+    if not isinstance(raw, dict):
+        raise ValueError(f"Expected YAML object at {path}, got {type(raw).__name__}")
+
+    # Support both top-level "task" key and flat structure
+    data = raw.get("task", raw)
+    if not isinstance(data, dict):
+        raise ValueError(f"Expected task object at {path}")
+
+    return TaskSpec.model_validate(data)
+
+
+def load_task_spec(
+    spec_id: str,
+    project_root: Path | None = None,
+) -> TaskSpec | None:
+    """
+    Load a single task spec by ID.
+
+    Searches .tapps-agents/task-specs/ for a file containing the given id.
+    Naming convention: <epic-id>-<story-id>.yaml (e.g. enh-002-s1.yaml).
+
+    Args:
+        spec_id: Task ID (e.g. enh-002-s1).
+        project_root: Project root (default: cwd).
+
+    Returns:
+        TaskSpec if found and valid, else None.
+    """
+    project_root = project_root or Path.cwd()
+    specs_dir = _task_specs_dir(project_root)
+    if not specs_dir.exists():
+        return None
+
+    # Try direct filename: spec_id.yaml
+    candidate = specs_dir / f"{spec_id}.yaml"
+    if candidate.exists():
+        try:
+            return _load_single_spec(candidate)
+        except Exception:
+            return None
+
+    # Scan files for matching id
+    for path in specs_dir.glob("*.yaml"):
+        try:
+            spec = _load_single_spec(path)
+            if spec and spec.id == spec_id:
+                return spec
+        except Exception:
+            continue
+    return None
+
+
+def save_task_spec(
+    spec: TaskSpec,
+    project_root: Path | None = None,
+) -> Path:
+    """
+    Save task spec to .tapps-agents/task-specs/.
+
+    Uses naming convention <epic-id>-<story-id>.yaml when epic can be derived,
+    otherwise <spec.id>.yaml. Creates directory if needed.
+
+    Args:
+        spec: TaskSpec to save.
+        project_root: Project root (default: cwd).
+
+    Returns:
+        Path where spec was written.
+
+    Raises:
+        ValueError: On validation failure (should not occur for valid TaskSpec).
+    """
+    project_root = project_root or Path.cwd()
+    specs_dir = _task_specs_dir(project_root)
+    specs_dir.mkdir(parents=True, exist_ok=True)
+
+    # Naming: epic-id-story-id or spec.id
+    filename = f"{spec.id}.yaml"
+    out_path = specs_dir / filename
+
+    payload = {"task": spec.model_dump()}
+    out_path.write_text(
+        yaml.dump(payload, default_flow_style=False, sort_keys=False, allow_unicode=True),
+        encoding="utf-8",
+    )
+    return out_path
+
+
+def validate_task_spec_file(path: Path) -> tuple[TaskSpec | None, str | None]:
+    """
+    Validate a task spec file without loading into global list.
+
+    Args:
+        path: Path to YAML file.
+
+    Returns:
+        (TaskSpec, None) if valid, (None, error_message) if invalid.
+    """
+    try:
+        spec = _load_single_spec(path)
+        return (spec, None)
+    except Exception as e:
+        return (None, f"{path}: {e}")
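A minimal round-trip sketch (not part of the diff) of the schema above. The field names follow TaskSpec as released; the concrete id, title, and dependency values are hypothetical, and "enh-002-s1" is borrowed from the example in the module's own docstrings.

# Illustrative sketch only: hypothetical values, real field names from TaskSpec.
from pathlib import Path

from tapps_agents.beads.specs import TaskSpec, load_task_spec, save_task_spec

spec = TaskSpec(
    id="enh-002-s1",
    title="Add hydration engine",
    description="Create Beads issues from task specs.",
    type="story",
    priority=1,
    dependencies=["enh-002-s0"],  # hypothetical dependency id
)

# Writes .tapps-agents/task-specs/enh-002-s1.yaml with a top-level {"task": ...} key.
path = save_task_spec(spec, Path("."))

# Round-trip: load_task_spec tries <id>.yaml directly, then scans for a matching id.
loaded = load_task_spec("enh-002-s1", Path("."))
assert loaded is not None and loaded.id == spec.id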
tapps_agents/cli/commands/health.py

@@ -5,6 +5,7 @@ Health command handlers.
 from __future__ import annotations
 
 import json
+import logging
 import sys
 from collections import defaultdict
 from datetime import UTC, datetime, timedelta
@@ -557,7 +558,8 @@ def handle_health_overview_command(
     health_results = orchestrator.run_all_checks(save_metrics=True)
     overall = orchestrator.get_overall_health(health_results)
 
-    # 2. Usage (best-effort; prefer analytics, fallback to execution metrics)
+    # 2. Usage (best-effort; prefer analytics, fallback to execution metrics — HM-001-S1)
+    _log = logging.getLogger(__name__)
     usage_data = None
     try:
         usage_dashboard = AnalyticsDashboard()
@@ -565,6 +567,7 @@ def handle_health_overview_command(
     except Exception:
         pass
     # If analytics has no agent/workflow data, derive from execution metrics
+    fallback_used = False
     if usage_data:
         agents = usage_data.get("agents") or []
         workflows = usage_data.get("workflows") or []
@@ -572,9 +575,22 @@ def handle_health_overview_command(
             w.get("total_executions", 0) for w in workflows
         )
         if total_runs == 0:
-
+            fallback = _usage_data_from_execution_metrics(project_root)
+            if fallback:
+                fallback_used = True
+                usage_data = fallback
     else:
-
+        fallback = _usage_data_from_execution_metrics(project_root)
+        if fallback:
+            fallback_used = True
+            usage_data = fallback
+    if fallback_used and usage_data:
+        n_agents = len(usage_data.get("agents") or [])
+        n_workflows = len(usage_data.get("workflows") or [])
+        _log.info(
+            "Health overview: using execution metrics fallback (%s agents, %s workflows)",
+            n_agents, n_workflows,
+        )
 
     # 3. Build output
     feedback = get_feedback()