tapps-agents 3.5.38__py3-none-any.whl → 3.5.40__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/cleanup/__init__.py +7 -0
- tapps_agents/agents/cleanup/agent.py +445 -0
- tapps_agents/agents/enhancer/agent.py +2 -2
- tapps_agents/agents/implementer/agent.py +35 -13
- tapps_agents/agents/reviewer/agent.py +43 -10
- tapps_agents/agents/reviewer/scoring.py +59 -68
- tapps_agents/agents/reviewer/tools/__init__.py +24 -0
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
- tapps_agents/beads/__init__.py +11 -0
- tapps_agents/beads/hydration.py +213 -0
- tapps_agents/beads/specs.py +206 -0
- tapps_agents/cli/commands/cleanup_agent.py +92 -0
- tapps_agents/cli/commands/health.py +19 -3
- tapps_agents/cli/commands/simple_mode.py +842 -676
- tapps_agents/cli/commands/task.py +219 -0
- tapps_agents/cli/commands/top_level.py +13 -0
- tapps_agents/cli/main.py +15 -2
- tapps_agents/cli/parsers/cleanup_agent.py +228 -0
- tapps_agents/cli/parsers/top_level.py +1978 -1881
- tapps_agents/core/config.py +43 -0
- tapps_agents/core/init_project.py +3012 -2896
- tapps_agents/epic/markdown_sync.py +105 -0
- tapps_agents/epic/orchestrator.py +1 -2
- tapps_agents/epic/parser.py +427 -423
- tapps_agents/experts/adaptive_domain_detector.py +0 -2
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +134 -46
- tapps_agents/health/orchestrator.py +12 -4
- tapps_agents/hooks/__init__.py +33 -0
- tapps_agents/hooks/config.py +140 -0
- tapps_agents/hooks/events.py +135 -0
- tapps_agents/hooks/executor.py +128 -0
- tapps_agents/hooks/manager.py +143 -0
- tapps_agents/session/__init__.py +19 -0
- tapps_agents/session/manager.py +256 -0
- tapps_agents/simple_mode/code_snippet_handler.py +382 -0
- tapps_agents/simple_mode/intent_parser.py +29 -4
- tapps_agents/simple_mode/orchestrators/base.py +185 -59
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +723 -723
- tapps_agents/simple_mode/workflow_suggester.py +37 -3
- tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
- tapps_agents/workflow/cursor_executor.py +2196 -2118
- tapps_agents/workflow/direct_execution_fallback.py +16 -3
- tapps_agents/workflow/enforcer.py +36 -23
- tapps_agents/workflow/message_formatter.py +188 -0
- tapps_agents/workflow/parallel_executor.py +43 -4
- tapps_agents/workflow/parser.py +375 -357
- tapps_agents/workflow/rules_generator.py +337 -331
- tapps_agents/workflow/skill_invoker.py +9 -3
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/METADATA +9 -5
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/RECORD +62 -53
- tapps_agents/agents/analyst/SKILL.md +0 -85
- tapps_agents/agents/architect/SKILL.md +0 -80
- tapps_agents/agents/debugger/SKILL.md +0 -66
- tapps_agents/agents/designer/SKILL.md +0 -78
- tapps_agents/agents/documenter/SKILL.md +0 -95
- tapps_agents/agents/enhancer/SKILL.md +0 -189
- tapps_agents/agents/implementer/SKILL.md +0 -117
- tapps_agents/agents/improver/SKILL.md +0 -55
- tapps_agents/agents/ops/SKILL.md +0 -64
- tapps_agents/agents/orchestrator/SKILL.md +0 -238
- tapps_agents/agents/planner/story_template.md +0 -37
- tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
- tapps_agents/agents/tester/SKILL.md +0 -71
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/licenses/LICENSE +0 -0
- {tapps_agents-3.5.38.dist-info → tapps_agents-3.5.40.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Scoped Mypy Executor - ENH-002-S2
|
|
3
|
+
|
|
4
|
+
Runs mypy with file-level scoping for ~70% performance improvement.
|
|
5
|
+
Uses --follow-imports=skip and --no-site-packages; filters results to target file only.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import logging
|
|
12
|
+
import re
|
|
13
|
+
import subprocess # nosec B404 - fixed args, no shell
|
|
14
|
+
import sys
|
|
15
|
+
import time
|
|
16
|
+
from dataclasses import dataclass
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class MypyTimeoutError(Exception):
    """Raised when a scoped mypy subprocess exceeds its time budget."""

    def __init__(self, timeout: int) -> None:
        # Keep the configured budget around so callers can report or retry it.
        self.timeout = timeout
        super().__init__(f"Mypy timed out after {timeout}s")
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass(frozen=True)
class MypyIssue:
    """One diagnostic reported by mypy for a checked file."""

    file_path: Path
    line: int
    column: int
    severity: str
    message: str
    error_code: str | None

    def to_dict(self) -> dict[str, Any]:
        """Serialize this issue as a plain dict (path coerced to str)."""
        payload: dict[str, Any] = {
            "file_path": str(self.file_path),
            "line": self.line,
            "column": self.column,
            "severity": self.severity,
            "message": self.message,
            "error_code": self.error_code,
        }
        return payload
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass(frozen=True)
class MypyResult:
    """Outcome of one scoped mypy run."""

    issues: tuple[MypyIssue, ...]
    duration_seconds: float
    files_checked: int
    success: bool

    def to_dict(self) -> dict[str, Any]:
        """Serialize this result; issues are expanded via their own to_dict."""
        return {
            "issues": [issue.to_dict() for issue in self.issues],
            "duration_seconds": self.duration_seconds,
            "files_checked": self.files_checked,
            "success": self.success,
        }
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass(frozen=True)
class ScopedMypyConfig:
    """Configuration for scoped mypy execution."""

    # Master switch; callers may consult this before invoking the executor.
    enabled: bool = True
    # Per-run subprocess timeout, in seconds.
    timeout: int = 10
    # Flags that scope mypy to the target file and normalize its output:
    # skip followed imports and site-packages for speed, force column numbers
    # and error codes so output lines are parseable, and disable the summary,
    # color, and incremental cache noise.
    flags: tuple[str, ...] = (
        "--follow-imports=skip",
        "--no-site-packages",
        "--show-column-numbers",
        "--show-error-codes",
        "--no-error-summary",
        "--no-color-output",
        "--no-incremental",
    )
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class ScopedMypyExecutor:
    """
    Execute mypy with scoped imports for performance.

    Uses --follow-imports=skip, --no-site-packages and filters results
    to the target file only. Target: <10s vs 30-60s unscoped.
    """

    def __init__(self, config: ScopedMypyConfig | None = None) -> None:
        """Create an executor; a default ScopedMypyConfig is used when none is given."""
        self.config = config or ScopedMypyConfig()
        self._logger = logging.getLogger(__name__)

    def get_scoped_flags(self) -> list[str]:
        """Return mypy flags for scoped execution (fresh, mutable copy)."""
        return list(self.config.flags)

    def parse_output(self, raw_output: str, file_path: Path) -> list[MypyIssue]:
        """
        Parse mypy stdout and return issues for the target file only.

        Mypy format: file.py:line:col: severity: message [error-code]
        or file.py:line: severity: message [error-code]

        NOTE: lines without the substring "error:" are skipped by the
        pre-filter below, so warning/note lines are never emitted even
        though the regex accepts those severities.
        """
        issues: list[MypyIssue] = []
        target_name = file_path.name
        target_resolved = str(file_path.resolve()).replace("\\", "/")

        for line in raw_output.splitlines():
            line = line.strip()
            # Cheap pre-filter before the regex: only "error:" lines survive.
            if not line or "error:" not in line.lower():
                continue
            # Match file:line:col: severity: message [code] or file:line: severity: message [code]
            match = re.match(
                r"^(.+?):(\d+):(?:\d+:)?\s*(error|warning|note):\s*(.+)$",
                line,
                re.IGNORECASE,
            )
            if not match:
                continue
            path_part, line_str, severity, rest = match.groups()
            path_part = path_part.replace("\\", "/")
            # Keep only diagnostics for the target file: match by name,
            # resolved path substring, or (last resort) path equality.
            if target_name not in path_part and target_resolved not in path_part:
                try:
                    if Path(path_part).resolve() != file_path.resolve():
                        continue
                except Exception:
                    continue
            try:
                line_num = int(line_str)
            except ValueError:
                continue
            # Column is optional in mypy output; default to 0 when absent.
            col_num = 0
            if ":" in line:
                col_match = re.match(r"^.+?:\d+:(\d+):", line)
                if col_match:
                    try:
                        col_num = int(col_match.group(1))
                    except ValueError:
                        pass
            # Split a trailing "[code]" suffix off into error_code.
            error_code = None
            if "[" in rest and "]" in rest:
                start = rest.rfind("[")
                end = rest.rfind("]")
                if start < end:
                    error_code = rest[start + 1 : end].strip()
                    rest = rest[:start].strip()
            issues.append(
                MypyIssue(
                    file_path=file_path,
                    line=line_num,
                    column=col_num,
                    severity=severity.strip().lower(),
                    message=rest.strip(),
                    error_code=error_code,
                )
            )
        return issues

    async def execute_scoped(
        self,
        file_path: Path,
        *,
        timeout: int | None = None,
    ) -> MypyResult:
        """
        Run mypy on a single file with scoped flags and filter to that file.

        Raises MypyTimeoutError when the subprocess exceeds the timeout.
        Other failures (mypy missing, unexpected errors) return a result
        with success=False and empty issues (graceful fallback; caller can
        run full mypy if needed).
        """
        timeout_sec = timeout if timeout is not None else self.config.timeout
        if not file_path.is_file():
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        cmd = [sys.executable, "-m", "mypy"] + self.get_scoped_flags() + [str(file_path)]
        cwd = file_path.parent if file_path.parent.exists() else None
        start = time.monotonic()
        try:
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=cwd,
            )
            try:
                stdout, stderr = await asyncio.wait_for(
                    proc.communicate(), timeout=timeout_sec
                )
            # BUGFIX: on Python < 3.11 asyncio.wait_for raises
            # asyncio.TimeoutError, which is NOT the builtin TimeoutError
            # there; catching only the builtin let timeouts fall through to
            # the broad handler below. Catch both (aliases on 3.11+).
            except (asyncio.TimeoutError, TimeoutError):
                proc.kill()
                await proc.wait()
                elapsed = time.monotonic() - start
                self._logger.warning(
                    "mypy timed out for %s after %.2fs", file_path, elapsed
                )
                raise MypyTimeoutError(timeout_sec)
            elapsed = time.monotonic() - start
            out = (stdout or b"").decode("utf-8", errors="replace")
            issues = self.parse_output(out, file_path)
            return MypyResult(
                issues=tuple(issues),
                duration_seconds=elapsed,
                files_checked=1,
                # "No issues parsed" also counts as success even when mypy's
                # exit code is non-zero (e.g. config noise on stderr).
                success=proc.returncode == 0 or not issues,
            )
        except MypyTimeoutError as e:
            # Re-raise past the broad Exception handler below.
            raise e from None
        except FileNotFoundError:
            self._logger.debug("mypy not found")
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        except Exception as e:
            self._logger.warning("mypy failed for %s: %s", file_path, e)
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )

    def run_scoped_sync(
        self,
        file_path: Path,
        *,
        timeout: int | None = None,
    ) -> MypyResult:
        """
        Run mypy synchronously with scoped flags (for use from scoring.py).
        Uses subprocess.run to avoid event loop issues in sync callers.

        Unlike execute_scoped, a timeout here does NOT raise; it returns a
        result with success=False.
        """
        timeout_sec = timeout if timeout is not None else self.config.timeout
        if not file_path.is_file():
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        cmd = [sys.executable, "-m", "mypy"] + self.get_scoped_flags() + [str(file_path)]
        cwd = file_path.parent if file_path.parent.exists() else None
        start = time.monotonic()
        try:
            result = subprocess.run(  # nosec B603
                cmd,
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="replace",
                timeout=timeout_sec,
                cwd=cwd,
            )
            elapsed = time.monotonic() - start
            out = (result.stdout or "").strip()
            issues = self.parse_output(out, file_path)
            return MypyResult(
                issues=tuple(issues),
                duration_seconds=elapsed,
                files_checked=1,
                success=result.returncode == 0 or not issues,
            )
        except subprocess.TimeoutExpired:
            elapsed = time.monotonic() - start
            self._logger.warning("mypy timed out for %s after %.2fs", file_path, elapsed)
            return MypyResult(
                issues=(),
                duration_seconds=elapsed,
                files_checked=0,
                success=False,
            )
        except FileNotFoundError:
            self._logger.debug("mypy not found")
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        except Exception as e:
            self._logger.warning("mypy failed for %s: %s", file_path, e)
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
|
tapps_agents/beads/__init__.py
CHANGED
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
Beads (bd) integration: optional task-tracking for agents.
|
|
3
3
|
|
|
4
4
|
Use is_available(project_root) before run_bd. See docs/BEADS_INTEGRATION.md.
|
|
5
|
+
Task specs: .tapps-agents/task-specs/ for hydration/dehydration.
|
|
5
6
|
"""
|
|
6
7
|
|
|
7
8
|
from .client import (
|
|
@@ -13,6 +14,9 @@ from .client import (
|
|
|
13
14
|
run_bd,
|
|
14
15
|
)
|
|
15
16
|
from .parse import parse_bd_id_from_stdout
|
|
17
|
+
from .specs import TaskSpec, load_task_spec, load_task_specs, save_task_spec
|
|
18
|
+
|
|
19
|
+
from .hydration import HydrationReport, dehydrate_from_beads, hydrate_to_beads
|
|
16
20
|
|
|
17
21
|
__all__ = [
|
|
18
22
|
"BeadsRequiredError",
|
|
@@ -22,4 +26,11 @@ __all__ = [
|
|
|
22
26
|
"require_beads",
|
|
23
27
|
"resolve_bd_path",
|
|
24
28
|
"run_bd",
|
|
29
|
+
"TaskSpec",
|
|
30
|
+
"HydrationReport",
|
|
31
|
+
"dehydrate_from_beads",
|
|
32
|
+
"hydrate_to_beads",
|
|
33
|
+
"load_task_spec",
|
|
34
|
+
"load_task_specs",
|
|
35
|
+
"save_task_spec",
|
|
25
36
|
]
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hydration engine: create Beads issues from task specs, update specs from Beads.
|
|
3
|
+
|
|
4
|
+
Hydrate: for specs without beads_issue, run bd create and save id; recreate
|
|
5
|
+
dependency graph with bd dep add. Dehydrate: run bd list, update spec files
|
|
6
|
+
with current status. Handles missing bd gracefully (log, no crash).
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
|
|
16
|
+
from .client import is_available, run_bd
|
|
17
|
+
from .parse import parse_bd_id_from_stdout
|
|
18
|
+
from .specs import TaskSpec, load_task_specs, save_task_spec
|
|
19
|
+
|
|
20
|
+
logger = logging.getLogger(__name__)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class HydrationReport:
    """Summary of a hydration run."""

    # Specs for which a new Beads issue was created (or would be, in dry-run).
    created: int = 0
    # Specs skipped because they already carried a beads_issue id.
    skipped: int = 0
    # Specs where bd create failed or its output id could not be parsed.
    failed: int = 0
    # Dependency links added via `bd dep add`.
    deps_added: int = 0
    # True when bd was unavailable and hydration was skipped entirely.
    bd_unavailable: bool = False
    # Mirrors the dry_run flag this report was produced under.
    dry_run: bool = False
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def hydrate_to_beads(
    project_root: Path | None = None,
    *,
    dry_run: bool = False,
) -> HydrationReport:
    """
    Create Beads issues for task specs that don't have beads_issue; recreate deps.

    For each spec in .tapps-agents/task-specs/ without beads_issue, runs
    bd create and stores the returned id in the spec file. Then runs
    bd dep add for each dependency. If bd is not available, logs and returns
    without raising.

    Args:
        project_root: Project root (default: cwd).
        dry_run: If True, do not run bd or write files; report what would be done.

    Returns:
        HydrationReport with created/skipped/failed/deps_added counts.
    """
    project_root = project_root or Path.cwd()
    report = HydrationReport(dry_run=dry_run)

    if not is_available(project_root):
        logger.warning("Hydration skipped: bd not available")
        report.bd_unavailable = True
        return report

    specs = load_task_specs(project_root)
    if not specs:
        return report

    # Map spec.id -> beads_issue id after creation
    spec_to_bd: dict[str, str] = {}
    for spec in specs:
        if spec.beads_issue:
            spec_to_bd[spec.id] = spec.beads_issue
            report.skipped += 1
            continue
        if dry_run:
            report.created += 1
            continue
        # bd argument hygiene: keep title/description short and single-line.
        title = (spec.title or spec.id)[:200].strip()
        desc = (spec.description or "")[:500].replace("\n", " ").strip()
        args = ["create", title]
        if desc:
            args.extend(["-d", desc])
        try:
            r = run_bd(project_root, args)
            if r.returncode != 0:
                logger.warning("Hydration: bd create failed for %s: %s", spec.id, r.stderr)
                report.failed += 1
                continue
            bd_id = parse_bd_id_from_stdout(r.stdout)
            if bd_id:
                spec_to_bd[spec.id] = bd_id
                spec.beads_issue = bd_id
                save_task_spec(spec, project_root)
                report.created += 1
            else:
                logger.warning("Hydration: could not parse bd id for %s", spec.id)
                report.failed += 1
        except Exception as e:
            logger.warning("Hydration: bd create failed for %s: %s", spec.id, e)
            report.failed += 1

    # Recreate dependency graph: bd dep add child parent (parent blocks child)
    if dry_run:
        # BUGFIX: a real run creates issues for every spec and then links all
        # their dependencies, but the old dry-run prediction only counted
        # specs that already had a beads_issue (spec_to_bd is never populated
        # for would-be-created specs in dry-run), under-reporting deps_added.
        # Predict links across all known spec ids instead.
        known_ids = {s.id for s in specs}
        for spec in specs:
            if spec.dependencies:
                report.deps_added += sum(1 for d in spec.dependencies if d in known_ids)
        return report

    for spec in specs:
        child_bd = spec_to_bd.get(spec.id)
        if not child_bd or not spec.dependencies:
            continue
        for dep_id in spec.dependencies:
            parent_bd = spec_to_bd.get(dep_id)
            if not parent_bd:
                continue
            try:
                r = run_bd(project_root, ["dep", "add", child_bd, parent_bd])
                if r.returncode == 0:
                    report.deps_added += 1
                else:
                    logger.warning(
                        "Hydration: bd dep add %s %s failed: %s",
                        child_bd,
                        parent_bd,
                        r.stderr,
                    )
            except Exception as e:
                logger.warning("Hydration: bd dep add %s %s failed: %s", child_bd, parent_bd, e)

    return report
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def dehydrate_from_beads(project_root: Path | None = None) -> int:
    """
    Update task spec files with current status from Beads.

    Runs bd list (or equivalent), maps beads_issue id to status, and updates
    each spec file. If bd is not available, logs and returns 0.

    Args:
        project_root: Project root (default: cwd).

    Returns:
        Number of spec files updated.
    """
    root = project_root or Path.cwd()

    if not is_available(root):
        logger.warning("Dehydration skipped: bd not available")
        return 0

    # Try bd list --json; fallback to parsing stdout if format differs
    try:
        result = run_bd(root, ["list", "--json"])
    except Exception as e:
        logger.warning("Dehydration: bd list failed: %s", e)
        return 0

    if result.returncode != 0:
        logger.warning("Dehydration: bd list failed: %s", result.stderr)
        return 0

    stdout = result.stdout or ""
    statuses: dict[str, str] = {}
    try:
        payload = json.loads(stdout or "[]")
    except json.JSONDecodeError:
        # Fallback: parse line-based output for "id status" or "id\tstatus"
        for raw_line in stdout.splitlines():
            fields = raw_line.strip().split()
            if len(fields) >= 2:
                statuses[fields[0]] = fields[1].lower()
            elif len(fields) == 1:
                statuses[fields[0]] = "todo"
    else:
        if isinstance(payload, list):
            for entry in payload:
                if not isinstance(entry, dict):
                    continue
                bid = entry.get("id") or entry.get("bd_id") or entry.get("issue_id")
                if not bid:
                    continue
                state = entry.get("status") or entry.get("state") or "todo"
                statuses[str(bid)] = str(state).lower()
        elif isinstance(payload, dict):
            for bid, info in payload.items():
                if isinstance(info, dict):
                    state = info.get("status") or info.get("state") or "todo"
                else:
                    state = str(info)
                statuses[str(bid)] = str(state).lower()

    # Normalize the various bd status spellings onto our spec statuses;
    # unknown spellings leave the spec's current status unchanged.
    alias_map = {
        "todo": "todo",
        "open": "todo",
        "pending": "todo",
        "in-progress": "in-progress",
        "in_progress": "in-progress",
        "wip": "in-progress",
        "done": "done",
        "closed": "done",
        "completed": "done",
        "blocked": "blocked",
    }

    updated = 0
    for spec in load_task_specs(root):
        if not spec.beads_issue:
            continue
        bd_state = statuses.get(spec.beads_issue)
        if not bd_state:
            continue
        mapped = alias_map.get(bd_state, spec.status)
        if spec.status != mapped:
            save_task_spec(spec.model_copy(update={"status": mapped}), root)
            updated += 1

    return updated
|