tapps-agents 3.5.39__py3-none-any.whl → 3.5.40__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. tapps_agents/__init__.py +2 -2
  2. tapps_agents/agents/enhancer/agent.py +2728 -2728
  3. tapps_agents/agents/implementer/agent.py +35 -13
  4. tapps_agents/agents/reviewer/agent.py +43 -10
  5. tapps_agents/agents/reviewer/scoring.py +59 -68
  6. tapps_agents/agents/reviewer/tools/__init__.py +24 -0
  7. tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
  8. tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
  9. tapps_agents/beads/__init__.py +11 -0
  10. tapps_agents/beads/hydration.py +213 -0
  11. tapps_agents/beads/specs.py +206 -0
  12. tapps_agents/cli/commands/health.py +19 -3
  13. tapps_agents/cli/commands/simple_mode.py +842 -676
  14. tapps_agents/cli/commands/task.py +219 -0
  15. tapps_agents/cli/commands/top_level.py +13 -0
  16. tapps_agents/cli/main.py +658 -651
  17. tapps_agents/cli/parsers/top_level.py +1978 -1881
  18. tapps_agents/core/config.py +1622 -1622
  19. tapps_agents/core/init_project.py +3012 -2897
  20. tapps_agents/epic/markdown_sync.py +105 -0
  21. tapps_agents/epic/orchestrator.py +1 -2
  22. tapps_agents/epic/parser.py +427 -423
  23. tapps_agents/experts/adaptive_domain_detector.py +0 -2
  24. tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
  25. tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
  26. tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
  27. tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
  28. tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
  29. tapps_agents/health/checks/outcomes.py +134 -46
  30. tapps_agents/health/orchestrator.py +12 -4
  31. tapps_agents/hooks/__init__.py +33 -0
  32. tapps_agents/hooks/config.py +140 -0
  33. tapps_agents/hooks/events.py +135 -0
  34. tapps_agents/hooks/executor.py +128 -0
  35. tapps_agents/hooks/manager.py +143 -0
  36. tapps_agents/session/__init__.py +19 -0
  37. tapps_agents/session/manager.py +256 -0
  38. tapps_agents/simple_mode/code_snippet_handler.py +382 -0
  39. tapps_agents/simple_mode/intent_parser.py +29 -4
  40. tapps_agents/simple_mode/orchestrators/base.py +185 -59
  41. tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
  42. tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +2 -2
  43. tapps_agents/simple_mode/workflow_suggester.py +37 -3
  44. tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
  45. tapps_agents/workflow/cursor_executor.py +2196 -2118
  46. tapps_agents/workflow/direct_execution_fallback.py +16 -3
  47. tapps_agents/workflow/message_formatter.py +2 -1
  48. tapps_agents/workflow/parallel_executor.py +43 -4
  49. tapps_agents/workflow/parser.py +375 -357
  50. tapps_agents/workflow/rules_generator.py +337 -337
  51. tapps_agents/workflow/skill_invoker.py +9 -3
  52. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/METADATA +5 -1
  53. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/RECORD +57 -53
  54. tapps_agents/agents/analyst/SKILL.md +0 -85
  55. tapps_agents/agents/architect/SKILL.md +0 -80
  56. tapps_agents/agents/debugger/SKILL.md +0 -66
  57. tapps_agents/agents/designer/SKILL.md +0 -78
  58. tapps_agents/agents/documenter/SKILL.md +0 -95
  59. tapps_agents/agents/enhancer/SKILL.md +0 -189
  60. tapps_agents/agents/implementer/SKILL.md +0 -117
  61. tapps_agents/agents/improver/SKILL.md +0 -55
  62. tapps_agents/agents/ops/SKILL.md +0 -64
  63. tapps_agents/agents/orchestrator/SKILL.md +0 -238
  64. tapps_agents/agents/planner/story_template.md +0 -37
  65. tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
  66. tapps_agents/agents/tester/SKILL.md +0 -71
  67. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/WHEEL +0 -0
  68. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/entry_points.txt +0 -0
  69. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/licenses/LICENSE +0 -0
  70. {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.40.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,250 @@
1
+ """
2
+ Ruff Output Grouping - ENH-002-S3
3
+
4
+ Parses Ruff JSON output and groups issues by error code for cleaner reports.
5
+ Sorts by severity (error > warning > info), then by count.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json
11
+ import logging
12
+ from dataclasses import dataclass
13
+ from typing import Any
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
class RuffParsingError(Exception):
    """Raised when Ruff's JSON output cannot be parsed."""

    def __init__(self, reason: str) -> None:
        # Keep the raw reason so callers can inspect it programmatically.
        self.reason = reason
        message = f"Ruff parsing failed: {reason}"
        super().__init__(message)
24
+
25
+
26
@dataclass(frozen=True)
class RuffIssue:
    """One Ruff linting diagnostic, normalized for reporting."""

    code: str  # Ruff rule code, e.g. "E501"
    message: str  # human-readable description from Ruff
    line: int  # line number as reported by Ruff (0 when missing)
    column: int  # column number as reported by Ruff (0 when missing)
    severity: str  # "error", "warning", or "info"
    fixable: bool  # True when the diagnostic carries an auto-fix

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict form suitable for JSON serialization."""
        return dict(
            code=self.code,
            message=self.message,
            line=self.line,
            column=self.column,
            severity=self.severity,
            fixable=self.fixable,
        )
47
+
48
+
49
@dataclass(frozen=True)
class GroupedRuffIssues:
    """Ruff issues bucketed by error code, plus summary counts."""

    groups: dict[str, tuple[RuffIssue, ...]]  # code -> issues with that code
    total_issues: int  # issue count across all groups
    unique_codes: int  # number of distinct codes
    severity_summary: dict[str, int]  # severity -> issue count
    fixable_count: int  # issues that carry an auto-fix

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict form suitable for JSON serialization."""
        serialized_groups = {
            code: [issue.to_dict() for issue in issues]
            for code, issues in self.groups.items()
        }
        return dict(
            groups=serialized_groups,
            total_issues=self.total_issues,
            unique_codes=self.unique_codes,
            severity_summary=self.severity_summary,
            fixable_count=self.fixable_count,
        )
71
+
72
+
73
@dataclass(frozen=True)
class RuffGroupingConfig:
    """Settings controlling how Ruff issues are grouped and rendered."""

    enabled: bool = True  # master on/off switch for grouping
    sort_by: str = "severity"  # "severity", "count", or "code"
    include_fix_suggestions: bool = True  # note auto-fixable counts in reports
    max_issues_per_group: int = 10  # issues listed per code before truncation
81
+
82
+
83
+ def _severity_order(severity: str) -> int:
84
+ """Lower = higher priority (error=0, warning=1, info=2)."""
85
+ order = {"error": 0, "warning": 1, "info": 2, "fatal": 0}
86
+ return order.get(severity.lower(), 1)
87
+
88
+
89
class RuffGroupingParser:
    """
    Parse Ruff JSON output and group issues by error code.

    Groups sort by severity (error > warning > info), then by count.
    Reports render as markdown, HTML, or JSON.
    """

    def __init__(self, config: RuffGroupingConfig | None = None) -> None:
        """Create a parser; falls back to a default RuffGroupingConfig."""
        self.config = config or RuffGroupingConfig()

    @staticmethod
    def _severity_for(code: str, code_info: Any) -> str:
        """
        Resolve an issue's severity.

        Prefers an explicit "severity" field in the code payload; otherwise
        falls back to Ruff's code-prefix convention (E/F -> error,
        W -> warning, I -> info) and finally to "error".
        """
        # Bug fix: previously an explicit severity was computed and then
        # unconditionally overwritten by the prefix heuristic below.
        if isinstance(code_info, dict) and code_info.get("severity"):
            return str(code_info["severity"]).lower()
        if code.startswith(("E", "F")):
            return "error"
        if code.startswith("W"):
            return "warning"
        if code.startswith("I"):
            return "info"
        return "error"

    def _parse_diagnostic(self, diag: dict[str, Any]) -> RuffIssue:
        """Convert one Ruff diagnostic dict into a RuffIssue."""
        code_info = diag.get("code")
        if isinstance(code_info, dict):
            code = code_info.get("name") or code_info.get("code") or "unknown"
        else:
            code = str(code_info) if code_info else "unknown"
        location = diag.get("location", {}) or {}
        row = int(location.get("row", 0)) if isinstance(location, dict) else 0
        col = int(location.get("column", 0)) if isinstance(location, dict) else 0
        fix = diag.get("fix")
        return RuffIssue(
            code=code,
            message=diag.get("message", ""),
            line=row,
            column=col,
            severity=self._severity_for(code, code_info),
            fixable=fix is not None and fix != {},
        )

    def parse_and_group(self, ruff_json: str) -> GroupedRuffIssues:
        """
        Parse Ruff JSON output and group diagnostics by error code.

        Ruff JSON is a list of diagnostics; each has "code" (a string or a
        dict with "name"), "message", "location" (row, column), and an
        optional "fix".

        Raises:
            RuffParsingError: If the input is not valid JSON or not a list.
        """
        try:
            data = json.loads(ruff_json) if ruff_json.strip() else []
        except json.JSONDecodeError as e:
            raise RuffParsingError(str(e)) from e
        if not isinstance(data, list):
            raise RuffParsingError("Expected a JSON array of diagnostics")

        issues = [self._parse_diagnostic(d) for d in data if isinstance(d, dict)]

        groups: dict[str, list[RuffIssue]] = {}
        severity_summary: dict[str, int] = {}
        for issue in issues:
            groups.setdefault(issue.code, []).append(issue)
            severity_summary[issue.severity] = (
                severity_summary.get(issue.severity, 0) + 1
            )

        return GroupedRuffIssues(
            groups={code: tuple(grp) for code, grp in groups.items()},
            total_issues=len(issues),
            unique_codes=len(groups),
            severity_summary=severity_summary,
            fixable_count=sum(1 for issue in issues if issue.fixable),
        )

    def sort_groups(
        self,
        groups: dict[str, tuple[RuffIssue, ...]],
        by: str = "severity",
    ) -> list[tuple[str, tuple[RuffIssue, ...]]]:
        """
        Sort groups for display.

        by="code" sorts alphabetically; by="count" sorts by descending size;
        anything else sorts by worst severity, then descending size, then code.
        """
        items = list(groups.items())
        if by == "code":
            return sorted(items, key=lambda pair: pair[0])
        if by == "count":
            return sorted(items, key=lambda pair: -len(pair[1]))

        def severity_key(
            pair: tuple[str, tuple[RuffIssue, ...]],
        ) -> tuple[int, int, str]:
            code, grp = pair
            # default=1 guards against an (unexpected) empty group.
            worst = min((_severity_order(i.severity) for i in grp), default=1)
            return (worst, -len(grp), code)

        return sorted(items, key=severity_key)

    def render_grouped(
        self,
        grouped: GroupedRuffIssues,
        format: str = "markdown",  # name kept for API compatibility despite shadowing the builtin
    ) -> str:
        """Render grouped issues as "markdown" (default), "html", or "json"."""
        sorted_pairs = self.sort_groups(grouped.groups, by=self.config.sort_by)
        if format == "json":
            return json.dumps(grouped.to_dict(), indent=2)
        if format == "html":
            return self._render_html(sorted_pairs, grouped)
        # "markdown" and any unrecognized format fall back to markdown.
        return self._render_markdown(sorted_pairs, grouped)

    def _render_markdown(
        self,
        sorted_pairs: list[tuple[str, tuple[RuffIssue, ...]]],
        grouped: GroupedRuffIssues,
    ) -> str:
        """Render groups as a markdown report with capped per-group listings."""
        lines = [
            "### Issues by Code",
            "",
            f"Total: {grouped.total_issues} issues in {grouped.unique_codes} categories.",
        ]
        if self.config.include_fix_suggestions and grouped.fixable_count:
            lines.append(f"*{grouped.fixable_count} auto-fixable*")
        lines.append("")
        max_per = self.config.max_issues_per_group
        for code, grp in sorted_pairs:
            fixable = sum(1 for issue in grp if issue.fixable)
            sev = grp[0].severity if grp else "error"
            lines.append(f"#### {code} ({len(grp)} issues, {sev})")
            if self.config.include_fix_suggestions and fixable:
                lines.append(f"*{fixable} auto-fixable*")
            lines.extend(
                f"- Line {issue.line}: {issue.message}" for issue in grp[:max_per]
            )
            if len(grp) > max_per:
                lines.append(f"- ... and {len(grp) - max_per} more")
            lines.append("")
        return "\n".join(lines).strip()

    def _render_html(
        self,
        sorted_pairs: list[tuple[str, tuple[RuffIssue, ...]]],
        grouped: GroupedRuffIssues,
    ) -> str:
        """Render groups as collapsible HTML <details> sections."""
        max_per = self.config.max_issues_per_group
        lines = [
            "<div class='ruff-grouped'>",
            f"<p>Total: {grouped.total_issues} issues in {grouped.unique_codes} categories.</p>",
        ]
        for code, grp in sorted_pairs:
            fixable = sum(1 for issue in grp if issue.fixable)
            sev = grp[0].severity if grp else "error"
            lines.append(f"<details><summary>{code} ({len(grp)} issues, {sev})")
            if fixable:
                lines.append(f" — {fixable} auto-fixable")
            lines.append("</summary><ul>")
            lines.extend(
                f"<li>Line {issue.line}: {issue.message}</li>"
                for issue in grp[:max_per]
            )
            if len(grp) > max_per:
                lines.append(f"<li>... and {len(grp) - max_per} more</li>")
            lines.append("</ul></details>")
        lines.append("</div>")
        return "\n".join(lines)
@@ -0,0 +1,284 @@
1
+ """
2
+ Scoped Mypy Executor - ENH-002-S2
3
+
4
+ Runs mypy with file-level scoping for ~70% performance improvement.
5
+ Uses --follow-imports=skip and --no-site-packages; filters results to target file only.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import asyncio
11
+ import logging
12
+ import re
13
+ import subprocess # nosec B404 - fixed args, no shell
14
+ import sys
15
+ import time
16
+ from dataclasses import dataclass
17
+ from pathlib import Path
18
+ from typing import Any
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
class MypyTimeoutError(Exception):
    """Raised when a scoped mypy run exceeds its time budget."""

    def __init__(self, timeout: int) -> None:
        # Record the budget so callers can retry with a larger one.
        self.timeout = timeout
        message = f"Mypy timed out after {timeout}s"
        super().__init__(message)
29
+
30
+
31
@dataclass(frozen=True)
class MypyIssue:
    """One mypy diagnostic, normalized for reporting."""

    file_path: Path  # file the issue was reported against
    line: int  # line number from mypy output
    column: int  # column number (0 when mypy omitted it)
    severity: str  # "error", "warning", or "note"
    message: str  # diagnostic text, without the trailing [code]
    error_code: str | None  # mypy error code, e.g. "arg-type", if present

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict form; file_path is converted to a string."""
        return dict(
            file_path=str(self.file_path),
            line=self.line,
            column=self.column,
            severity=self.severity,
            message=self.message,
            error_code=self.error_code,
        )
52
+
53
+
54
@dataclass(frozen=True)
class MypyResult:
    """Outcome of a scoped mypy run."""

    issues: tuple[MypyIssue, ...]  # issues for the target file only
    duration_seconds: float  # wall-clock run time
    files_checked: int  # files actually checked (0 on failure)
    success: bool  # whether the run is considered successful

    def to_dict(self) -> dict[str, Any]:
        """Return a plain-dict form suitable for JSON serialization."""
        return dict(
            issues=[issue.to_dict() for issue in self.issues],
            duration_seconds=self.duration_seconds,
            files_checked=self.files_checked,
            success=self.success,
        )
71
+
72
+
73
@dataclass(frozen=True)
class ScopedMypyConfig:
    """Settings for scoped (single-file) mypy execution."""

    enabled: bool = True  # master on/off switch
    timeout: int = 10  # per-run time budget in seconds
    # Flags that stop mypy from following imports or scanning site-packages;
    # this is where the scoped-run speedup comes from.
    flags: tuple[str, ...] = (
        "--follow-imports=skip",
        "--no-site-packages",
        "--show-column-numbers",
        "--show-error-codes",
        "--no-error-summary",
        "--no-color-output",
        "--no-incremental",
    )
88
+
89
+
90
class ScopedMypyExecutor:
    """
    Execute mypy on a single file with scoped imports for performance.

    Uses --follow-imports=skip and --no-site-packages, then filters the
    diagnostics down to the target file. Target: <10s vs 30-60s unscoped.
    """

    def __init__(self, config: ScopedMypyConfig | None = None) -> None:
        """Create an executor; falls back to a default ScopedMypyConfig."""
        self.config = config or ScopedMypyConfig()
        self._logger = logging.getLogger(__name__)

    def get_scoped_flags(self) -> list[str]:
        """Return a fresh list of the configured mypy flags."""
        return list(self.config.flags)

    @staticmethod
    def _parse_column(line: str) -> int:
        """Extract the column number from a mypy output line, or 0 if absent."""
        col_match = re.match(r"^.+?:\d+:(\d+):", line)
        if col_match:
            try:
                return int(col_match.group(1))
            except ValueError:
                pass
        return 0

    @staticmethod
    def _split_error_code(rest: str) -> tuple[str, str | None]:
        """Split a trailing ``[error-code]`` off a mypy message, if present."""
        if "[" in rest and "]" in rest:
            start = rest.rfind("[")
            end = rest.rfind("]")
            if start < end:
                return rest[:start].strip(), rest[start + 1 : end].strip()
        return rest.strip(), None

    def parse_output(self, raw_output: str, file_path: Path) -> list[MypyIssue]:
        """
        Parse mypy stdout, keeping only issues for the target file.

        Mypy line format: ``file.py:line[:col]: severity: message [code]``.
        """
        issues: list[MypyIssue] = []
        target_name = file_path.name
        target_resolved = str(file_path.resolve()).replace("\\", "/")

        for line in raw_output.splitlines():
            line = line.strip()
            lowered = line.lower()
            # Bug fix: only "error:" lines used to pass this filter, so the
            # warning/note lines matched by the regex below were dropped.
            if not line or not any(
                tag in lowered for tag in ("error:", "warning:", "note:")
            ):
                continue
            match = re.match(
                r"^(.+?):(\d+):(?:\d+:)?\s*(error|warning|note):\s*(.+)$",
                line,
                re.IGNORECASE,
            )
            if not match:
                continue
            path_part, line_str, severity, rest = match.groups()
            path_part = path_part.replace("\\", "/")
            # Keep the diagnostic only if it refers to the target file.
            if target_name not in path_part and target_resolved not in path_part:
                try:
                    if Path(path_part).resolve() != file_path.resolve():
                        continue
                except Exception:
                    continue
            try:
                line_num = int(line_str)
            except ValueError:
                continue
            message, error_code = self._split_error_code(rest)
            issues.append(
                MypyIssue(
                    file_path=file_path,
                    line=line_num,
                    column=self._parse_column(line),
                    severity=severity.strip().lower(),
                    message=message,
                    error_code=error_code,
                )
            )
        return issues

    async def execute_scoped(
        self,
        file_path: Path,
        *,
        timeout: int | None = None,
    ) -> MypyResult:
        """
        Run mypy asynchronously on a single file with scoped flags.

        Returns a failed result (success=False, no issues) when the file is
        missing, mypy is not installed, or execution fails.

        Raises:
            MypyTimeoutError: If mypy exceeds the timeout. (Docstring fix:
                the old text claimed a graceful-fallback return, but this
                method raises; run_scoped_sync is the non-raising variant.)
        """
        timeout_sec = timeout if timeout is not None else self.config.timeout
        if not file_path.is_file():
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        cmd = [sys.executable, "-m", "mypy", *self.get_scoped_flags(), str(file_path)]
        cwd = file_path.parent if file_path.parent.exists() else None
        start = time.monotonic()
        try:
            proc = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=cwd,
            )
            try:
                stdout, _stderr = await asyncio.wait_for(
                    proc.communicate(), timeout=timeout_sec
                )
            # Bug fix: asyncio.TimeoutError is not the builtin TimeoutError
            # before Python 3.11, so catch both to stay portable.
            except (TimeoutError, asyncio.TimeoutError):
                proc.kill()
                await proc.wait()
                elapsed = time.monotonic() - start
                self._logger.warning(
                    "mypy timed out for %s after %.2fs", file_path, elapsed
                )
                raise MypyTimeoutError(timeout_sec) from None
            elapsed = time.monotonic() - start
            out = (stdout or b"").decode("utf-8", errors="replace")
            issues = self.parse_output(out, file_path)
            return MypyResult(
                issues=tuple(issues),
                duration_seconds=elapsed,
                files_checked=1,
                # Scoped runs can surface errors in other files; the run
                # counts as successful if the target file itself is clean.
                success=proc.returncode == 0 or not issues,
            )
        except MypyTimeoutError:
            raise
        except FileNotFoundError:
            self._logger.debug("mypy not found")
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        except Exception as e:
            self._logger.warning("mypy failed for %s: %s", file_path, e)
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )

    def run_scoped_sync(
        self,
        file_path: Path,
        *,
        timeout: int | None = None,
    ) -> MypyResult:
        """
        Run mypy synchronously with scoped flags (for sync callers such as
        scoring.py); uses subprocess.run to avoid event-loop issues.

        Unlike execute_scoped, a timeout here returns a failed result
        instead of raising.
        """
        timeout_sec = timeout if timeout is not None else self.config.timeout
        if not file_path.is_file():
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        cmd = [sys.executable, "-m", "mypy", *self.get_scoped_flags(), str(file_path)]
        cwd = file_path.parent if file_path.parent.exists() else None
        start = time.monotonic()
        try:
            result = subprocess.run(  # nosec B603 - fixed args, no shell
                cmd,
                capture_output=True,
                text=True,
                encoding="utf-8",
                errors="replace",
                timeout=timeout_sec,
                cwd=cwd,
            )
            elapsed = time.monotonic() - start
            issues = self.parse_output((result.stdout or "").strip(), file_path)
            return MypyResult(
                issues=tuple(issues),
                duration_seconds=elapsed,
                files_checked=1,
                success=result.returncode == 0 or not issues,
            )
        except subprocess.TimeoutExpired:
            elapsed = time.monotonic() - start
            self._logger.warning(
                "mypy timed out for %s after %.2fs", file_path, elapsed
            )
            return MypyResult(
                issues=(),
                duration_seconds=elapsed,
                files_checked=0,
                success=False,
            )
        except FileNotFoundError:
            self._logger.debug("mypy not found")
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
        except Exception as e:
            self._logger.warning("mypy failed for %s: %s", file_path, e)
            return MypyResult(
                issues=(), duration_seconds=0.0, files_checked=0, success=False
            )
@@ -2,6 +2,7 @@
2
2
  Beads (bd) integration: optional task-tracking for agents.
3
3
 
4
4
  Use is_available(project_root) before run_bd. See docs/BEADS_INTEGRATION.md.
5
+ Task specs: .tapps-agents/task-specs/ for hydration/dehydration.
5
6
  """
6
7
 
7
8
  from .client import (
@@ -13,6 +14,9 @@ from .client import (
13
14
  run_bd,
14
15
  )
15
16
  from .parse import parse_bd_id_from_stdout
17
+ from .specs import TaskSpec, load_task_spec, load_task_specs, save_task_spec
18
+
19
+ from .hydration import HydrationReport, dehydrate_from_beads, hydrate_to_beads
16
20
 
17
21
  __all__ = [
18
22
  "BeadsRequiredError",
@@ -22,4 +26,11 @@ __all__ = [
22
26
  "require_beads",
23
27
  "resolve_bd_path",
24
28
  "run_bd",
29
+ "TaskSpec",
30
+ "HydrationReport",
31
+ "dehydrate_from_beads",
32
+ "hydrate_to_beads",
33
+ "load_task_spec",
34
+ "load_task_specs",
35
+ "save_task_spec",
25
36
  ]