tapps-agents 3.5.39-py3-none-any.whl → 3.5.41-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- tapps_agents/__init__.py +2 -2
- tapps_agents/agents/enhancer/agent.py +2728 -2728
- tapps_agents/agents/implementer/agent.py +35 -13
- tapps_agents/agents/reviewer/agent.py +43 -10
- tapps_agents/agents/reviewer/scoring.py +59 -68
- tapps_agents/agents/reviewer/tools/__init__.py +24 -0
- tapps_agents/agents/reviewer/tools/ruff_grouping.py +250 -0
- tapps_agents/agents/reviewer/tools/scoped_mypy.py +284 -0
- tapps_agents/beads/__init__.py +11 -0
- tapps_agents/beads/hydration.py +213 -0
- tapps_agents/beads/specs.py +206 -0
- tapps_agents/cli/commands/health.py +19 -3
- tapps_agents/cli/commands/simple_mode.py +842 -676
- tapps_agents/cli/commands/task.py +227 -0
- tapps_agents/cli/commands/top_level.py +13 -0
- tapps_agents/cli/main.py +658 -651
- tapps_agents/cli/parsers/top_level.py +1978 -1881
- tapps_agents/core/config.py +1622 -1622
- tapps_agents/core/init_project.py +3012 -2897
- tapps_agents/epic/markdown_sync.py +105 -0
- tapps_agents/epic/orchestrator.py +1 -2
- tapps_agents/epic/parser.py +427 -423
- tapps_agents/experts/adaptive_domain_detector.py +0 -2
- tapps_agents/experts/knowledge/api-design-integration/api-security-patterns.md +15 -15
- tapps_agents/experts/knowledge/api-design-integration/external-api-integration.md +19 -44
- tapps_agents/health/checks/outcomes.backup_20260204_064058.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064256.py +324 -0
- tapps_agents/health/checks/outcomes.backup_20260204_064600.py +324 -0
- tapps_agents/health/checks/outcomes.py +134 -46
- tapps_agents/health/orchestrator.py +12 -4
- tapps_agents/hooks/__init__.py +33 -0
- tapps_agents/hooks/config.py +140 -0
- tapps_agents/hooks/events.py +135 -0
- tapps_agents/hooks/executor.py +128 -0
- tapps_agents/hooks/manager.py +143 -0
- tapps_agents/session/__init__.py +19 -0
- tapps_agents/session/manager.py +256 -0
- tapps_agents/simple_mode/code_snippet_handler.py +382 -0
- tapps_agents/simple_mode/intent_parser.py +29 -4
- tapps_agents/simple_mode/orchestrators/base.py +185 -59
- tapps_agents/simple_mode/orchestrators/build_orchestrator.py +2667 -2642
- tapps_agents/simple_mode/orchestrators/fix_orchestrator.py +2 -2
- tapps_agents/simple_mode/workflow_suggester.py +37 -3
- tapps_agents/workflow/agent_handlers/implementer_handler.py +18 -3
- tapps_agents/workflow/cursor_executor.py +2337 -2118
- tapps_agents/workflow/direct_execution_fallback.py +16 -3
- tapps_agents/workflow/message_formatter.py +2 -1
- tapps_agents/workflow/models.py +38 -1
- tapps_agents/workflow/parallel_executor.py +43 -4
- tapps_agents/workflow/parser.py +375 -357
- tapps_agents/workflow/rules_generator.py +337 -337
- tapps_agents/workflow/skill_invoker.py +9 -3
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/METADATA +5 -1
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/RECORD +58 -54
- tapps_agents/agents/analyst/SKILL.md +0 -85
- tapps_agents/agents/architect/SKILL.md +0 -80
- tapps_agents/agents/debugger/SKILL.md +0 -66
- tapps_agents/agents/designer/SKILL.md +0 -78
- tapps_agents/agents/documenter/SKILL.md +0 -95
- tapps_agents/agents/enhancer/SKILL.md +0 -189
- tapps_agents/agents/implementer/SKILL.md +0 -117
- tapps_agents/agents/improver/SKILL.md +0 -55
- tapps_agents/agents/ops/SKILL.md +0 -64
- tapps_agents/agents/orchestrator/SKILL.md +0 -238
- tapps_agents/agents/planner/story_template.md +0 -37
- tapps_agents/agents/reviewer/templates/quality-dashboard.html.j2 +0 -150
- tapps_agents/agents/tester/SKILL.md +0 -71
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/WHEEL +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/entry_points.txt +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/licenses/LICENSE +0 -0
- {tapps_agents-3.5.39.dist-info → tapps_agents-3.5.41.dist-info}/top_level.txt +0 -0
tapps_agents/health/checks/outcomes.py:

@@ -31,6 +31,90 @@ class OutcomeHealthCheck(HealthCheck):
         self.reports_dir = reports_dir or (self.project_root / ".tapps-agents" / "reports")
         self.accessor = CursorAnalyticsAccessor()
 
+    def _compute_outcomes_from_execution_metrics(self, days: int = 30) -> dict:
+        """
+        Compute outcomes from execution metrics when review artifacts don't exist.
+
+        Args:
+            days: Number of days to look back for metrics
+
+        Returns:
+            Dictionary with review_executions_count, success_rate, and gate_pass_rate
+        """
+        try:
+            from datetime import UTC
+            from ...workflow.execution_metrics import ExecutionMetricsCollector
+            import logging
+
+            collector = ExecutionMetricsCollector(project_root=self.project_root)
+
+            # Get metrics with reasonable limit (5000 max for ~30 days of heavy usage)
+            MAX_METRICS_TO_SCAN = 5000
+            all_metrics = collector.get_metrics(limit=MAX_METRICS_TO_SCAN)
+
+            # Log warning if we hit the limit
+            if len(all_metrics) >= MAX_METRICS_TO_SCAN:
+                logging.getLogger(__name__).warning(
+                    "Hit metrics scan limit (%d); results may be incomplete",
+                    MAX_METRICS_TO_SCAN
+                )
+
+            # Filter for review executions within the last N days (timezone-aware)
+            cutoff_date = datetime.now(UTC) - timedelta(days=days)
+            review_metrics = []
+            for m in all_metrics:
+                # Parse timestamp and ensure timezone-aware comparison
+                try:
+                    ts = datetime.fromisoformat(m.started_at.replace("Z", "+00:00"))
+                    # Convert naive datetime to UTC if needed
+                    if ts.tzinfo is None:
+                        from datetime import UTC
+                        ts = ts.replace(tzinfo=UTC)
+
+                    if ts >= cutoff_date:
+                        if m.command == "review" or (m.skill and "reviewer" in (m.skill or "").lower()):
+                            review_metrics.append(m)
+                except (ValueError, AttributeError):
+                    # Skip metrics with invalid timestamps
+                    continue
+
+            if not review_metrics:
+                return {
+                    "review_executions_count": 0,
+                    "success_rate": 0.0,
+                    "gate_pass_rate": None,
+                }
+
+            total = len(review_metrics)
+            success_count = sum(1 for m in review_metrics if m.status == "success")
+            success_rate = (success_count / total * 100) if total > 0 else 0.0
+
+            # Calculate gate pass rate (only for metrics that have gate_pass field)
+            gate_pass_metrics = [m for m in review_metrics if m.gate_pass is not None]
+            if gate_pass_metrics:
+                gate_pass_count = sum(1 for m in gate_pass_metrics if m.gate_pass is True)
+                gate_pass_rate = (gate_pass_count / len(gate_pass_metrics) * 100)
+            else:
+                gate_pass_rate = None
+
+            return {
+                "review_executions_count": total,
+                "success_rate": success_rate,
+                "gate_pass_rate": gate_pass_rate,
+            }
+
+        except Exception as e:
+            # If fallback fails, log and return empty result
+            import logging
+            logging.getLogger(__name__).debug(
+                "Failed to compute outcomes from execution metrics: %s", e
+            )
+            return {
+                "review_executions_count": 0,
+                "success_rate": 0.0,
+                "gate_pass_rate": None,
+            }
+
     def run(self) -> HealthCheckResult:
         """
         Run outcome health check.
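For reference, here is a minimal standalone sketch of the rate math used in the new fallback method, with hypothetical metric records standing in for ExecutionMetricsCollector output (FakeMetric and its values are illustrative, not part of the package):

```python
from dataclasses import dataclass
from datetime import UTC, datetime, timedelta


@dataclass
class FakeMetric:  # hypothetical stand-in for a collected execution metric
    started_at: str
    command: str
    status: str
    gate_pass: bool | None
    skill: str | None = None


metrics = [
    FakeMetric(datetime.now(UTC).isoformat(), "review", "success", True),
    FakeMetric(datetime.now(UTC).isoformat(), "review", "success", None),
    FakeMetric((datetime.now(UTC) - timedelta(days=45)).isoformat(), "review", "failure", False),
]

# Keep only review executions from the last 30 days, as the diff does.
cutoff = datetime.now(UTC) - timedelta(days=30)
recent_reviews = [
    m for m in metrics
    if datetime.fromisoformat(m.started_at.replace("Z", "+00:00")) >= cutoff
    and (m.command == "review" or (m.skill and "reviewer" in m.skill.lower()))
]

total = len(recent_reviews)
success_rate = (sum(m.status == "success" for m in recent_reviews) / total * 100) if total else 0.0
gated = [m for m in recent_reviews if m.gate_pass is not None]
gate_pass_rate = (sum(m.gate_pass for m in gated) / len(gated) * 100) if gated else None
print(total, success_rate, gate_pass_rate)  # 2 100.0 100.0
```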
@@ -111,52 +195,56 @@ class OutcomeHealthCheck(HealthCheck):
         # Check if we have any data; if not, try fallback to execution metrics (review steps)
         if not review_artifacts and not agents_data:
             # Fallback: derive outcomes from execution metrics (review steps, gate_pass)
             [46 removed lines; their content is not rendered in this diff view]
+            import logging
+            fallback_data = self._compute_outcomes_from_execution_metrics(days=30)
+
+            if fallback_data["review_executions_count"] > 0:
+                total = fallback_data["review_executions_count"]
+                success_rate = fallback_data["success_rate"]
+                gate_pass_rate = fallback_data["gate_pass_rate"]
+
+                # Calculate score: 60 base + 10 if success_rate ≥80% + 5 if gate_pass_rate ≥70%
+                fallback_score = 60.0
+                if success_rate >= 80.0:
+                    fallback_score += 10.0
+                if gate_pass_rate is not None and gate_pass_rate >= 70.0:
+                    fallback_score += 5.0
+
+                # Build message
+                gate_msg = f"{gate_pass_rate:.0f}% passed gate" if gate_pass_rate is not None else "no gate data"
+                message = (
+                    f"Outcomes derived from execution metrics: {total} review steps, "
+                    f"{gate_msg}"
+                )
+
+                logging.getLogger(__name__).info(
+                    "Outcomes fallback activated: %d review executions processed", total
+                )
+
+                return HealthCheckResult(
+                    name=self.name,
+                    status="degraded",
+                    score=fallback_score,
+                    message=message,
+                    details={
+                        "average_score": 0.0,
+                        "score_trend": "unknown",
+                        "score_change": 0.0,
+                        "review_artifacts_count": 0,
+                        "improvement_cycles": 0,
+                        "reports_dir": str(self.reports_dir),
+                        "fallback_used": True,
+                        "fallback_source": "execution_metrics",
+                        "review_executions_count": total,
+                        "success_rate": success_rate,
+                        "gate_pass_rate": gate_pass_rate,
+                        "issues": [],
+                    },
+                    remediation=[
+                        "Run reviewer agent or quality workflows to generate review artifacts"
+                    ],
+                )
+
             score = 50.0
             issues.append("No quality metrics available")
             remediation.append("Run reviewer agent or quality workflows to generate metrics")
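A quick worked example of the fallback scoring rule added above (60 base, +10 when success_rate ≥ 80, +5 when gate_pass_rate ≥ 70); the helper name and numbers are illustrative only:

```python
def fallback_score(success_rate: float, gate_pass_rate: float | None) -> float:
    # Mirrors the rule in the hunk: 60 base, +10 if success_rate >= 80, +5 if gate_pass_rate >= 70
    score = 60.0
    if success_rate >= 80.0:
        score += 10.0
    if gate_pass_rate is not None and gate_pass_rate >= 70.0:
        score += 5.0
    return score


print(fallback_score(85.0, 72.0))   # 75.0
print(fallback_score(50.0, None))   # 60.0
```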
tapps_agents/health/orchestrator.py:

@@ -175,9 +175,9 @@ class HealthOrchestrator:
 
         overall_score = weighted_score / total_weight if total_weight > 0 else 0.0
 
-        # Determine overall status
+        # Determine overall status (HM-001-S3: degraded when score >= 75 and only non-critical unhealthy)
         critical_checks = {"environment", "execution"}
-
+        non_critical_checks = {"outcomes", "knowledge_base", "context7_cache", "automation"}
         unhealthy_checks = [
             name
             for name, result in results.items()
@@ -189,16 +189,19 @@ class HealthOrchestrator:
             if name in results
         )
 
+        status_reason: str | None = None
         if status_counts["unhealthy"] > 0:
            overall_status = "unhealthy"
-            # If score is high (>=75) and only non-critical checks are unhealthy, show degraded
            if (
                overall_score >= 75.0
                and critical_healthy
                and unhealthy_checks
-                and all(c in
+                and all(c in non_critical_checks for c in unhealthy_checks)
            ):
                overall_status = "degraded"
+                status_reason = (
+                    "Status degraded due to non-critical checks; core functionality is healthy"
+                )
        elif status_counts["degraded"] > 0:
            overall_status = "degraded"
        else:
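The two orchestrator hunks above implement the HM-001-S3 override. Below is a self-contained sketch of that decision, assuming the final else branch (not shown in the hunk) sets "healthy"; the function name is ours, not the package's:

```python
def decide_overall_status(
    overall_score: float,
    status_counts: dict[str, int],
    critical_healthy: bool,
    unhealthy_checks: list[str],
) -> tuple[str, str | None]:
    """Sketch of the status logic above; returns (status, status_reason)."""
    non_critical_checks = {"outcomes", "knowledge_base", "context7_cache", "automation"}
    status_reason = None
    if status_counts["unhealthy"] > 0:
        status = "unhealthy"
        if (
            overall_score >= 75.0
            and critical_healthy
            and unhealthy_checks
            and all(c in non_critical_checks for c in unhealthy_checks)
        ):
            status = "degraded"
            status_reason = "Status degraded due to non-critical checks; core functionality is healthy"
    elif status_counts["degraded"] > 0:
        status = "degraded"
    else:
        status = "healthy"  # assumed final branch; not shown in the hunk
    return status, status_reason


# Only "outcomes" is unhealthy and the score is high, so the override applies:
print(decide_overall_status(82.0, {"unhealthy": 1, "degraded": 0}, True, ["outcomes"]))
# ('degraded', 'Status degraded due to non-critical checks; core functionality is healthy')
```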
@@ -243,12 +246,17 @@ class HealthOrchestrator:
                if rem not in prioritized_remediations:
                    prioritized_remediations.append(rem)
 
+        details: dict[str, Any] = {}
+        if status_reason:
+            details["status_reason"] = status_reason
+
        return {
            "status": overall_status,
            "score": overall_score,
            "message": f"Overall health: {overall_status} ({overall_score:.1f}/100)",
            "checks_count": len(results),
            "status_counts": status_counts,
+            "details": details,
            "checks": {
                name: {
                    "status": result.status,
tapps_agents/hooks/__init__.py (new file):

@@ -0,0 +1,33 @@
+"""
+Hook system for TappsCodingAgents.
+
+Provides event-driven automation: UserPromptSubmit, PostToolUse, SessionStart,
+SessionEnd, WorkflowComplete. Configuration via .tapps-agents/hooks.yaml.
+"""
+
+from .config import HookDefinition, HooksConfig, load_hooks_config
+from .executor import HookResult, run_hook
+from .events import (
+    HookEventType,
+    PostToolUseEvent,
+    SessionEndEvent,
+    SessionStartEvent,
+    UserPromptSubmitEvent,
+    WorkflowCompleteEvent,
+)
+from .manager import HookManager
+
+__all__ = [
+    "HookDefinition",
+    "HookManager",
+    "HooksConfig",
+    "HookResult",
+    "load_hooks_config",
+    "run_hook",
+    "HookEventType",
+    "PostToolUseEvent",
+    "SessionEndEvent",
+    "SessionStartEvent",
+    "UserPromptSubmitEvent",
+    "WorkflowCompleteEvent",
+]
tapps_agents/hooks/config.py (new file):

@@ -0,0 +1,140 @@
+"""
+Hook configuration schema and loader.
+
+Loads and validates .tapps-agents/hooks.yaml with event definitions,
+matchers, and commands. Supported events: UserPromptSubmit, PostToolUse,
+SessionStart, SessionEnd, WorkflowComplete.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import yaml
+from pydantic import BaseModel, Field, model_validator
+
+# Supported hook event types
+HOOK_EVENT_TYPES = frozenset({
+    "UserPromptSubmit",
+    "PostToolUse",
+    "SessionStart",
+    "SessionEnd",
+    "WorkflowComplete",
+})
+
+
+class HookDefinition(BaseModel):
+    """Single hook definition within an event."""
+
+    name: str = Field(..., min_length=1, description="Hook display name")
+    command: str = Field(..., min_length=1, description="Shell command to execute")
+    enabled: bool = Field(default=True, description="Whether hook is enabled")
+    matcher: str | None = Field(
+        default=None,
+        description="Tool name matcher for PostToolUse (e.g. 'Write|Edit')",
+    )
+    file_patterns: list[str] | None = Field(
+        default=None,
+        description="Glob patterns for file filtering (e.g. ['*.py'])",
+    )
+    fail_on_error: bool = Field(
+        default=False,
+        description="If true, non-zero exit fails workflow",
+    )
+
+    model_config = {"extra": "forbid"}
+
+
+class HooksConfig(BaseModel):
+    """
+    Root schema for hooks.yaml.
+
+    Top-level key is 'hooks'. Each event maps to a list of HookDefinition.
+    """
+
+    hooks: dict[str, list[HookDefinition]] = Field(
+        default_factory=dict,
+        description="Event name -> list of hook definitions",
+    )
+
+    model_config = {"extra": "forbid"}
+
+    @model_validator(mode="after")
+    def validate_event_names(self) -> "HooksConfig":
+        """Ensure only supported event names are used."""
+        for event_name in self.hooks:
+            if event_name not in HOOK_EVENT_TYPES:
+                raise ValueError(
+                    f"Unsupported hook event '{event_name}'. "
+                    f"Supported: {', '.join(sorted(HOOK_EVENT_TYPES))}"
+                )
+        return self
+
+
+def load_hooks_config(
+    config_path: Path | str | None = None,
+    project_root: Path | None = None,
+) -> HooksConfig:
+    """
+    Load and validate hooks.yaml.
+
+    Args:
+        config_path: Explicit path to hooks.yaml. If None, uses
+            project_root/.tapps-agents/hooks.yaml.
+        project_root: Project root (default: cwd). Used when config_path is None.
+
+    Returns:
+        Validated HooksConfig. Returns empty config (all events empty) when
+        file is missing (safe defaults).
+
+    Raises:
+        FileNotFoundError: If config_path is given and file doesn't exist.
+        yaml.YAMLError: On YAML parse errors (includes path/line in message).
+        ValueError: On schema validation failures.
+    """
+    project_root = project_root or Path.cwd()
+    if config_path is None:
+        config_path = project_root / ".tapps-agents" / "hooks.yaml"
+    else:
+        config_path = Path(config_path)
+        if not config_path.is_absolute():
+            config_path = project_root / config_path
+
+    if not config_path.exists():
+        return HooksConfig(hooks={})
+
+    try:
+        content = config_path.read_text(encoding="utf-8")
+    except OSError as e:
+        raise FileNotFoundError(
+            f"Cannot read hooks config from {config_path}: {e}"
+        ) from e
+
+    try:
+        raw = yaml.safe_load(content)
+    except yaml.YAMLError as e:
+        path_hint = str(config_path)
+        if hasattr(e, "problem_mark") and e.problem_mark:
+            line = e.problem_mark.line + 1
+            col = e.problem_mark.column + 1
+            path_hint = f"{config_path}:{line}:{col}"
+        raise yaml.YAMLError(f"Invalid YAML in {path_hint}: {e}") from e
+
+    if raw is None:
+        return HooksConfig(hooks={})
+
+    if not isinstance(raw, dict):
+        raise ValueError(
+            f"hooks.yaml must be a YAML object (dict), got {type(raw).__name__} "
+            f"at {config_path}"
+        )
+
+    if "hooks" not in raw:
+        return HooksConfig(hooks={})
+
+    try:
+        return HooksConfig.model_validate(raw)
+    except Exception as e:
+        raise ValueError(
+            f"Hooks config validation failed at {config_path}: {e}"
+        ) from e
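A small sketch of how a hooks.yaml document would be validated against the HooksConfig schema above; the hook entries and commands are made up, only the field names come from the diff:

```python
import yaml

# Assuming the module is importable as tapps_agents.hooks.config, per the RECORD above.
from tapps_agents.hooks.config import HooksConfig

# Hypothetical hooks.yaml content exercising the schema fields shown in the diff.
raw = yaml.safe_load("""
hooks:
  PostToolUse:
    - name: format-python
      command: "ruff format {file_path}"
      matcher: "Write|Edit"
      file_patterns: ["*.py"]
  WorkflowComplete:
    - name: notify
      command: "echo workflow {workflow_id} finished: {workflow_status}"
      fail_on_error: false
""")

config = HooksConfig.model_validate(raw)
print([h.name for h in config.hooks["PostToolUse"]])  # ['format-python']

# An unsupported event name (e.g. "PreToolUse") would fail the model_validator with a ValueError.
```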
tapps_agents/hooks/events.py (new file):

@@ -0,0 +1,135 @@
+"""
+Hook event definitions.
+
+Defines the five hook events with data structures and serialization for
+type-safe use across the hook system: UserPromptSubmit, PostToolUse,
+SessionStart, SessionEnd, WorkflowComplete.
+"""
+
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass
+from enum import Enum
+from typing import Any
+
+
+class HookEventType(str, Enum):
+    """Supported hook event types."""
+
+    USER_PROMPT_SUBMIT = "UserPromptSubmit"
+    POST_TOOL_USE = "PostToolUse"
+    SESSION_START = "SessionStart"
+    SESSION_END = "SessionEnd"
+    WORKFLOW_COMPLETE = "WorkflowComplete"
+
+
+@dataclass(frozen=True)
+class UserPromptSubmitEvent:
+    """Payload for UserPromptSubmit - before workflow starts."""
+
+    prompt: str
+    project_root: str
+    workflow_type: str | None = None
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialize for logging and context injection."""
+        return asdict(self)
+
+    def to_env(self) -> dict[str, str]:
+        """Environment variables for hook execution."""
+        d: dict[str, str] = {"TAPPS_PROMPT": self.prompt, "TAPPS_PROJECT_ROOT": self.project_root}
+        if self.workflow_type:
+            d["TAPPS_WORKFLOW_TYPE"] = self.workflow_type
+        return d
+
+
+@dataclass(frozen=True)
+class PostToolUseEvent:
+    """Payload for PostToolUse - after Write/Edit completes."""
+
+    file_path: str | None
+    file_paths: list[str]
+    tool_name: str
+    project_root: str
+    workflow_id: str | None = None
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialize for logging and context injection."""
+        return asdict(self)
+
+    def to_env(self) -> dict[str, str]:
+        """Environment variables for hook execution."""
+        d: dict[str, str] = {
+            "TAPPS_FILE_PATH": self.file_path or "",
+            "TAPPS_FILE_PATHS": " ".join(self.file_paths),
+            "TAPPS_TOOL_NAME": self.tool_name,
+            "TAPPS_PROJECT_ROOT": self.project_root,
+        }
+        if self.workflow_id:
+            d["TAPPS_WORKFLOW_ID"] = self.workflow_id
+        return d
+
+
+@dataclass(frozen=True)
+class SessionStartEvent:
+    """Payload for SessionStart - CLI/Cursor session begins."""
+
+    session_id: str
+    project_root: str
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialize for logging and context injection."""
+        return asdict(self)
+
+    def to_env(self) -> dict[str, str]:
+        """Environment variables for hook execution."""
+        return {
+            "TAPPS_SESSION_ID": self.session_id,
+            "TAPPS_PROJECT_ROOT": self.project_root,
+        }
+
+
+@dataclass(frozen=True)
+class SessionEndEvent:
+    """Payload for SessionEnd - session ends."""
+
+    session_id: str
+    project_root: str
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialize for logging and context injection."""
+        return asdict(self)
+
+    def to_env(self) -> dict[str, str]:
+        """Environment variables for hook execution."""
+        return {
+            "TAPPS_SESSION_ID": self.session_id,
+            "TAPPS_PROJECT_ROOT": self.project_root,
+        }
+
+
+@dataclass(frozen=True)
+class WorkflowCompleteEvent:
+    """Payload for WorkflowComplete - after workflow success/fail."""
+
+    workflow_type: str
+    workflow_id: str
+    status: str  # completed, failed, cancelled
+    project_root: str
+    beads_issue_id: str | None = None
+
+    def to_dict(self) -> dict[str, Any]:
+        """Serialize for logging and context injection."""
+        return asdict(self)
+
+    def to_env(self) -> dict[str, str]:
+        """Environment variables for hook execution."""
+        d: dict[str, str] = {
+            "TAPPS_WORKFLOW_TYPE": self.workflow_type,
+            "TAPPS_WORKFLOW_ID": self.workflow_id,
+            "TAPPS_WORKFLOW_STATUS": self.status,
+            "TAPPS_PROJECT_ROOT": self.project_root,
+        }
+        if self.beads_issue_id:
+            d["TAPPS_BEADS_ISSUE_ID"] = self.beads_issue_id
+        return d
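An illustration of how an event payload becomes TAPPS_* environment variables for a hook process (paths and IDs here are hypothetical):

```python
from tapps_agents.hooks.events import PostToolUseEvent

event = PostToolUseEvent(
    file_path="src/app.py",
    file_paths=["src/app.py", "src/util.py"],
    tool_name="Edit",
    project_root="/work/my-project",
    workflow_id="wf-123",
)

print(event.to_env())
# {'TAPPS_FILE_PATH': 'src/app.py',
#  'TAPPS_FILE_PATHS': 'src/app.py src/util.py',
#  'TAPPS_TOOL_NAME': 'Edit',
#  'TAPPS_PROJECT_ROOT': '/work/my-project',
#  'TAPPS_WORKFLOW_ID': 'wf-123'}
```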
tapps_agents/hooks/executor.py (new file):

@@ -0,0 +1,128 @@
+"""
+Hook executor: run hook shell commands with TAPPS_* env vars, timeout, and capture.
+
+Runs hooks synchronously; captures stdout/stderr; configurable timeout (default 30s).
+Non-zero exit is logged and optionally fails the workflow when fail_on_error is True.
+"""
+
+from __future__ import annotations
+
+import logging
+import os
+import subprocess
+from dataclasses import dataclass
+from pathlib import Path
+
+from .config import HookDefinition
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_TIMEOUT_SECONDS = 30
+
+
+@dataclass
+class HookResult:
+    """Result of running a single hook."""
+
+    stdout: str
+    stderr: str
+    returncode: int
+    timed_out: bool
+    hook_name: str
+
+    @property
+    def success(self) -> bool:
+        """True if hook completed without timeout and returncode 0."""
+        return not self.timed_out and self.returncode == 0
+
+
+def _substitute_placeholders(command: str, env: dict[str, str]) -> str:
+    """Replace {name} in command with env value; names are lower-case env keys."""
+    result = command
+    for key, value in env.items():
+        # Support both {TAPPS_FILE_PATH} and {file_path} style
+        placeholder = "{" + key + "}"
+        if placeholder in result:
+            result = result.replace(placeholder, value)
+        # Snake-case style: TAPPS_FILE_PATH -> file_path
+        snake = key.replace("TAPPS_", "").lower()
+        if "{" + snake + "}" in result:
+            result = result.replace("{" + snake + "}", value)
+    return result
+
+
+def run_hook(
+    hook: HookDefinition,
+    env: dict[str, str],
+    *,
+    timeout_seconds: int | None = None,
+    project_root: Path | None = None,
+) -> HookResult:
+    """
+    Execute a hook's shell command with the given environment.
+
+    Args:
+        hook: Hook definition (name, command, fail_on_error).
+        env: Environment variables for the process (e.g. TAPPS_FILE_PATH, TAPPS_PROMPT).
+            Merged over current process env; passed to the subprocess.
+        timeout_seconds: Max run time in seconds; default DEFAULT_TIMEOUT_SECONDS.
+        project_root: Optional project root; cwd for the subprocess when set.
+
+    Returns:
+        HookResult with stdout, stderr, returncode, timed_out, hook_name.
+    """
+    timeout_seconds = timeout_seconds if timeout_seconds is not None else DEFAULT_TIMEOUT_SECONDS
+    full_env = {**os.environ, **env}
+    command = _substitute_placeholders(hook.command, full_env)
+    cwd = Path(project_root) if project_root else None
+
+    try:
+        proc = subprocess.run(
+            command,
+            shell=True,
+            capture_output=True,
+            text=True,
+            encoding="utf-8",
+            errors="replace",
+            env=full_env,
+            cwd=cwd,
+            timeout=timeout_seconds,
+        )
+        stdout = proc.stdout or ""
+        stderr = proc.stderr or ""
+        if stderr.strip():
+            logger.warning("Hook %s stderr: %s", hook.name, stderr.strip())
+        if proc.returncode != 0:
+            logger.error(
+                "Hook %s exited with code %d: %s",
+                hook.name,
+                proc.returncode,
+                stderr.strip() or stdout.strip() or "(no output)",
+            )
+        return HookResult(
+            stdout=stdout,
+            stderr=stderr,
+            returncode=proc.returncode,
+            timed_out=False,
+            hook_name=hook.name,
+        )
+    except subprocess.TimeoutExpired as e:
+        logger.error("Hook %s timed out after %s seconds", hook.name, timeout_seconds)
+        stdout = (e.stdout or b"").decode("utf-8", errors="replace") if e.stdout else ""
+        stderr = (e.stderr or b"").decode("utf-8", errors="replace") if e.stderr else ""
+        return HookResult(
+            stdout=stdout,
+            stderr=stderr + f"\n(Hook timed out after {timeout_seconds}s)",
+            returncode=-1,
+            timed_out=True,
+            hook_name=hook.name,
+        )
+    except Exception as e:
+        logger.exception("Hook %s failed: %s", hook.name, e)
+        return HookResult(
+            stdout="",
+            stderr=str(e),
+            returncode=-1,
+            timed_out=False,
+            hook_name=hook.name,
+        )
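A hedged end-to-end sketch tying the new modules together: load the config, build an event, and run the matching PostToolUse hooks via run_hook. The dispatch here is deliberately naive; matcher and file_patterns filtering lives in HookManager, whose code is not shown in this diff, and the project path is hypothetical:

```python
from pathlib import Path

from tapps_agents.hooks import PostToolUseEvent, load_hooks_config, run_hook

project_root = Path("/work/my-project")  # hypothetical project
config = load_hooks_config(project_root=project_root)

event = PostToolUseEvent(
    file_path="src/app.py",
    file_paths=["src/app.py"],
    tool_name="Edit",
    project_root=str(project_root),
)

# Naive dispatch: run every enabled PostToolUse hook with the event's TAPPS_* env.
for hook in config.hooks.get("PostToolUse", []):
    if not hook.enabled:
        continue
    result = run_hook(hook, event.to_env(), timeout_seconds=30, project_root=project_root)
    if not result.success and hook.fail_on_error:
        raise RuntimeError(f"Hook {result.hook_name} failed: {result.stderr}")
```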