htmlgraph 0.9.3__py3-none-any.whl → 0.27.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- htmlgraph/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/.htmlgraph/agents.json +72 -0
- htmlgraph/.htmlgraph/htmlgraph.db +0 -0
- htmlgraph/__init__.py +173 -17
- htmlgraph/__init__.pyi +123 -0
- htmlgraph/agent_detection.py +127 -0
- htmlgraph/agent_registry.py +45 -30
- htmlgraph/agents.py +160 -107
- htmlgraph/analytics/__init__.py +9 -2
- htmlgraph/analytics/cli.py +190 -51
- htmlgraph/analytics/cost_analyzer.py +391 -0
- htmlgraph/analytics/cost_monitor.py +664 -0
- htmlgraph/analytics/cost_reporter.py +675 -0
- htmlgraph/analytics/cross_session.py +617 -0
- htmlgraph/analytics/dependency.py +192 -100
- htmlgraph/analytics/pattern_learning.py +771 -0
- htmlgraph/analytics/session_graph.py +707 -0
- htmlgraph/analytics/strategic/__init__.py +80 -0
- htmlgraph/analytics/strategic/cost_optimizer.py +611 -0
- htmlgraph/analytics/strategic/pattern_detector.py +876 -0
- htmlgraph/analytics/strategic/preference_manager.py +709 -0
- htmlgraph/analytics/strategic/suggestion_engine.py +747 -0
- htmlgraph/analytics/work_type.py +190 -14
- htmlgraph/analytics_index.py +135 -51
- htmlgraph/api/__init__.py +3 -0
- htmlgraph/api/cost_alerts_websocket.py +416 -0
- htmlgraph/api/main.py +2498 -0
- htmlgraph/api/static/htmx.min.js +1 -0
- htmlgraph/api/static/style-redesign.css +1344 -0
- htmlgraph/api/static/style.css +1079 -0
- htmlgraph/api/templates/dashboard-redesign.html +1366 -0
- htmlgraph/api/templates/dashboard.html +794 -0
- htmlgraph/api/templates/partials/activity-feed-hierarchical.html +326 -0
- htmlgraph/api/templates/partials/activity-feed.html +1100 -0
- htmlgraph/api/templates/partials/agents-redesign.html +317 -0
- htmlgraph/api/templates/partials/agents.html +317 -0
- htmlgraph/api/templates/partials/event-traces.html +373 -0
- htmlgraph/api/templates/partials/features-kanban-redesign.html +509 -0
- htmlgraph/api/templates/partials/features.html +578 -0
- htmlgraph/api/templates/partials/metrics-redesign.html +346 -0
- htmlgraph/api/templates/partials/metrics.html +346 -0
- htmlgraph/api/templates/partials/orchestration-redesign.html +443 -0
- htmlgraph/api/templates/partials/orchestration.html +198 -0
- htmlgraph/api/templates/partials/spawners.html +375 -0
- htmlgraph/api/templates/partials/work-items.html +613 -0
- htmlgraph/api/websocket.py +538 -0
- htmlgraph/archive/__init__.py +24 -0
- htmlgraph/archive/bloom.py +234 -0
- htmlgraph/archive/fts.py +297 -0
- htmlgraph/archive/manager.py +583 -0
- htmlgraph/archive/search.py +244 -0
- htmlgraph/atomic_ops.py +560 -0
- htmlgraph/attribute_index.py +208 -0
- htmlgraph/bounded_paths.py +539 -0
- htmlgraph/builders/__init__.py +14 -0
- htmlgraph/builders/base.py +118 -29
- htmlgraph/builders/bug.py +150 -0
- htmlgraph/builders/chore.py +119 -0
- htmlgraph/builders/epic.py +150 -0
- htmlgraph/builders/feature.py +31 -6
- htmlgraph/builders/insight.py +195 -0
- htmlgraph/builders/metric.py +217 -0
- htmlgraph/builders/pattern.py +202 -0
- htmlgraph/builders/phase.py +162 -0
- htmlgraph/builders/spike.py +52 -19
- htmlgraph/builders/track.py +148 -72
- htmlgraph/cigs/__init__.py +81 -0
- htmlgraph/cigs/autonomy.py +385 -0
- htmlgraph/cigs/cost.py +475 -0
- htmlgraph/cigs/messages_basic.py +472 -0
- htmlgraph/cigs/messaging.py +365 -0
- htmlgraph/cigs/models.py +771 -0
- htmlgraph/cigs/pattern_storage.py +427 -0
- htmlgraph/cigs/patterns.py +503 -0
- htmlgraph/cigs/posttool_analyzer.py +234 -0
- htmlgraph/cigs/reporter.py +818 -0
- htmlgraph/cigs/tracker.py +317 -0
- htmlgraph/cli/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/cli/.htmlgraph/agents.json +72 -0
- htmlgraph/cli/.htmlgraph/htmlgraph.db +0 -0
- htmlgraph/cli/__init__.py +42 -0
- htmlgraph/cli/__main__.py +6 -0
- htmlgraph/cli/analytics.py +1424 -0
- htmlgraph/cli/base.py +685 -0
- htmlgraph/cli/constants.py +206 -0
- htmlgraph/cli/core.py +954 -0
- htmlgraph/cli/main.py +147 -0
- htmlgraph/cli/models.py +475 -0
- htmlgraph/cli/templates/__init__.py +1 -0
- htmlgraph/cli/templates/cost_dashboard.py +399 -0
- htmlgraph/cli/work/__init__.py +239 -0
- htmlgraph/cli/work/browse.py +115 -0
- htmlgraph/cli/work/features.py +568 -0
- htmlgraph/cli/work/orchestration.py +676 -0
- htmlgraph/cli/work/report.py +728 -0
- htmlgraph/cli/work/sessions.py +466 -0
- htmlgraph/cli/work/snapshot.py +559 -0
- htmlgraph/cli/work/tracks.py +486 -0
- htmlgraph/cli_commands/__init__.py +1 -0
- htmlgraph/cli_commands/feature.py +195 -0
- htmlgraph/cli_framework.py +115 -0
- htmlgraph/collections/__init__.py +18 -0
- htmlgraph/collections/base.py +415 -98
- htmlgraph/collections/bug.py +53 -0
- htmlgraph/collections/chore.py +53 -0
- htmlgraph/collections/epic.py +53 -0
- htmlgraph/collections/feature.py +12 -26
- htmlgraph/collections/insight.py +100 -0
- htmlgraph/collections/metric.py +92 -0
- htmlgraph/collections/pattern.py +97 -0
- htmlgraph/collections/phase.py +53 -0
- htmlgraph/collections/session.py +194 -0
- htmlgraph/collections/spike.py +56 -16
- htmlgraph/collections/task_delegation.py +241 -0
- htmlgraph/collections/todo.py +511 -0
- htmlgraph/collections/traces.py +487 -0
- htmlgraph/config/cost_models.json +56 -0
- htmlgraph/config.py +190 -0
- htmlgraph/context_analytics.py +344 -0
- htmlgraph/converter.py +216 -28
- htmlgraph/cost_analysis/__init__.py +5 -0
- htmlgraph/cost_analysis/analyzer.py +438 -0
- htmlgraph/dashboard.html +2406 -307
- htmlgraph/dashboard.html.backup +6592 -0
- htmlgraph/dashboard.html.bak +7181 -0
- htmlgraph/dashboard.html.bak2 +7231 -0
- htmlgraph/dashboard.html.bak3 +7232 -0
- htmlgraph/db/__init__.py +38 -0
- htmlgraph/db/queries.py +790 -0
- htmlgraph/db/schema.py +1788 -0
- htmlgraph/decorators.py +317 -0
- htmlgraph/dependency_models.py +19 -2
- htmlgraph/deploy.py +142 -125
- htmlgraph/deployment_models.py +474 -0
- htmlgraph/docs/API_REFERENCE.md +841 -0
- htmlgraph/docs/HTTP_API.md +750 -0
- htmlgraph/docs/INTEGRATION_GUIDE.md +752 -0
- htmlgraph/docs/ORCHESTRATION_PATTERNS.md +717 -0
- htmlgraph/docs/README.md +532 -0
- htmlgraph/docs/__init__.py +77 -0
- htmlgraph/docs/docs_version.py +55 -0
- htmlgraph/docs/metadata.py +93 -0
- htmlgraph/docs/migrations.py +232 -0
- htmlgraph/docs/template_engine.py +143 -0
- htmlgraph/docs/templates/_sections/cli_reference.md.j2 +52 -0
- htmlgraph/docs/templates/_sections/core_concepts.md.j2 +29 -0
- htmlgraph/docs/templates/_sections/sdk_basics.md.j2 +69 -0
- htmlgraph/docs/templates/base_agents.md.j2 +78 -0
- htmlgraph/docs/templates/example_user_override.md.j2 +47 -0
- htmlgraph/docs/version_check.py +163 -0
- htmlgraph/edge_index.py +182 -27
- htmlgraph/error_handler.py +544 -0
- htmlgraph/event_log.py +100 -52
- htmlgraph/event_migration.py +13 -4
- htmlgraph/exceptions.py +49 -0
- htmlgraph/file_watcher.py +101 -28
- htmlgraph/find_api.py +75 -63
- htmlgraph/git_events.py +145 -63
- htmlgraph/graph.py +1122 -106
- htmlgraph/hooks/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/hooks/.htmlgraph/agents.json +72 -0
- htmlgraph/hooks/.htmlgraph/index.sqlite +0 -0
- htmlgraph/hooks/__init__.py +45 -0
- htmlgraph/hooks/bootstrap.py +169 -0
- htmlgraph/hooks/cigs_pretool_enforcer.py +354 -0
- htmlgraph/hooks/concurrent_sessions.py +208 -0
- htmlgraph/hooks/context.py +350 -0
- htmlgraph/hooks/drift_handler.py +525 -0
- htmlgraph/hooks/event_tracker.py +1314 -0
- htmlgraph/hooks/git_commands.py +175 -0
- htmlgraph/hooks/hooks-config.example.json +12 -0
- htmlgraph/hooks/installer.py +343 -0
- htmlgraph/hooks/orchestrator.py +674 -0
- htmlgraph/hooks/orchestrator_reflector.py +223 -0
- htmlgraph/hooks/post-checkout.sh +28 -0
- htmlgraph/hooks/post-commit.sh +24 -0
- htmlgraph/hooks/post-merge.sh +26 -0
- htmlgraph/hooks/post_tool_use_failure.py +273 -0
- htmlgraph/hooks/post_tool_use_handler.py +257 -0
- htmlgraph/hooks/posttooluse.py +408 -0
- htmlgraph/hooks/pre-commit.sh +94 -0
- htmlgraph/hooks/pre-push.sh +28 -0
- htmlgraph/hooks/pretooluse.py +819 -0
- htmlgraph/hooks/prompt_analyzer.py +637 -0
- htmlgraph/hooks/session_handler.py +668 -0
- htmlgraph/hooks/session_summary.py +395 -0
- htmlgraph/hooks/state_manager.py +504 -0
- htmlgraph/hooks/subagent_detection.py +202 -0
- htmlgraph/hooks/subagent_stop.py +369 -0
- htmlgraph/hooks/task_enforcer.py +255 -0
- htmlgraph/hooks/task_validator.py +177 -0
- htmlgraph/hooks/validator.py +628 -0
- htmlgraph/ids.py +41 -27
- htmlgraph/index.d.ts +286 -0
- htmlgraph/learning.py +767 -0
- htmlgraph/mcp_server.py +69 -23
- htmlgraph/models.py +1586 -87
- htmlgraph/operations/README.md +62 -0
- htmlgraph/operations/__init__.py +79 -0
- htmlgraph/operations/analytics.py +339 -0
- htmlgraph/operations/bootstrap.py +289 -0
- htmlgraph/operations/events.py +244 -0
- htmlgraph/operations/fastapi_server.py +231 -0
- htmlgraph/operations/hooks.py +350 -0
- htmlgraph/operations/initialization.py +597 -0
- htmlgraph/operations/initialization.py.backup +228 -0
- htmlgraph/operations/server.py +303 -0
- htmlgraph/orchestration/__init__.py +58 -0
- htmlgraph/orchestration/claude_launcher.py +179 -0
- htmlgraph/orchestration/command_builder.py +72 -0
- htmlgraph/orchestration/headless_spawner.py +281 -0
- htmlgraph/orchestration/live_events.py +377 -0
- htmlgraph/orchestration/model_selection.py +327 -0
- htmlgraph/orchestration/plugin_manager.py +140 -0
- htmlgraph/orchestration/prompts.py +137 -0
- htmlgraph/orchestration/spawner_event_tracker.py +383 -0
- htmlgraph/orchestration/spawners/__init__.py +16 -0
- htmlgraph/orchestration/spawners/base.py +194 -0
- htmlgraph/orchestration/spawners/claude.py +173 -0
- htmlgraph/orchestration/spawners/codex.py +435 -0
- htmlgraph/orchestration/spawners/copilot.py +294 -0
- htmlgraph/orchestration/spawners/gemini.py +471 -0
- htmlgraph/orchestration/subprocess_runner.py +36 -0
- htmlgraph/orchestration/task_coordination.py +343 -0
- htmlgraph/orchestration.md +563 -0
- htmlgraph/orchestrator-system-prompt-optimized.txt +863 -0
- htmlgraph/orchestrator.py +669 -0
- htmlgraph/orchestrator_config.py +357 -0
- htmlgraph/orchestrator_mode.py +328 -0
- htmlgraph/orchestrator_validator.py +133 -0
- htmlgraph/parallel.py +646 -0
- htmlgraph/parser.py +160 -35
- htmlgraph/path_query.py +608 -0
- htmlgraph/pattern_matcher.py +636 -0
- htmlgraph/planning.py +147 -52
- htmlgraph/pydantic_models.py +476 -0
- htmlgraph/quality_gates.py +350 -0
- htmlgraph/query_builder.py +109 -72
- htmlgraph/query_composer.py +509 -0
- htmlgraph/reflection.py +443 -0
- htmlgraph/refs.py +344 -0
- htmlgraph/repo_hash.py +512 -0
- htmlgraph/repositories/__init__.py +292 -0
- htmlgraph/repositories/analytics_repository.py +455 -0
- htmlgraph/repositories/analytics_repository_standard.py +628 -0
- htmlgraph/repositories/feature_repository.py +581 -0
- htmlgraph/repositories/feature_repository_htmlfile.py +668 -0
- htmlgraph/repositories/feature_repository_memory.py +607 -0
- htmlgraph/repositories/feature_repository_sqlite.py +858 -0
- htmlgraph/repositories/filter_service.py +620 -0
- htmlgraph/repositories/filter_service_standard.py +445 -0
- htmlgraph/repositories/shared_cache.py +621 -0
- htmlgraph/repositories/shared_cache_memory.py +395 -0
- htmlgraph/repositories/track_repository.py +552 -0
- htmlgraph/repositories/track_repository_htmlfile.py +619 -0
- htmlgraph/repositories/track_repository_memory.py +508 -0
- htmlgraph/repositories/track_repository_sqlite.py +711 -0
- htmlgraph/routing.py +8 -19
- htmlgraph/scripts/deploy.py +1 -2
- htmlgraph/sdk/__init__.py +398 -0
- htmlgraph/sdk/__init__.pyi +14 -0
- htmlgraph/sdk/analytics/__init__.py +19 -0
- htmlgraph/sdk/analytics/engine.py +155 -0
- htmlgraph/sdk/analytics/helpers.py +178 -0
- htmlgraph/sdk/analytics/registry.py +109 -0
- htmlgraph/sdk/base.py +484 -0
- htmlgraph/sdk/constants.py +216 -0
- htmlgraph/sdk/core.pyi +308 -0
- htmlgraph/sdk/discovery.py +120 -0
- htmlgraph/sdk/help/__init__.py +12 -0
- htmlgraph/sdk/help/mixin.py +699 -0
- htmlgraph/sdk/mixins/__init__.py +15 -0
- htmlgraph/sdk/mixins/attribution.py +113 -0
- htmlgraph/sdk/mixins/mixin.py +410 -0
- htmlgraph/sdk/operations/__init__.py +12 -0
- htmlgraph/sdk/operations/mixin.py +427 -0
- htmlgraph/sdk/orchestration/__init__.py +17 -0
- htmlgraph/sdk/orchestration/coordinator.py +203 -0
- htmlgraph/sdk/orchestration/spawner.py +204 -0
- htmlgraph/sdk/planning/__init__.py +19 -0
- htmlgraph/sdk/planning/bottlenecks.py +93 -0
- htmlgraph/sdk/planning/mixin.py +211 -0
- htmlgraph/sdk/planning/parallel.py +186 -0
- htmlgraph/sdk/planning/queue.py +210 -0
- htmlgraph/sdk/planning/recommendations.py +87 -0
- htmlgraph/sdk/planning/smart_planning.py +319 -0
- htmlgraph/sdk/session/__init__.py +19 -0
- htmlgraph/sdk/session/continuity.py +57 -0
- htmlgraph/sdk/session/handoff.py +110 -0
- htmlgraph/sdk/session/info.py +309 -0
- htmlgraph/sdk/session/manager.py +103 -0
- htmlgraph/sdk/strategic/__init__.py +26 -0
- htmlgraph/sdk/strategic/mixin.py +563 -0
- htmlgraph/server.py +685 -180
- htmlgraph/services/__init__.py +10 -0
- htmlgraph/services/claiming.py +199 -0
- htmlgraph/session_hooks.py +300 -0
- htmlgraph/session_manager.py +1392 -175
- htmlgraph/session_registry.py +587 -0
- htmlgraph/session_state.py +436 -0
- htmlgraph/session_warning.py +201 -0
- htmlgraph/sessions/__init__.py +23 -0
- htmlgraph/sessions/handoff.py +756 -0
- htmlgraph/setup.py +34 -17
- htmlgraph/spike_index.py +143 -0
- htmlgraph/sync_docs.py +12 -15
- htmlgraph/system_prompts.py +450 -0
- htmlgraph/templates/AGENTS.md.template +366 -0
- htmlgraph/templates/CLAUDE.md.template +97 -0
- htmlgraph/templates/GEMINI.md.template +87 -0
- htmlgraph/templates/orchestration-view.html +350 -0
- htmlgraph/track_builder.py +146 -15
- htmlgraph/track_manager.py +69 -21
- htmlgraph/transcript.py +890 -0
- htmlgraph/transcript_analytics.py +699 -0
- htmlgraph/types.py +323 -0
- htmlgraph/validation.py +115 -0
- htmlgraph/watch.py +8 -5
- htmlgraph/work_type_utils.py +3 -2
- {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/dashboard.html +2406 -307
- htmlgraph-0.27.5.data/data/htmlgraph/templates/AGENTS.md.template +366 -0
- htmlgraph-0.27.5.data/data/htmlgraph/templates/CLAUDE.md.template +97 -0
- htmlgraph-0.27.5.data/data/htmlgraph/templates/GEMINI.md.template +87 -0
- {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/METADATA +97 -64
- htmlgraph-0.27.5.dist-info/RECORD +337 -0
- {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/entry_points.txt +1 -1
- htmlgraph/cli.py +0 -2688
- htmlgraph/sdk.py +0 -709
- htmlgraph-0.9.3.dist-info/RECORD +0 -61
- {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/styles.css +0 -0
- {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1314 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
logger = logging.getLogger(__name__)
|
|
4
|
+
|
|
5
|
+
"""
|
|
6
|
+
HtmlGraph Event Tracker Module
|
|
7
|
+
|
|
8
|
+
Reusable event tracking logic for hook integrations.
|
|
9
|
+
Provides session management, drift detection, activity logging, and SQLite persistence.
|
|
10
|
+
|
|
11
|
+
Public API:
|
|
12
|
+
track_event(hook_type: str, tool_input: dict[str, Any]) -> dict
|
|
13
|
+
Main entry point for tracking hook events (PostToolUse, Stop, UserPromptSubmit)
|
|
14
|
+
|
|
15
|
+
Events are recorded to both:
|
|
16
|
+
- HTML files via SessionManager (existing)
|
|
17
|
+
- SQLite database via HtmlGraphDB (new - for dashboard queries)
|
|
18
|
+
|
|
19
|
+
Parent-child event linking:
|
|
20
|
+
- Database is the single source of truth for parent-child linking
|
|
21
|
+
- UserQuery events are stored in agent_events table with tool_name='UserQuery'
|
|
22
|
+
- get_parent_user_query() queries database for most recent UserQuery in session
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
import json
|
|
26
|
+
import os
|
|
27
|
+
import re
|
|
28
|
+
import subprocess
|
|
29
|
+
from datetime import datetime, timedelta, timezone
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import Any, cast # noqa: F401
|
|
32
|
+
|
|
33
|
+
from htmlgraph.db.schema import HtmlGraphDB
|
|
34
|
+
from htmlgraph.ids import generate_id
|
|
35
|
+
from htmlgraph.session_manager import SessionManager
|
|
36
|
+
|
|
37
|
+
# Drift classification queue (stored in session directory)
|
|
38
|
+
DRIFT_QUEUE_FILE = "drift-queue.json"
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def get_model_from_status_cache(session_id: str | None = None) -> str | None:
|
|
42
|
+
"""
|
|
43
|
+
Read current model from SQLite model_cache table.
|
|
44
|
+
|
|
45
|
+
The status line script writes model info to the model_cache table.
|
|
46
|
+
This allows hooks to know which Claude model is currently running,
|
|
47
|
+
even though hooks don't receive model info directly from Claude Code.
|
|
48
|
+
|
|
49
|
+
Args:
|
|
50
|
+
session_id: Unused, kept for backward compatibility.
|
|
51
|
+
|
|
52
|
+
Returns:
|
|
53
|
+
Model display name (e.g., "Opus 4.5", "Sonnet", "Haiku") or None if not found.
|
|
54
|
+
"""
|
|
55
|
+
import sqlite3
|
|
56
|
+
|
|
57
|
+
try:
|
|
58
|
+
# Try project database first
|
|
59
|
+
db_path = Path.cwd() / ".htmlgraph" / "htmlgraph.db"
|
|
60
|
+
if not db_path.exists():
|
|
61
|
+
return None
|
|
62
|
+
|
|
63
|
+
conn = sqlite3.connect(str(db_path), timeout=1.0)
|
|
64
|
+
cursor = conn.cursor()
|
|
65
|
+
|
|
66
|
+
# Check if model_cache table exists and has data
|
|
67
|
+
cursor.execute("SELECT model FROM model_cache WHERE id = 1 LIMIT 1")
|
|
68
|
+
row = cursor.fetchone()
|
|
69
|
+
conn.close()
|
|
70
|
+
|
|
71
|
+
if row and row[0] and row[0] != "Claude":
|
|
72
|
+
return str(row[0])
|
|
73
|
+
return str(row[0]) if row else None
|
|
74
|
+
|
|
75
|
+
except Exception:
|
|
76
|
+
# Table doesn't exist or read error - silently fail
|
|
77
|
+
pass
|
|
78
|
+
|
|
79
|
+
return None
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def load_drift_config() -> dict[str, Any]:
    """Load drift configuration from plugin config or project .claude directory.

    Candidate locations are tried in order; the first readable JSON file
    wins. When none can be read, a built-in default configuration is
    returned instead.
    """
    project_dir = Path(os.environ.get("CLAUDE_PROJECT_DIR", ""))
    plugin_root = Path(os.environ.get("CLAUDE_PLUGIN_ROOT", ""))
    candidates = (
        Path(__file__).parent.parent.parent.parent.parent
        / ".claude"
        / "config"
        / "drift-config.json",
        project_dir / ".claude" / "config" / "drift-config.json",
        plugin_root / "config" / "drift-config.json",
    )

    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            with open(candidate) as fh:
                return cast(dict[Any, Any], json.load(fh))
        except Exception:
            # Unreadable or corrupt file: fall through to the next candidate.
            continue

    # Built-in defaults, used when no config file is available.
    return {
        "drift_detection": {
            "enabled": True,
            "warning_threshold": 0.7,
            "auto_classify_threshold": 0.85,
            "min_activities_before_classify": 3,
            "cooldown_minutes": 10,
        },
        "classification": {"enabled": True, "use_haiku_agent": True},
        "queue": {
            "max_pending_classifications": 5,
            "max_age_hours": 48,
            "process_on_stop": True,
            "process_on_threshold": True,
        },
    }
|
+
|
|
122
|
+
|
|
123
|
+
def get_parent_user_query(db: HtmlGraphDB, session_id: str) -> str | None:
    """
    Get the most recent UserQuery event_id for this session from database.

    This is the primary method for parent-child event linking.
    Database is the single source of truth - no file-based state.

    Args:
        db: HtmlGraphDB instance
        session_id: Session ID to query

    Returns:
        event_id of the most recent UserQuery event, or None if not found
    """
    try:
        connection = db.connection
        if connection is None:
            return None
        row = (
            connection.cursor()
            .execute(
                """
            SELECT event_id FROM agent_events
            WHERE session_id = ? AND tool_name = 'UserQuery'
            ORDER BY timestamp DESC
            LIMIT 1
            """,
                (session_id,),
            )
            .fetchone()
        )
        return str(row[0]) if row else None
    except Exception as e:
        logger.warning(f"Debug: Database query for UserQuery failed: {e}")
        return None
|
+
|
|
158
|
+
|
|
159
|
+
def load_drift_queue(graph_dir: Path, max_age_hours: int = 48) -> dict[str, Any]:
    """
    Load the drift queue from file and clean up stale entries.

    Args:
        graph_dir: Path to .htmlgraph directory
        max_age_hours: Maximum age in hours before activities are removed (default: 48)

    Returns:
        Drift queue dict with only recent activities
    """
    queue_path = graph_dir / DRIFT_QUEUE_FILE
    if queue_path.exists():
        try:
            with open(queue_path) as f:
                queue = json.load(f)

            # Bug fix: activities are written with timezone-aware UTC
            # timestamps (see add_to_drift_queue), but the old code compared
            # them against a naive datetime.now() cutoff. Comparing aware and
            # naive datetimes raises TypeError, which the except clause below
            # swallowed - so aware-stamped entries were kept forever and the
            # cleanup never ran. Keep one cutoff per timestamp flavor.
            cutoff_aware = datetime.now(timezone.utc) - timedelta(hours=max_age_hours)
            cutoff_naive = datetime.now() - timedelta(hours=max_age_hours)
            original_count = len(queue.get("activities", []))

            fresh_activities = []
            for activity in queue.get("activities", []):
                try:
                    activity_time = datetime.fromisoformat(
                        activity.get("timestamp", "")
                    )
                    # Pick the cutoff that matches the timestamp's awareness.
                    cutoff = cutoff_aware if activity_time.tzinfo else cutoff_naive
                    if activity_time >= cutoff:
                        fresh_activities.append(activity)
                except (ValueError, TypeError):
                    # Keep activities with invalid timestamps to avoid data loss
                    fresh_activities.append(activity)

            # Persist the trimmed queue only when something was removed.
            if len(fresh_activities) < original_count:
                queue["activities"] = fresh_activities
                save_drift_queue(graph_dir, queue)
                removed = original_count - len(fresh_activities)
                logger.warning(
                    f"Cleaned {removed} stale drift queue entries (older than {max_age_hours}h)"
                )

            return cast(dict[Any, Any], queue)
        except Exception:
            # Corrupt/unreadable queue file: fall back to an empty queue.
            pass
    return {"activities": [], "last_classification": None}
|
+
|
|
206
|
+
|
|
207
|
+
def save_drift_queue(graph_dir: Path, queue: dict[str, Any]) -> None:
    """Persist *queue* to the drift-queue file under *graph_dir*.

    Failures are logged as warnings and otherwise ignored: drift tracking
    is best-effort and must never break the calling hook.
    """
    target = graph_dir / DRIFT_QUEUE_FILE
    try:
        with open(target, "w") as handle:
            # default=str makes datetime objects (and similar) serializable.
            json.dump(queue, handle, indent=2, default=str)
    except Exception as e:
        logger.warning(f"Warning: Could not save drift queue: {e}")
|
+
|
|
216
|
+
|
|
217
|
+
def clear_drift_queue_activities(graph_dir: Path) -> None:
    """
    Clear activities from the drift queue after successful classification.

    This removes stale entries that have been processed, preventing indefinite accumulation.
    """
    queue_path = graph_dir / DRIFT_QUEUE_FILE
    try:
        # Start from a freshly cleared queue stamped "now"...
        cleared: dict[str, Any] = {
            "activities": [],
            "last_classification": datetime.now().isoformat(),
        }
        # ...but keep the previously recorded classification time if present.
        if queue_path.exists():
            with open(queue_path) as fh:
                previous = json.load(fh)
            if previous.get("last_classification"):
                cleared["last_classification"] = previous["last_classification"]

        with open(queue_path, "w") as fh:
            json.dump(cleared, fh, indent=2)
    except Exception as e:
        logger.warning(f"Warning: Could not clear drift queue: {e}")
|
+
|
|
240
|
+
|
|
241
|
+
def add_to_drift_queue(
    graph_dir: Path, activity: dict[str, Any], config: dict[str, Any]
) -> dict[str, Any]:
    """Append a high-drift activity to the queue and persist it.

    The queue is loaded with stale-entry cleanup applied, the new entry is
    appended, and the result is capped at the configured maximum number of
    pending classifications before being saved back to disk.
    """
    queue_cfg = config.get("queue", {})
    max_age_hours = queue_cfg.get("max_age_hours", 48)
    max_pending = queue_cfg.get("max_pending_classifications", 5)

    queue = load_drift_queue(graph_dir, max_age_hours=max_age_hours)

    entry = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "tool": activity.get("tool"),
        "summary": activity.get("summary"),
        "file_paths": activity.get("file_paths", []),
        "drift_score": activity.get("drift_score"),
        "feature_id": activity.get("feature_id"),
    }

    # Keep only the most recent max_pending activities (newest last).
    queue["activities"] = (queue["activities"] + [entry])[-max_pending:]
    save_drift_queue(graph_dir, queue)
    return queue
|
+
|
|
265
|
+
|
|
266
|
+
def should_trigger_classification(
    queue: dict[str, Any], config: dict[str, Any]
) -> bool:
    """Decide whether enough drift has accumulated to auto-classify.

    Returns False when classification is disabled, when fewer than the
    configured minimum number of activities are queued, or when the last
    classification happened within the cooldown window.
    """
    if not config.get("classification", {}).get("enabled", True):
        return False

    drift_config = config.get("drift_detection", {})
    min_activities = drift_config.get("min_activities_before_classify", 3)
    cooldown_minutes = drift_config.get("cooldown_minutes", 10)

    # Not enough queued activity yet.
    if len(queue.get("activities", [])) < min_activities:
        return False

    # Respect the cooldown since the last classification, if recorded.
    last_classification = queue.get("last_classification")
    if not last_classification:
        return True
    try:
        elapsed = datetime.now() - datetime.fromisoformat(last_classification)
        return elapsed >= timedelta(minutes=cooldown_minutes)
    except Exception:
        # Unparseable timestamp: do not block classification.
        return True
|
+
|
|
294
|
+
|
|
295
|
+
def build_classification_prompt(queue: dict[str, Any], feature_id: str) -> str:
    """Build the prompt for the classification agent."""

    def _describe(act: dict[str, Any]) -> str:
        # One bullet per activity: tool, summary, up to two files, drift score.
        pieces = [f"- {act.get('tool', 'unknown')}: {act.get('summary', 'no summary')}"]
        if act.get("file_paths"):
            pieces.append(f" (files: {', '.join(act['file_paths'][:2])})")
        pieces.append(f" [drift: {act.get('drift_score', 0):.2f}]")
        return "".join(pieces)

    activity_lines = [_describe(act) for act in queue.get("activities", [])]

    return f"""Classify these high-drift activities into a work item.

Current feature context: {feature_id}

Recent activities with high drift:
{chr(10).join(activity_lines)}

Based on the activity patterns:
1. Determine the work item type (bug, feature, spike, chore, or hotfix)
2. Create an appropriate title and description
3. Create the work item HTML file in .htmlgraph/

Use the classification rules:
- bug: fixing errors, incorrect behavior
- feature: new functionality, additions
- spike: research, exploration, investigation
- chore: maintenance, refactoring, cleanup
- hotfix: urgent production issues

Create the work item now using Write tool."""
|
+
|
|
328
|
+
|
|
329
|
+
def resolve_project_path(cwd: str | None = None) -> str:
|
|
330
|
+
"""Resolve project path (git root or cwd)."""
|
|
331
|
+
start_dir = cwd or os.getcwd()
|
|
332
|
+
try:
|
|
333
|
+
result = subprocess.run(
|
|
334
|
+
["git", "rev-parse", "--show-toplevel"],
|
|
335
|
+
capture_output=True,
|
|
336
|
+
text=True,
|
|
337
|
+
cwd=start_dir,
|
|
338
|
+
timeout=5,
|
|
339
|
+
)
|
|
340
|
+
if result.returncode == 0:
|
|
341
|
+
return result.stdout.strip()
|
|
342
|
+
except Exception:
|
|
343
|
+
pass
|
|
344
|
+
return start_dir
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
def detect_model_from_hook_input(hook_input: dict[str, Any]) -> str | None:
|
|
348
|
+
"""
|
|
349
|
+
Detect the Claude model from hook input data.
|
|
350
|
+
|
|
351
|
+
Checks in order of priority:
|
|
352
|
+
1. Task() model parameter (if tool_name == 'Task')
|
|
353
|
+
2. HTMLGRAPH_MODEL environment variable (set by hooks)
|
|
354
|
+
3. ANTHROPIC_MODEL or CLAUDE_MODEL environment variables
|
|
355
|
+
|
|
356
|
+
Args:
|
|
357
|
+
hook_input: Hook input dict containing tool_name and tool_input
|
|
358
|
+
|
|
359
|
+
Returns:
|
|
360
|
+
Model name (e.g., 'claude-opus', 'claude-sonnet', 'claude-haiku') or None
|
|
361
|
+
"""
|
|
362
|
+
# Get tool info
|
|
363
|
+
tool_name_value: Any = hook_input.get("tool_name", "") or hook_input.get("name", "")
|
|
364
|
+
tool_name = tool_name_value if isinstance(tool_name_value, str) else ""
|
|
365
|
+
tool_input_value: Any = hook_input.get("tool_input", {}) or hook_input.get(
|
|
366
|
+
"input", {}
|
|
367
|
+
)
|
|
368
|
+
tool_input = tool_input_value if isinstance(tool_input_value, dict) else {}
|
|
369
|
+
|
|
370
|
+
# 1. Check for Task() model parameter first
|
|
371
|
+
if tool_name == "Task" and "model" in tool_input:
|
|
372
|
+
model_value: Any = tool_input.get("model")
|
|
373
|
+
if model_value and isinstance(model_value, str):
|
|
374
|
+
model = model_value.strip().lower()
|
|
375
|
+
if model:
|
|
376
|
+
if not model.startswith("claude-"):
|
|
377
|
+
model = f"claude-{model}"
|
|
378
|
+
return cast(str, model)
|
|
379
|
+
|
|
380
|
+
# 2. Check environment variables (set by PreToolUse hook)
|
|
381
|
+
for env_var in ["HTMLGRAPH_MODEL", "ANTHROPIC_MODEL", "CLAUDE_MODEL"]:
|
|
382
|
+
value = os.environ.get(env_var)
|
|
383
|
+
if value and isinstance(value, str):
|
|
384
|
+
model = value.strip()
|
|
385
|
+
if model:
|
|
386
|
+
return model
|
|
387
|
+
|
|
388
|
+
return None
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
def detect_agent_from_environment() -> tuple[str, str | None]:
|
|
392
|
+
"""
|
|
393
|
+
Detect the agent/model name from environment variables and status cache.
|
|
394
|
+
|
|
395
|
+
Checks multiple sources in order of priority:
|
|
396
|
+
1. HTMLGRAPH_AGENT - Explicit agent name set by user
|
|
397
|
+
2. HTMLGRAPH_SUBAGENT_TYPE - For subagent sessions
|
|
398
|
+
3. HTMLGRAPH_PARENT_AGENT - Parent agent context
|
|
399
|
+
4. HTMLGRAPH_MODEL - Model name (e.g., claude-haiku, claude-opus)
|
|
400
|
+
5. CLAUDE_MODEL - Model name if exposed by Claude Code
|
|
401
|
+
6. ANTHROPIC_MODEL - Alternative model env var
|
|
402
|
+
7. Status line cache (model only) - ~/.cache/claude-code/status-{session_id}.json
|
|
403
|
+
|
|
404
|
+
Falls back to 'claude-code' if no environment variable is set.
|
|
405
|
+
|
|
406
|
+
Returns:
|
|
407
|
+
Tuple of (agent_id, model_name). Model name may be None if not detected.
|
|
408
|
+
"""
|
|
409
|
+
# Check for explicit agent name first
|
|
410
|
+
agent_id = None
|
|
411
|
+
env_vars_agent = [
|
|
412
|
+
"HTMLGRAPH_AGENT",
|
|
413
|
+
"HTMLGRAPH_SUBAGENT_TYPE",
|
|
414
|
+
"HTMLGRAPH_PARENT_AGENT",
|
|
415
|
+
]
|
|
416
|
+
|
|
417
|
+
for var in env_vars_agent:
|
|
418
|
+
value = os.environ.get(var)
|
|
419
|
+
if value and value.strip():
|
|
420
|
+
agent_id = value.strip()
|
|
421
|
+
break
|
|
422
|
+
|
|
423
|
+
# Check for model name separately
|
|
424
|
+
model_name = None
|
|
425
|
+
env_vars_model = [
|
|
426
|
+
"HTMLGRAPH_MODEL",
|
|
427
|
+
"CLAUDE_MODEL",
|
|
428
|
+
"ANTHROPIC_MODEL",
|
|
429
|
+
]
|
|
430
|
+
|
|
431
|
+
for var in env_vars_model:
|
|
432
|
+
value = os.environ.get(var)
|
|
433
|
+
if value and value.strip():
|
|
434
|
+
model_name = value.strip()
|
|
435
|
+
break
|
|
436
|
+
|
|
437
|
+
# Fallback: Try to read model from status line cache
|
|
438
|
+
if not model_name:
|
|
439
|
+
model_name = get_model_from_status_cache()
|
|
440
|
+
|
|
441
|
+
# Default fallback for agent_id
|
|
442
|
+
if not agent_id:
|
|
443
|
+
agent_id = "claude-code"
|
|
444
|
+
|
|
445
|
+
return agent_id, model_name
|
|
446
|
+
|
|
447
|
+
|
|
448
|
+
def extract_file_paths(tool_input: dict[str, Any], tool_name: str) -> list[str]:
    """Collect file paths referenced by a tool invocation.

    Args:
        tool_input: Parameters the tool was invoked with.
        tool_name: Name of the tool (affects which fields are inspected).

    Returns:
        List of path strings. Glob/Grep patterns are prefixed with
        ``pattern:``; for Bash commands, up to three path-looking tokens
        are extracted heuristically from the command string.
    """
    # Common direct path fields, checked in a fixed order.
    collected: list[str] = [
        tool_input[key]
        for key in ("file_path", "path", "filepath")
        if key in tool_input
    ]

    # Glob/Grep patterns: only record patterns that look file-like (contain a dot).
    if tool_name in ("Glob", "Grep") and "pattern" in tool_input:
        pattern = tool_input.get("pattern", "")
        if "." in pattern:
            collected.append(f"pattern:{pattern}")

    # Bash commands: heuristically pull out tokens with a short file extension.
    if tool_name == "Bash" and "command" in tool_input:
        matches = re.findall(r"[\w./\-_]+\.[a-zA-Z]{1,5}", tool_input["command"])
        collected.extend(matches[:3])

    return collected
|
|
470
|
+
|
|
471
|
+
|
|
472
|
+
def format_tool_summary(
|
|
473
|
+
tool_name: str, tool_input: dict[str, Any], tool_result: dict | None = None
|
|
474
|
+
) -> str:
|
|
475
|
+
"""Format a human-readable summary of the tool call."""
|
|
476
|
+
if tool_name == "Read":
|
|
477
|
+
path = tool_input.get("file_path", "unknown")
|
|
478
|
+
return f"Read: {path}"
|
|
479
|
+
|
|
480
|
+
elif tool_name == "Write":
|
|
481
|
+
path = tool_input.get("file_path", "unknown")
|
|
482
|
+
return f"Write: {path}"
|
|
483
|
+
|
|
484
|
+
elif tool_name == "Edit":
|
|
485
|
+
path = tool_input.get("file_path", "unknown")
|
|
486
|
+
old = tool_input.get("old_string", "")[:30]
|
|
487
|
+
return f"Edit: {path} ({old}...)"
|
|
488
|
+
|
|
489
|
+
elif tool_name == "Bash":
|
|
490
|
+
cmd = tool_input.get("command", "")[:60]
|
|
491
|
+
desc = tool_input.get("description", "")
|
|
492
|
+
if desc:
|
|
493
|
+
return f"Bash: {desc}"
|
|
494
|
+
return f"Bash: {cmd}"
|
|
495
|
+
|
|
496
|
+
elif tool_name == "Glob":
|
|
497
|
+
pattern = tool_input.get("pattern", "")
|
|
498
|
+
return f"Glob: {pattern}"
|
|
499
|
+
|
|
500
|
+
elif tool_name == "Grep":
|
|
501
|
+
pattern = tool_input.get("pattern", "")
|
|
502
|
+
return f"Grep: {pattern}"
|
|
503
|
+
|
|
504
|
+
elif tool_name == "Task":
|
|
505
|
+
desc = tool_input.get("description", "")[:50]
|
|
506
|
+
agent = tool_input.get("subagent_type", "")
|
|
507
|
+
return f"Task ({agent}): {desc}"
|
|
508
|
+
|
|
509
|
+
elif tool_name == "TodoWrite":
|
|
510
|
+
todos = tool_input.get("todos", [])
|
|
511
|
+
return f"TodoWrite: {len(todos)} items"
|
|
512
|
+
|
|
513
|
+
elif tool_name == "WebSearch":
|
|
514
|
+
query = tool_input.get("query", "")[:40]
|
|
515
|
+
return f"WebSearch: {query}"
|
|
516
|
+
|
|
517
|
+
elif tool_name == "WebFetch":
|
|
518
|
+
url = tool_input.get("url", "")[:40]
|
|
519
|
+
return f"WebFetch: {url}"
|
|
520
|
+
|
|
521
|
+
elif tool_name == "UserQuery":
|
|
522
|
+
# Extract the actual prompt text from the tool_input
|
|
523
|
+
prompt = str(tool_input.get("prompt", ""))
|
|
524
|
+
preview = prompt[:100].replace("\n", " ")
|
|
525
|
+
if len(prompt) > 100:
|
|
526
|
+
preview += "..."
|
|
527
|
+
return preview
|
|
528
|
+
|
|
529
|
+
else:
|
|
530
|
+
return f"{tool_name}: {str(tool_input)[:50]}"
|
|
531
|
+
|
|
532
|
+
|
|
533
|
+
def record_event_to_sqlite(
    db: HtmlGraphDB,
    session_id: str,
    tool_name: str,
    tool_input: dict[str, Any],
    tool_response: dict[str, Any],
    is_error: bool,
    file_paths: list[str] | None = None,
    parent_event_id: str | None = None,
    agent_id: str | None = None,
    subagent_type: str | None = None,
    model: str | None = None,
    feature_id: str | None = None,
    claude_task_id: str | None = None,
) -> str | None:
    """
    Record a tool call event to SQLite database for dashboard queries.

    Args:
        db: HtmlGraphDB instance
        session_id: Session ID from HtmlGraph
        tool_name: Name of the tool called
        tool_input: Tool input parameters
        tool_response: Tool response/result
        is_error: Whether the tool call resulted in an error
        file_paths: File paths affected by the tool
        parent_event_id: Parent event ID if this is a child event
        agent_id: Agent identifier (optional)
        subagent_type: Subagent type for Task delegations (optional)
        model: Claude model name (e.g., claude-haiku, claude-opus) (optional)
        feature_id: Feature ID for attribution (optional)
        claude_task_id: Claude Code's internal task ID for tool attribution (optional)

    Returns:
        event_id if successful, None otherwise. Failures are logged as
        warnings and never propagate (best-effort recording).
    """
    try:
        event_id = generate_id("event")
        input_summary = format_tool_summary(tool_name, tool_input, tool_response)

        # Build output summary from tool response
        output_summary = ""
        if isinstance(tool_response, dict):  # type: ignore[arg-type]
            if is_error:
                # FIX: coerce to str before slicing. A non-string "error"
                # value (None, dict, ...) previously raised TypeError on
                # slicing, which the outer except swallowed — silently
                # dropping the entire event record.
                output_summary = str(tool_response.get("error", "error"))[:200]
            else:
                # Extract summary from response: prefer "content", then "output".
                content = tool_response.get("content", tool_response.get("output", ""))
                if isinstance(content, str):
                    output_summary = content[:200]
                elif isinstance(content, list):
                    output_summary = f"{len(content)} items"
                else:
                    output_summary = "success"

        # Build context metadata stored alongside the event row.
        context = {
            "file_paths": file_paths or [],
            "tool_input_keys": list(tool_input.keys()),
            "is_error": is_error,
        }

        # Extract task_id from Task tool response if not explicitly provided.
        if (
            not claude_task_id
            and tool_name == "Task"
            and isinstance(tool_response, dict)
        ):
            claude_task_id = tool_response.get("task_id")

        # Insert event to SQLite
        success = db.insert_event(
            event_id=event_id,
            agent_id=agent_id or "claude-code",
            event_type="tool_call",
            session_id=session_id,
            tool_name=tool_name,
            input_summary=input_summary,
            output_summary=output_summary,
            context=context,
            parent_event_id=parent_event_id,
            cost_tokens=0,
            subagent_type=subagent_type,
            model=model,
            feature_id=feature_id,
            claude_task_id=claude_task_id,
        )

        if success:
            return event_id
        return None

    except Exception as e:
        logger.warning(f"Warning: Could not record event to SQLite: {e}")
        return None
|
|
628
|
+
|
|
629
|
+
|
|
630
|
+
def record_delegation_to_sqlite(
    db: HtmlGraphDB,
    session_id: str,
    from_agent: str,
    to_agent: str,
    task_description: str,
    task_input: dict[str, Any],
) -> str | None:
    """
    Record a Task() delegation to agent_collaboration table.

    Args:
        db: HtmlGraphDB instance
        session_id: Session ID from HtmlGraph
        from_agent: Agent delegating the task (usually 'orchestrator' or 'claude-code')
        to_agent: Target subagent type (e.g., 'general-purpose', 'researcher')
        task_description: Task description/prompt
        task_input: Full task input parameters

    Returns:
        handoff_id if successful, None otherwise. Failures are logged as
        warnings and never propagate (best-effort recording).
    """
    try:
        handoff_id = generate_id("handoff")

        # Context payload: which keys the task carried plus selected settings.
        delegation_context = {
            "task_input_keys": list(task_input.keys()),
            "model": task_input.get("model"),
            "temperature": task_input.get("temperature"),
        }

        # Insert the delegation record; reason is the truncated task prompt.
        inserted = db.insert_collaboration(
            handoff_id=handoff_id,
            from_agent=from_agent,
            to_agent=to_agent,
            session_id=session_id,
            handoff_type="delegation",
            reason=task_description[:200],
            context=delegation_context,
        )
        return handoff_id if inserted else None

    except Exception as e:
        logger.warning(f"Warning: Could not record delegation to SQLite: {e}")
        return None
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
def track_event(hook_type: str, hook_input: dict[str, Any]) -> dict[str, Any]:
|
|
683
|
+
"""
|
|
684
|
+
Track a hook event and log it to HtmlGraph (both HTML files and SQLite).
|
|
685
|
+
|
|
686
|
+
Args:
|
|
687
|
+
hook_type: Type of hook event ("PostToolUse", "Stop", "UserPromptSubmit")
|
|
688
|
+
hook_input: Hook input data from stdin
|
|
689
|
+
|
|
690
|
+
Returns:
|
|
691
|
+
Response dict with {"continue": True} and optional hookSpecificOutput
|
|
692
|
+
"""
|
|
693
|
+
cwd = hook_input.get("cwd")
|
|
694
|
+
project_dir = resolve_project_path(cwd if cwd else None)
|
|
695
|
+
graph_dir = Path(project_dir) / ".htmlgraph"
|
|
696
|
+
|
|
697
|
+
# Load drift configuration
|
|
698
|
+
drift_config = load_drift_config()
|
|
699
|
+
|
|
700
|
+
# Initialize SessionManager and SQLite DB
|
|
701
|
+
try:
|
|
702
|
+
manager = SessionManager(graph_dir)
|
|
703
|
+
except Exception as e:
|
|
704
|
+
logger.warning(f"Warning: Could not initialize SessionManager: {e}")
|
|
705
|
+
return {"continue": True}
|
|
706
|
+
|
|
707
|
+
# Initialize SQLite database for event recording
|
|
708
|
+
db = None
|
|
709
|
+
try:
|
|
710
|
+
from htmlgraph.config import get_database_path
|
|
711
|
+
from htmlgraph.db.schema import HtmlGraphDB
|
|
712
|
+
|
|
713
|
+
db = HtmlGraphDB(str(get_database_path()))
|
|
714
|
+
except Exception as e:
|
|
715
|
+
logger.warning(f"Warning: Could not initialize SQLite database: {e}")
|
|
716
|
+
# Continue without SQLite (graceful degradation)
|
|
717
|
+
|
|
718
|
+
# Detect agent and model from environment
|
|
719
|
+
detected_agent, detected_model = detect_agent_from_environment()
|
|
720
|
+
|
|
721
|
+
# Also try to detect model from hook input (more specific than environment)
|
|
722
|
+
model_from_input = detect_model_from_hook_input(hook_input)
|
|
723
|
+
if model_from_input:
|
|
724
|
+
detected_model = model_from_input
|
|
725
|
+
|
|
726
|
+
active_session = None
|
|
727
|
+
|
|
728
|
+
# Check if we're in a subagent context using multiple methods:
|
|
729
|
+
#
|
|
730
|
+
# PRECEDENCE ORDER:
|
|
731
|
+
# 1. Sessions table - if THIS session is already marked as subagent, use stored parent info
|
|
732
|
+
# (fixes persistence issue for subsequent tool calls in same subagent)
|
|
733
|
+
# 2. Environment variables - set by spawner router for first tool call
|
|
734
|
+
# 3. Fallback to normal orchestrator context
|
|
735
|
+
#
|
|
736
|
+
# Method 1: Check if current session is already a subagent (CRITICAL for persistence!)
|
|
737
|
+
# This fixes the issue where subsequent tool calls in the same subagent session
|
|
738
|
+
# lose the parent_event_id linkage.
|
|
739
|
+
subagent_type = None
|
|
740
|
+
parent_session_id = None
|
|
741
|
+
task_event_id_from_db = None # Will be set by Method 1 if found
|
|
742
|
+
hook_session_id = hook_input.get("session_id") or hook_input.get("sessionId")
|
|
743
|
+
|
|
744
|
+
if db and db.connection and hook_session_id:
|
|
745
|
+
try:
|
|
746
|
+
cursor = db.connection.cursor()
|
|
747
|
+
cursor.execute(
|
|
748
|
+
"""
|
|
749
|
+
SELECT parent_session_id, agent_assigned
|
|
750
|
+
FROM sessions
|
|
751
|
+
WHERE session_id = ? AND is_subagent = 1
|
|
752
|
+
LIMIT 1
|
|
753
|
+
""",
|
|
754
|
+
(hook_session_id,),
|
|
755
|
+
)
|
|
756
|
+
row = cursor.fetchone()
|
|
757
|
+
if row:
|
|
758
|
+
parent_session_id = row[0]
|
|
759
|
+
# Extract subagent_type from agent_assigned (e.g., "general-purpose-spawner" -> "general-purpose")
|
|
760
|
+
agent_assigned = row[1] or ""
|
|
761
|
+
if agent_assigned and agent_assigned.endswith("-spawner"):
|
|
762
|
+
subagent_type = agent_assigned[:-8] # Remove "-spawner" suffix
|
|
763
|
+
else:
|
|
764
|
+
subagent_type = "general-purpose" # Default if format unexpected
|
|
765
|
+
|
|
766
|
+
# CRITICAL FIX: When Method 1 succeeds, also find the task_delegation event!
|
|
767
|
+
# This ensures parent_activity_id will use the task event, not fall back to UserQuery
|
|
768
|
+
try:
|
|
769
|
+
# First try to find task in parent_session_id (if not NULL)
|
|
770
|
+
if parent_session_id:
|
|
771
|
+
cursor.execute(
|
|
772
|
+
"""
|
|
773
|
+
SELECT event_id
|
|
774
|
+
FROM agent_events
|
|
775
|
+
WHERE event_type = 'task_delegation'
|
|
776
|
+
AND subagent_type = ?
|
|
777
|
+
AND status = 'started'
|
|
778
|
+
AND session_id = ?
|
|
779
|
+
ORDER BY timestamp DESC
|
|
780
|
+
LIMIT 1
|
|
781
|
+
""",
|
|
782
|
+
(subagent_type, parent_session_id),
|
|
783
|
+
)
|
|
784
|
+
task_row = cursor.fetchone()
|
|
785
|
+
if task_row:
|
|
786
|
+
task_event_id_from_db = task_row[0]
|
|
787
|
+
|
|
788
|
+
# If not found (parent_session_id is NULL), fallback to finding most recent task
|
|
789
|
+
# This handles Claude Code's session reuse where parent_session_id can be NULL
|
|
790
|
+
if not task_event_id_from_db:
|
|
791
|
+
cursor.execute(
|
|
792
|
+
"""
|
|
793
|
+
SELECT event_id
|
|
794
|
+
FROM agent_events
|
|
795
|
+
WHERE event_type = 'task_delegation'
|
|
796
|
+
AND subagent_type = ?
|
|
797
|
+
AND status = 'started'
|
|
798
|
+
ORDER BY timestamp DESC
|
|
799
|
+
LIMIT 1
|
|
800
|
+
""",
|
|
801
|
+
(subagent_type,),
|
|
802
|
+
)
|
|
803
|
+
task_row = cursor.fetchone()
|
|
804
|
+
if task_row:
|
|
805
|
+
task_event_id_from_db = task_row[0]
|
|
806
|
+
logger.warning(
|
|
807
|
+
f"DEBUG Method 1 fallback: Found task_delegation={task_event_id_from_db} for {subagent_type}"
|
|
808
|
+
)
|
|
809
|
+
else:
|
|
810
|
+
logger.warning(
|
|
811
|
+
f"DEBUG Method 1: No task_delegation found for subagent_type={subagent_type}"
|
|
812
|
+
)
|
|
813
|
+
else:
|
|
814
|
+
logger.warning(
|
|
815
|
+
f"DEBUG Method 1: Found task_delegation={task_event_id_from_db} for subagent {subagent_type}"
|
|
816
|
+
)
|
|
817
|
+
except Exception as e:
|
|
818
|
+
logger.warning(
|
|
819
|
+
f"DEBUG: Error finding task_delegation for Method 1: {e}"
|
|
820
|
+
)
|
|
821
|
+
|
|
822
|
+
logger.debug(
|
|
823
|
+
f"DEBUG subagent persistence: Found current session as subagent in sessions table: "
|
|
824
|
+
f"type={subagent_type}, parent_session={parent_session_id}, task_event={task_event_id_from_db}",
|
|
825
|
+
)
|
|
826
|
+
except Exception as e:
|
|
827
|
+
logger.warning(f"DEBUG: Error checking sessions table for subagent: {e}")
|
|
828
|
+
|
|
829
|
+
# Method 2: Environment variables (for first tool call before session table is populated)
|
|
830
|
+
if not subagent_type:
|
|
831
|
+
subagent_type = os.environ.get("HTMLGRAPH_SUBAGENT_TYPE")
|
|
832
|
+
parent_session_id = os.environ.get("HTMLGRAPH_PARENT_SESSION")
|
|
833
|
+
|
|
834
|
+
# Method 3: Database detection of active task_delegation events
|
|
835
|
+
# CRITICAL: When Task() subprocess is launched, environment variables don't propagate
|
|
836
|
+
# So we must query the database for active task_delegation events to detect subagent context
|
|
837
|
+
# NOTE: Claude Code passes the SAME session_id to parent and subagent, so we CAN'T use
|
|
838
|
+
# session_id to distinguish them. Instead, look for the most recent task_delegation event
|
|
839
|
+
# and if found with status='started', we ARE the subagent.
|
|
840
|
+
#
|
|
841
|
+
# CRITICAL FIX: The actual PARENT session is hook_session_id (what Claude Code passes),
|
|
842
|
+
# NOT the session_id from the task_delegation event (which is the same as current).
|
|
843
|
+
# NOTE: DO NOT reinitialize task_event_id_from_db here - it may have been set by Method 1!
|
|
844
|
+
if not subagent_type and db and db.connection:
|
|
845
|
+
try:
|
|
846
|
+
cursor = db.connection.cursor()
|
|
847
|
+
# Find the most recent active task_delegation event
|
|
848
|
+
cursor.execute(
|
|
849
|
+
"""
|
|
850
|
+
SELECT event_id, subagent_type, session_id
|
|
851
|
+
FROM agent_events
|
|
852
|
+
WHERE event_type = 'task_delegation'
|
|
853
|
+
AND status = 'started'
|
|
854
|
+
AND tool_name = 'Task'
|
|
855
|
+
ORDER BY timestamp DESC
|
|
856
|
+
LIMIT 1
|
|
857
|
+
""",
|
|
858
|
+
)
|
|
859
|
+
row = cursor.fetchone()
|
|
860
|
+
if row:
|
|
861
|
+
task_event_id, detected_subagent_type, parent_sess = row
|
|
862
|
+
# If we found an active task_delegation, we're running as a subagent
|
|
863
|
+
# (Claude Code uses the same session_id for both parent and subagent)
|
|
864
|
+
subagent_type = detected_subagent_type or "general-purpose"
|
|
865
|
+
# IMPORTANT: Use the hook_session_id as parent, not parent_sess!
|
|
866
|
+
# The parent_sess from task_delegation is the same as current session
|
|
867
|
+
# (Claude Code reuses session_id). The actual parent is hook_session_id.
|
|
868
|
+
parent_session_id = hook_session_id
|
|
869
|
+
task_event_id_from_db = (
|
|
870
|
+
task_event_id # Store for later use as parent_event_id
|
|
871
|
+
)
|
|
872
|
+
logger.debug(
|
|
873
|
+
f"DEBUG subagent detection (database): Detected active task_delegation "
|
|
874
|
+
f"type={subagent_type}, parent_session={parent_session_id}, "
|
|
875
|
+
f"parent_event={task_event_id}"
|
|
876
|
+
)
|
|
877
|
+
except Exception as e:
|
|
878
|
+
logger.warning(f"DEBUG: Error detecting subagent from database: {e}")
|
|
879
|
+
|
|
880
|
+
if subagent_type and parent_session_id:
|
|
881
|
+
# We're in a subagent - create or get subagent session
|
|
882
|
+
# Use deterministic session ID based on parent + subagent type
|
|
883
|
+
subagent_session_id = f"{parent_session_id}-{subagent_type}"
|
|
884
|
+
|
|
885
|
+
# Check if subagent session already exists
|
|
886
|
+
existing = manager.session_converter.load(subagent_session_id)
|
|
887
|
+
if existing:
|
|
888
|
+
active_session = existing
|
|
889
|
+
logger.warning(
|
|
890
|
+
f"Debug: Using existing subagent session: {subagent_session_id}"
|
|
891
|
+
)
|
|
892
|
+
else:
|
|
893
|
+
# Create new subagent session with parent link
|
|
894
|
+
try:
|
|
895
|
+
active_session = manager.start_session(
|
|
896
|
+
session_id=subagent_session_id,
|
|
897
|
+
agent=f"{subagent_type}-spawner",
|
|
898
|
+
is_subagent=True,
|
|
899
|
+
parent_session_id=parent_session_id,
|
|
900
|
+
title=f"{subagent_type.capitalize()} Subagent",
|
|
901
|
+
)
|
|
902
|
+
logger.debug(
|
|
903
|
+
f"Debug: Created subagent session: {subagent_session_id} "
|
|
904
|
+
f"(parent: {parent_session_id})"
|
|
905
|
+
)
|
|
906
|
+
except Exception as e:
|
|
907
|
+
logger.warning(f"Warning: Could not create subagent session: {e}")
|
|
908
|
+
return {"continue": True}
|
|
909
|
+
|
|
910
|
+
# Override detected agent for subagent context
|
|
911
|
+
detected_agent = f"{subagent_type}-spawner"
|
|
912
|
+
else:
|
|
913
|
+
# Normal orchestrator/parent context
|
|
914
|
+
# CRITICAL: Use session_id from hook_input (Claude Code provides this)
|
|
915
|
+
# Only fall back to manager.get_active_session() if not in hook_input
|
|
916
|
+
# hook_session_id already defined at line 730
|
|
917
|
+
|
|
918
|
+
if hook_session_id:
|
|
919
|
+
# Claude Code provided session_id - use it directly
|
|
920
|
+
# Check if session already exists
|
|
921
|
+
existing = manager.session_converter.load(hook_session_id)
|
|
922
|
+
if existing:
|
|
923
|
+
active_session = existing
|
|
924
|
+
else:
|
|
925
|
+
# Create new session with Claude's session_id
|
|
926
|
+
try:
|
|
927
|
+
active_session = manager.start_session(
|
|
928
|
+
session_id=hook_session_id,
|
|
929
|
+
agent=detected_agent,
|
|
930
|
+
title=f"Session {datetime.now().strftime('%Y-%m-%d %H:%M')}",
|
|
931
|
+
)
|
|
932
|
+
except Exception:
|
|
933
|
+
return {"continue": True}
|
|
934
|
+
else:
|
|
935
|
+
# Fallback: No session_id in hook_input - use global session cache
|
|
936
|
+
active_session = manager.get_active_session()
|
|
937
|
+
if not active_session:
|
|
938
|
+
# No active HtmlGraph session yet; start one
|
|
939
|
+
try:
|
|
940
|
+
active_session = manager.start_session(
|
|
941
|
+
session_id=None,
|
|
942
|
+
agent=detected_agent,
|
|
943
|
+
title=f"Session {datetime.now().strftime('%Y-%m-%d %H:%M')}",
|
|
944
|
+
)
|
|
945
|
+
except Exception:
|
|
946
|
+
return {"continue": True}
|
|
947
|
+
|
|
948
|
+
active_session_id = active_session.id
|
|
949
|
+
|
|
950
|
+
# Ensure session exists in SQLite database (for foreign key constraints)
|
|
951
|
+
if db:
|
|
952
|
+
try:
|
|
953
|
+
# Get attributes safely - MagicMock objects can cause SQLite binding errors
|
|
954
|
+
# When getattr is called on a MagicMock, it returns another MagicMock, not the default
|
|
955
|
+
def safe_getattr(obj: Any, attr: str, default: Any) -> Any:
|
|
956
|
+
"""Get attribute safely, returning default for MagicMock/invalid values."""
|
|
957
|
+
try:
|
|
958
|
+
val = getattr(obj, attr, default)
|
|
959
|
+
# Check if it's a mock object (has _mock_name attribute)
|
|
960
|
+
if hasattr(val, "_mock_name"):
|
|
961
|
+
return default
|
|
962
|
+
return val
|
|
963
|
+
except Exception:
|
|
964
|
+
return default
|
|
965
|
+
|
|
966
|
+
is_subagent_raw = safe_getattr(active_session, "is_subagent", False)
|
|
967
|
+
is_subagent = (
|
|
968
|
+
bool(is_subagent_raw) if isinstance(is_subagent_raw, bool) else False
|
|
969
|
+
)
|
|
970
|
+
|
|
971
|
+
transcript_id = safe_getattr(active_session, "transcript_id", None)
|
|
972
|
+
transcript_path = safe_getattr(active_session, "transcript_path", None)
|
|
973
|
+
# Ensure strings or None, not mock objects
|
|
974
|
+
if transcript_id is not None and not isinstance(transcript_id, str):
|
|
975
|
+
transcript_id = None
|
|
976
|
+
if transcript_path is not None and not isinstance(transcript_path, str):
|
|
977
|
+
transcript_path = None
|
|
978
|
+
|
|
979
|
+
db.insert_session(
|
|
980
|
+
session_id=active_session_id,
|
|
981
|
+
agent_assigned=safe_getattr(active_session, "agent", None)
|
|
982
|
+
or detected_agent,
|
|
983
|
+
is_subagent=is_subagent,
|
|
984
|
+
transcript_id=transcript_id,
|
|
985
|
+
transcript_path=transcript_path,
|
|
986
|
+
)
|
|
987
|
+
except Exception as e:
|
|
988
|
+
# Session may already exist, that's OK - continue
|
|
989
|
+
logger.warning(
|
|
990
|
+
f"Debug: Could not insert session to SQLite (may already exist): {e}"
|
|
991
|
+
)
|
|
992
|
+
|
|
993
|
+
# Handle different hook types
|
|
994
|
+
if hook_type == "Stop":
|
|
995
|
+
# Session is ending - track stop event
|
|
996
|
+
try:
|
|
997
|
+
result = manager.track_activity(
|
|
998
|
+
session_id=active_session_id, tool="Stop", summary="Agent stopped"
|
|
999
|
+
)
|
|
1000
|
+
|
|
1001
|
+
# Record to SQLite if available
|
|
1002
|
+
if db:
|
|
1003
|
+
record_event_to_sqlite(
|
|
1004
|
+
db=db,
|
|
1005
|
+
session_id=active_session_id,
|
|
1006
|
+
tool_name="Stop",
|
|
1007
|
+
tool_input={},
|
|
1008
|
+
tool_response={"content": "Agent stopped"},
|
|
1009
|
+
is_error=False,
|
|
1010
|
+
agent_id=detected_agent,
|
|
1011
|
+
model=detected_model,
|
|
1012
|
+
feature_id=result.feature_id if result else None,
|
|
1013
|
+
)
|
|
1014
|
+
except Exception as e:
|
|
1015
|
+
logger.warning(f"Warning: Could not track stop: {e}")
|
|
1016
|
+
return {"continue": True}
|
|
1017
|
+
|
|
1018
|
+
elif hook_type == "UserPromptSubmit":
|
|
1019
|
+
# User submitted a query
|
|
1020
|
+
prompt = hook_input.get("prompt", "")
|
|
1021
|
+
preview = prompt[:100].replace("\n", " ")
|
|
1022
|
+
if len(prompt) > 100:
|
|
1023
|
+
preview += "..."
|
|
1024
|
+
|
|
1025
|
+
try:
|
|
1026
|
+
result = manager.track_activity(
|
|
1027
|
+
session_id=active_session_id, tool="UserQuery", summary=f'"{preview}"'
|
|
1028
|
+
)
|
|
1029
|
+
|
|
1030
|
+
# Record to SQLite if available
|
|
1031
|
+
# UserQuery event is stored in database - no file-based state needed
|
|
1032
|
+
# Subsequent tool calls query database for parent via get_parent_user_query()
|
|
1033
|
+
if db:
|
|
1034
|
+
record_event_to_sqlite(
|
|
1035
|
+
db=db,
|
|
1036
|
+
session_id=active_session_id,
|
|
1037
|
+
tool_name="UserQuery",
|
|
1038
|
+
tool_input={"prompt": prompt},
|
|
1039
|
+
tool_response={"content": "Query received"},
|
|
1040
|
+
is_error=False,
|
|
1041
|
+
agent_id=detected_agent,
|
|
1042
|
+
model=detected_model,
|
|
1043
|
+
feature_id=result.feature_id if result else None,
|
|
1044
|
+
)
|
|
1045
|
+
|
|
1046
|
+
except Exception as e:
|
|
1047
|
+
logger.warning(f"Warning: Could not track query: {e}")
|
|
1048
|
+
return {"continue": True}
|
|
1049
|
+
|
|
1050
|
+
elif hook_type == "PostToolUse":
|
|
1051
|
+
# Tool was used - track it
|
|
1052
|
+
tool_name = hook_input.get("tool_name", "unknown")
|
|
1053
|
+
tool_input_data = hook_input.get("tool_input", {})
|
|
1054
|
+
tool_response = (
|
|
1055
|
+
hook_input.get("tool_response", hook_input.get("tool_result", {})) or {}
|
|
1056
|
+
)
|
|
1057
|
+
|
|
1058
|
+
# Skip tracking for some tools
|
|
1059
|
+
skip_tools = {"AskUserQuestion"}
|
|
1060
|
+
if tool_name in skip_tools:
|
|
1061
|
+
return {"continue": True}
|
|
1062
|
+
|
|
1063
|
+
# Extract file paths
|
|
1064
|
+
file_paths = extract_file_paths(tool_input_data, tool_name)
|
|
1065
|
+
|
|
1066
|
+
# Format summary
|
|
1067
|
+
summary = format_tool_summary(tool_name, tool_input_data, tool_response)
|
|
1068
|
+
|
|
1069
|
+
# Determine success
|
|
1070
|
+
if isinstance(tool_response, dict): # type: ignore[arg-type]
|
|
1071
|
+
success_field = tool_response.get("success")
|
|
1072
|
+
if isinstance(success_field, bool):
|
|
1073
|
+
is_error = not success_field
|
|
1074
|
+
else:
|
|
1075
|
+
is_error = bool(tool_response.get("is_error", False))
|
|
1076
|
+
|
|
1077
|
+
# Additional check for Bash failures: detect non-zero exit codes
|
|
1078
|
+
if tool_name == "Bash" and not is_error:
|
|
1079
|
+
output = str(
|
|
1080
|
+
tool_response.get("output", "") or tool_response.get("content", "")
|
|
1081
|
+
)
|
|
1082
|
+
# Check for exit code patterns (e.g., "Exit code 1", "exit status 1")
|
|
1083
|
+
if re.search(
|
|
1084
|
+
r"Exit code [1-9]\d*|exit status [1-9]\d*", output, re.IGNORECASE
|
|
1085
|
+
):
|
|
1086
|
+
is_error = True
|
|
1087
|
+
else:
|
|
1088
|
+
# For list or other non-dict responses (like Playwright), assume success
|
|
1089
|
+
is_error = False
|
|
1090
|
+
|
|
1091
|
+
# Get drift thresholds from config
|
|
1092
|
+
drift_settings = drift_config.get("drift_detection", {})
|
|
1093
|
+
warning_threshold = drift_settings.get("warning_threshold") or 0.7
|
|
1094
|
+
auto_classify_threshold = drift_settings.get("auto_classify_threshold") or 0.85
|
|
1095
|
+
|
|
1096
|
+
# Determine parent activity context using database-only lookup
|
|
1097
|
+
parent_activity_id = None
|
|
1098
|
+
|
|
1099
|
+
# Check environment variable FIRST for cross-process parent linking
|
|
1100
|
+
# This is set by PreToolUse hook when Task() spawns a subagent
|
|
1101
|
+
env_parent = os.environ.get("HTMLGRAPH_PARENT_EVENT") or os.environ.get(
|
|
1102
|
+
"HTMLGRAPH_PARENT_QUERY_EVENT"
|
|
1103
|
+
)
|
|
1104
|
+
if env_parent:
|
|
1105
|
+
parent_activity_id = env_parent
|
|
1106
|
+
# If we detected a Task delegation event via database detection (Method 3),
|
|
1107
|
+
# use that as the parent for all tool calls within the subagent
|
|
1108
|
+
elif task_event_id_from_db:
|
|
1109
|
+
parent_activity_id = task_event_id_from_db
|
|
1110
|
+
# CRITICAL FIX: Check for active task_delegation EVEN IF task_event_id_from_db not set
|
|
1111
|
+
# This handles Claude Code's session reuse where parent_session_id is NULL
|
|
1112
|
+
# When tool calls come from a subagent, they should be under the task_delegation parent,
|
|
1113
|
+
# NOT under UserQuery. So we MUST check for active tasks BEFORE falling back to UserQuery.
|
|
1114
|
+
# IMPORTANT: This must work EVEN IF db is None, so try to get it from htmlgraph_db
|
|
1115
|
+
else:
|
|
1116
|
+
# Ensure we have a db connection (may not have been passed in for parent session)
|
|
1117
|
+
db_to_use = db
|
|
1118
|
+
if not db_to_use:
|
|
1119
|
+
try:
|
|
1120
|
+
from htmlgraph.config import get_database_path
|
|
1121
|
+
from htmlgraph.db.schema import HtmlGraphDB
|
|
1122
|
+
|
|
1123
|
+
db_to_use = HtmlGraphDB(str(get_database_path()))
|
|
1124
|
+
except Exception:
|
|
1125
|
+
db_to_use = None
|
|
1126
|
+
|
|
1127
|
+
# Try to find an active task_delegation event
|
|
1128
|
+
if db_to_use:
|
|
1129
|
+
try:
|
|
1130
|
+
cursor = db_to_use.connection.cursor() # type: ignore[union-attr]
|
|
1131
|
+
cursor.execute(
|
|
1132
|
+
"""
|
|
1133
|
+
SELECT event_id
|
|
1134
|
+
FROM agent_events
|
|
1135
|
+
WHERE event_type = 'task_delegation'
|
|
1136
|
+
AND status = 'started'
|
|
1137
|
+
ORDER BY timestamp DESC
|
|
1138
|
+
LIMIT 1
|
|
1139
|
+
""",
|
|
1140
|
+
)
|
|
1141
|
+
task_row = cursor.fetchone()
|
|
1142
|
+
if task_row:
|
|
1143
|
+
parent_activity_id = task_row[0]
|
|
1144
|
+
logger.warning(
|
|
1145
|
+
f"DEBUG: Found active task_delegation={parent_activity_id} in parent_activity_id fallback"
|
|
1146
|
+
)
|
|
1147
|
+
except Exception as e:
|
|
1148
|
+
logger.warning(
|
|
1149
|
+
f"DEBUG: Error finding task_delegation in parent_activity_id: {e}"
|
|
1150
|
+
)
|
|
1151
|
+
|
|
1152
|
+
# Only if no active task found, fall back to UserQuery
|
|
1153
|
+
if not parent_activity_id:
|
|
1154
|
+
parent_activity_id = get_parent_user_query(
|
|
1155
|
+
db_to_use, active_session_id
|
|
1156
|
+
)
|
|
1157
|
+
|
|
1158
|
+
# Track the activity
|
|
1159
|
+
nudge = None
|
|
1160
|
+
try:
|
|
1161
|
+
result = manager.track_activity(
|
|
1162
|
+
session_id=active_session_id,
|
|
1163
|
+
tool=tool_name,
|
|
1164
|
+
summary=summary,
|
|
1165
|
+
file_paths=file_paths if file_paths else None,
|
|
1166
|
+
success=not is_error,
|
|
1167
|
+
parent_activity_id=parent_activity_id,
|
|
1168
|
+
)
|
|
1169
|
+
|
|
1170
|
+
# Record to SQLite if available
|
|
1171
|
+
if db:
|
|
1172
|
+
# Extract subagent_type for Task delegations
|
|
1173
|
+
task_subagent_type = None
|
|
1174
|
+
if tool_name == "Task":
|
|
1175
|
+
task_subagent_type = tool_input_data.get(
|
|
1176
|
+
"subagent_type", "general-purpose"
|
|
1177
|
+
)
|
|
1178
|
+
|
|
1179
|
+
record_event_to_sqlite(
|
|
1180
|
+
db=db,
|
|
1181
|
+
session_id=active_session_id,
|
|
1182
|
+
tool_name=tool_name,
|
|
1183
|
+
tool_input=tool_input_data,
|
|
1184
|
+
tool_response=tool_response,
|
|
1185
|
+
is_error=is_error,
|
|
1186
|
+
file_paths=file_paths if file_paths else None,
|
|
1187
|
+
parent_event_id=parent_activity_id, # Link to parent event
|
|
1188
|
+
agent_id=detected_agent,
|
|
1189
|
+
subagent_type=task_subagent_type,
|
|
1190
|
+
model=detected_model,
|
|
1191
|
+
feature_id=result.feature_id if result else None,
|
|
1192
|
+
)
|
|
1193
|
+
|
|
1194
|
+
# If this was a Task() delegation, also record to agent_collaboration
|
|
1195
|
+
if tool_name == "Task" and db:
|
|
1196
|
+
subagent = tool_input_data.get("subagent_type", "general-purpose")
|
|
1197
|
+
description = tool_input_data.get("description", "")
|
|
1198
|
+
record_delegation_to_sqlite(
|
|
1199
|
+
db=db,
|
|
1200
|
+
session_id=active_session_id,
|
|
1201
|
+
from_agent=detected_agent,
|
|
1202
|
+
to_agent=subagent,
|
|
1203
|
+
task_description=description,
|
|
1204
|
+
task_input=tool_input_data,
|
|
1205
|
+
)
|
|
1206
|
+
|
|
1207
|
+
# Check for drift and handle accordingly
|
|
1208
|
+
# Skip drift detection for child activities (they inherit parent's context)
|
|
1209
|
+
if result and hasattr(result, "drift_score") and not parent_activity_id:
|
|
1210
|
+
drift_score = result.drift_score
|
|
1211
|
+
feature_id = getattr(result, "feature_id", "unknown")
|
|
1212
|
+
|
|
1213
|
+
# Skip drift detection if no score available
|
|
1214
|
+
if drift_score is None:
|
|
1215
|
+
pass # No active features - can't calculate drift
|
|
1216
|
+
elif drift_score >= auto_classify_threshold:
|
|
1217
|
+
# High drift - add to classification queue
|
|
1218
|
+
queue = add_to_drift_queue(
|
|
1219
|
+
graph_dir,
|
|
1220
|
+
{
|
|
1221
|
+
"tool": tool_name,
|
|
1222
|
+
"summary": summary,
|
|
1223
|
+
"file_paths": file_paths,
|
|
1224
|
+
"drift_score": drift_score,
|
|
1225
|
+
"feature_id": feature_id,
|
|
1226
|
+
},
|
|
1227
|
+
drift_config,
|
|
1228
|
+
)
|
|
1229
|
+
|
|
1230
|
+
# Check if we should trigger classification
|
|
1231
|
+
if should_trigger_classification(queue, drift_config):
|
|
1232
|
+
classification_prompt = build_classification_prompt(
|
|
1233
|
+
queue, feature_id
|
|
1234
|
+
)
|
|
1235
|
+
|
|
1236
|
+
# Try to run headless classification
|
|
1237
|
+
use_headless = drift_config.get("classification", {}).get(
|
|
1238
|
+
"use_headless", True
|
|
1239
|
+
)
|
|
1240
|
+
if use_headless:
|
|
1241
|
+
try:
|
|
1242
|
+
# Run claude in print mode for classification
|
|
1243
|
+
proc_result = subprocess.run(
|
|
1244
|
+
[
|
|
1245
|
+
"claude",
|
|
1246
|
+
"-p",
|
|
1247
|
+
classification_prompt,
|
|
1248
|
+
"--model",
|
|
1249
|
+
"haiku",
|
|
1250
|
+
"--dangerously-skip-permissions",
|
|
1251
|
+
],
|
|
1252
|
+
capture_output=True,
|
|
1253
|
+
text=True,
|
|
1254
|
+
timeout=120,
|
|
1255
|
+
cwd=str(graph_dir.parent),
|
|
1256
|
+
env={
|
|
1257
|
+
**os.environ,
|
|
1258
|
+
# Prevent hooks from writing new HtmlGraph sessions/events
|
|
1259
|
+
# when we spawn nested `claude` processes.
|
|
1260
|
+
"HTMLGRAPH_DISABLE_TRACKING": "1",
|
|
1261
|
+
},
|
|
1262
|
+
)
|
|
1263
|
+
if proc_result.returncode == 0:
|
|
1264
|
+
nudge = "Drift auto-classification completed. Check .htmlgraph/ for new work item."
|
|
1265
|
+
# Clear the queue after successful classification
|
|
1266
|
+
clear_drift_queue_activities(graph_dir)
|
|
1267
|
+
else:
|
|
1268
|
+
# Fallback to manual prompt
|
|
1269
|
+
nudge = f"""HIGH DRIFT ({drift_score:.2f}) - Headless classification failed.
|
|
1270
|
+
|
|
1271
|
+
{len(queue["activities"])} activities don't align with '{feature_id}'.
|
|
1272
|
+
|
|
1273
|
+
Please classify manually: bug, feature, spike, or chore in .htmlgraph/"""
|
|
1274
|
+
except Exception as e:
|
|
1275
|
+
nudge = f"Drift classification error: {e}. Please classify manually."
|
|
1276
|
+
else:
|
|
1277
|
+
nudge = f"""HIGH DRIFT DETECTED ({drift_score:.2f}) - Auto-classification triggered.
|
|
1278
|
+
|
|
1279
|
+
{len(queue["activities"])} activities don't align with '{feature_id}'.
|
|
1280
|
+
|
|
1281
|
+
ACTION REQUIRED: Spawn a Haiku agent to classify this work:
|
|
1282
|
+
```
|
|
1283
|
+
Task tool with subagent_type="general-purpose", model="haiku", prompt:
|
|
1284
|
+
{classification_prompt[:500]}...
|
|
1285
|
+
```
|
|
1286
|
+
|
|
1287
|
+
Or manually create a work item in .htmlgraph/ (bug, feature, spike, or chore)."""
|
|
1288
|
+
|
|
1289
|
+
# Mark classification as triggered
|
|
1290
|
+
queue["last_classification"] = datetime.now(
|
|
1291
|
+
timezone.utc
|
|
1292
|
+
).isoformat()
|
|
1293
|
+
save_drift_queue(graph_dir, queue)
|
|
1294
|
+
else:
|
|
1295
|
+
nudge = f"Drift detected ({drift_score:.2f}): Activity queued for classification ({len(queue['activities'])}/{drift_settings.get('min_activities_before_classify', 3)} needed)."
|
|
1296
|
+
|
|
1297
|
+
elif drift_score > warning_threshold:
|
|
1298
|
+
# Moderate drift - just warn
|
|
1299
|
+
nudge = f"Drift detected ({drift_score:.2f}): Activity may not align with {feature_id}. Consider refocusing or updating the feature."
|
|
1300
|
+
|
|
1301
|
+
except Exception as e:
|
|
1302
|
+
logger.warning(f"Warning: Could not track activity: {e}")
|
|
1303
|
+
|
|
1304
|
+
# Build response
|
|
1305
|
+
response: dict[str, Any] = {"continue": True}
|
|
1306
|
+
if nudge:
|
|
1307
|
+
response["hookSpecificOutput"] = {
|
|
1308
|
+
"hookEventName": hook_type,
|
|
1309
|
+
"additionalContext": nudge,
|
|
1310
|
+
}
|
|
1311
|
+
return response
|
|
1312
|
+
|
|
1313
|
+
# Unknown hook type
|
|
1314
|
+
return {"continue": True}
|