htmlgraph 0.9.3__py3-none-any.whl → 0.27.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (331)
  1. htmlgraph/.htmlgraph/.session-warning-state.json +6 -0
  2. htmlgraph/.htmlgraph/agents.json +72 -0
  3. htmlgraph/.htmlgraph/htmlgraph.db +0 -0
  4. htmlgraph/__init__.py +173 -17
  5. htmlgraph/__init__.pyi +123 -0
  6. htmlgraph/agent_detection.py +127 -0
  7. htmlgraph/agent_registry.py +45 -30
  8. htmlgraph/agents.py +160 -107
  9. htmlgraph/analytics/__init__.py +9 -2
  10. htmlgraph/analytics/cli.py +190 -51
  11. htmlgraph/analytics/cost_analyzer.py +391 -0
  12. htmlgraph/analytics/cost_monitor.py +664 -0
  13. htmlgraph/analytics/cost_reporter.py +675 -0
  14. htmlgraph/analytics/cross_session.py +617 -0
  15. htmlgraph/analytics/dependency.py +192 -100
  16. htmlgraph/analytics/pattern_learning.py +771 -0
  17. htmlgraph/analytics/session_graph.py +707 -0
  18. htmlgraph/analytics/strategic/__init__.py +80 -0
  19. htmlgraph/analytics/strategic/cost_optimizer.py +611 -0
  20. htmlgraph/analytics/strategic/pattern_detector.py +876 -0
  21. htmlgraph/analytics/strategic/preference_manager.py +709 -0
  22. htmlgraph/analytics/strategic/suggestion_engine.py +747 -0
  23. htmlgraph/analytics/work_type.py +190 -14
  24. htmlgraph/analytics_index.py +135 -51
  25. htmlgraph/api/__init__.py +3 -0
  26. htmlgraph/api/cost_alerts_websocket.py +416 -0
  27. htmlgraph/api/main.py +2498 -0
  28. htmlgraph/api/static/htmx.min.js +1 -0
  29. htmlgraph/api/static/style-redesign.css +1344 -0
  30. htmlgraph/api/static/style.css +1079 -0
  31. htmlgraph/api/templates/dashboard-redesign.html +1366 -0
  32. htmlgraph/api/templates/dashboard.html +794 -0
  33. htmlgraph/api/templates/partials/activity-feed-hierarchical.html +326 -0
  34. htmlgraph/api/templates/partials/activity-feed.html +1100 -0
  35. htmlgraph/api/templates/partials/agents-redesign.html +317 -0
  36. htmlgraph/api/templates/partials/agents.html +317 -0
  37. htmlgraph/api/templates/partials/event-traces.html +373 -0
  38. htmlgraph/api/templates/partials/features-kanban-redesign.html +509 -0
  39. htmlgraph/api/templates/partials/features.html +578 -0
  40. htmlgraph/api/templates/partials/metrics-redesign.html +346 -0
  41. htmlgraph/api/templates/partials/metrics.html +346 -0
  42. htmlgraph/api/templates/partials/orchestration-redesign.html +443 -0
  43. htmlgraph/api/templates/partials/orchestration.html +198 -0
  44. htmlgraph/api/templates/partials/spawners.html +375 -0
  45. htmlgraph/api/templates/partials/work-items.html +613 -0
  46. htmlgraph/api/websocket.py +538 -0
  47. htmlgraph/archive/__init__.py +24 -0
  48. htmlgraph/archive/bloom.py +234 -0
  49. htmlgraph/archive/fts.py +297 -0
  50. htmlgraph/archive/manager.py +583 -0
  51. htmlgraph/archive/search.py +244 -0
  52. htmlgraph/atomic_ops.py +560 -0
  53. htmlgraph/attribute_index.py +208 -0
  54. htmlgraph/bounded_paths.py +539 -0
  55. htmlgraph/builders/__init__.py +14 -0
  56. htmlgraph/builders/base.py +118 -29
  57. htmlgraph/builders/bug.py +150 -0
  58. htmlgraph/builders/chore.py +119 -0
  59. htmlgraph/builders/epic.py +150 -0
  60. htmlgraph/builders/feature.py +31 -6
  61. htmlgraph/builders/insight.py +195 -0
  62. htmlgraph/builders/metric.py +217 -0
  63. htmlgraph/builders/pattern.py +202 -0
  64. htmlgraph/builders/phase.py +162 -0
  65. htmlgraph/builders/spike.py +52 -19
  66. htmlgraph/builders/track.py +148 -72
  67. htmlgraph/cigs/__init__.py +81 -0
  68. htmlgraph/cigs/autonomy.py +385 -0
  69. htmlgraph/cigs/cost.py +475 -0
  70. htmlgraph/cigs/messages_basic.py +472 -0
  71. htmlgraph/cigs/messaging.py +365 -0
  72. htmlgraph/cigs/models.py +771 -0
  73. htmlgraph/cigs/pattern_storage.py +427 -0
  74. htmlgraph/cigs/patterns.py +503 -0
  75. htmlgraph/cigs/posttool_analyzer.py +234 -0
  76. htmlgraph/cigs/reporter.py +818 -0
  77. htmlgraph/cigs/tracker.py +317 -0
  78. htmlgraph/cli/.htmlgraph/.session-warning-state.json +6 -0
  79. htmlgraph/cli/.htmlgraph/agents.json +72 -0
  80. htmlgraph/cli/.htmlgraph/htmlgraph.db +0 -0
  81. htmlgraph/cli/__init__.py +42 -0
  82. htmlgraph/cli/__main__.py +6 -0
  83. htmlgraph/cli/analytics.py +1424 -0
  84. htmlgraph/cli/base.py +685 -0
  85. htmlgraph/cli/constants.py +206 -0
  86. htmlgraph/cli/core.py +954 -0
  87. htmlgraph/cli/main.py +147 -0
  88. htmlgraph/cli/models.py +475 -0
  89. htmlgraph/cli/templates/__init__.py +1 -0
  90. htmlgraph/cli/templates/cost_dashboard.py +399 -0
  91. htmlgraph/cli/work/__init__.py +239 -0
  92. htmlgraph/cli/work/browse.py +115 -0
  93. htmlgraph/cli/work/features.py +568 -0
  94. htmlgraph/cli/work/orchestration.py +676 -0
  95. htmlgraph/cli/work/report.py +728 -0
  96. htmlgraph/cli/work/sessions.py +466 -0
  97. htmlgraph/cli/work/snapshot.py +559 -0
  98. htmlgraph/cli/work/tracks.py +486 -0
  99. htmlgraph/cli_commands/__init__.py +1 -0
  100. htmlgraph/cli_commands/feature.py +195 -0
  101. htmlgraph/cli_framework.py +115 -0
  102. htmlgraph/collections/__init__.py +18 -0
  103. htmlgraph/collections/base.py +415 -98
  104. htmlgraph/collections/bug.py +53 -0
  105. htmlgraph/collections/chore.py +53 -0
  106. htmlgraph/collections/epic.py +53 -0
  107. htmlgraph/collections/feature.py +12 -26
  108. htmlgraph/collections/insight.py +100 -0
  109. htmlgraph/collections/metric.py +92 -0
  110. htmlgraph/collections/pattern.py +97 -0
  111. htmlgraph/collections/phase.py +53 -0
  112. htmlgraph/collections/session.py +194 -0
  113. htmlgraph/collections/spike.py +56 -16
  114. htmlgraph/collections/task_delegation.py +241 -0
  115. htmlgraph/collections/todo.py +511 -0
  116. htmlgraph/collections/traces.py +487 -0
  117. htmlgraph/config/cost_models.json +56 -0
  118. htmlgraph/config.py +190 -0
  119. htmlgraph/context_analytics.py +344 -0
  120. htmlgraph/converter.py +216 -28
  121. htmlgraph/cost_analysis/__init__.py +5 -0
  122. htmlgraph/cost_analysis/analyzer.py +438 -0
  123. htmlgraph/dashboard.html +2406 -307
  124. htmlgraph/dashboard.html.backup +6592 -0
  125. htmlgraph/dashboard.html.bak +7181 -0
  126. htmlgraph/dashboard.html.bak2 +7231 -0
  127. htmlgraph/dashboard.html.bak3 +7232 -0
  128. htmlgraph/db/__init__.py +38 -0
  129. htmlgraph/db/queries.py +790 -0
  130. htmlgraph/db/schema.py +1788 -0
  131. htmlgraph/decorators.py +317 -0
  132. htmlgraph/dependency_models.py +19 -2
  133. htmlgraph/deploy.py +142 -125
  134. htmlgraph/deployment_models.py +474 -0
  135. htmlgraph/docs/API_REFERENCE.md +841 -0
  136. htmlgraph/docs/HTTP_API.md +750 -0
  137. htmlgraph/docs/INTEGRATION_GUIDE.md +752 -0
  138. htmlgraph/docs/ORCHESTRATION_PATTERNS.md +717 -0
  139. htmlgraph/docs/README.md +532 -0
  140. htmlgraph/docs/__init__.py +77 -0
  141. htmlgraph/docs/docs_version.py +55 -0
  142. htmlgraph/docs/metadata.py +93 -0
  143. htmlgraph/docs/migrations.py +232 -0
  144. htmlgraph/docs/template_engine.py +143 -0
  145. htmlgraph/docs/templates/_sections/cli_reference.md.j2 +52 -0
  146. htmlgraph/docs/templates/_sections/core_concepts.md.j2 +29 -0
  147. htmlgraph/docs/templates/_sections/sdk_basics.md.j2 +69 -0
  148. htmlgraph/docs/templates/base_agents.md.j2 +78 -0
  149. htmlgraph/docs/templates/example_user_override.md.j2 +47 -0
  150. htmlgraph/docs/version_check.py +163 -0
  151. htmlgraph/edge_index.py +182 -27
  152. htmlgraph/error_handler.py +544 -0
  153. htmlgraph/event_log.py +100 -52
  154. htmlgraph/event_migration.py +13 -4
  155. htmlgraph/exceptions.py +49 -0
  156. htmlgraph/file_watcher.py +101 -28
  157. htmlgraph/find_api.py +75 -63
  158. htmlgraph/git_events.py +145 -63
  159. htmlgraph/graph.py +1122 -106
  160. htmlgraph/hooks/.htmlgraph/.session-warning-state.json +6 -0
  161. htmlgraph/hooks/.htmlgraph/agents.json +72 -0
  162. htmlgraph/hooks/.htmlgraph/index.sqlite +0 -0
  163. htmlgraph/hooks/__init__.py +45 -0
  164. htmlgraph/hooks/bootstrap.py +169 -0
  165. htmlgraph/hooks/cigs_pretool_enforcer.py +354 -0
  166. htmlgraph/hooks/concurrent_sessions.py +208 -0
  167. htmlgraph/hooks/context.py +350 -0
  168. htmlgraph/hooks/drift_handler.py +525 -0
  169. htmlgraph/hooks/event_tracker.py +1314 -0
  170. htmlgraph/hooks/git_commands.py +175 -0
  171. htmlgraph/hooks/hooks-config.example.json +12 -0
  172. htmlgraph/hooks/installer.py +343 -0
  173. htmlgraph/hooks/orchestrator.py +674 -0
  174. htmlgraph/hooks/orchestrator_reflector.py +223 -0
  175. htmlgraph/hooks/post-checkout.sh +28 -0
  176. htmlgraph/hooks/post-commit.sh +24 -0
  177. htmlgraph/hooks/post-merge.sh +26 -0
  178. htmlgraph/hooks/post_tool_use_failure.py +273 -0
  179. htmlgraph/hooks/post_tool_use_handler.py +257 -0
  180. htmlgraph/hooks/posttooluse.py +408 -0
  181. htmlgraph/hooks/pre-commit.sh +94 -0
  182. htmlgraph/hooks/pre-push.sh +28 -0
  183. htmlgraph/hooks/pretooluse.py +819 -0
  184. htmlgraph/hooks/prompt_analyzer.py +637 -0
  185. htmlgraph/hooks/session_handler.py +668 -0
  186. htmlgraph/hooks/session_summary.py +395 -0
  187. htmlgraph/hooks/state_manager.py +504 -0
  188. htmlgraph/hooks/subagent_detection.py +202 -0
  189. htmlgraph/hooks/subagent_stop.py +369 -0
  190. htmlgraph/hooks/task_enforcer.py +255 -0
  191. htmlgraph/hooks/task_validator.py +177 -0
  192. htmlgraph/hooks/validator.py +628 -0
  193. htmlgraph/ids.py +41 -27
  194. htmlgraph/index.d.ts +286 -0
  195. htmlgraph/learning.py +767 -0
  196. htmlgraph/mcp_server.py +69 -23
  197. htmlgraph/models.py +1586 -87
  198. htmlgraph/operations/README.md +62 -0
  199. htmlgraph/operations/__init__.py +79 -0
  200. htmlgraph/operations/analytics.py +339 -0
  201. htmlgraph/operations/bootstrap.py +289 -0
  202. htmlgraph/operations/events.py +244 -0
  203. htmlgraph/operations/fastapi_server.py +231 -0
  204. htmlgraph/operations/hooks.py +350 -0
  205. htmlgraph/operations/initialization.py +597 -0
  206. htmlgraph/operations/initialization.py.backup +228 -0
  207. htmlgraph/operations/server.py +303 -0
  208. htmlgraph/orchestration/__init__.py +58 -0
  209. htmlgraph/orchestration/claude_launcher.py +179 -0
  210. htmlgraph/orchestration/command_builder.py +72 -0
  211. htmlgraph/orchestration/headless_spawner.py +281 -0
  212. htmlgraph/orchestration/live_events.py +377 -0
  213. htmlgraph/orchestration/model_selection.py +327 -0
  214. htmlgraph/orchestration/plugin_manager.py +140 -0
  215. htmlgraph/orchestration/prompts.py +137 -0
  216. htmlgraph/orchestration/spawner_event_tracker.py +383 -0
  217. htmlgraph/orchestration/spawners/__init__.py +16 -0
  218. htmlgraph/orchestration/spawners/base.py +194 -0
  219. htmlgraph/orchestration/spawners/claude.py +173 -0
  220. htmlgraph/orchestration/spawners/codex.py +435 -0
  221. htmlgraph/orchestration/spawners/copilot.py +294 -0
  222. htmlgraph/orchestration/spawners/gemini.py +471 -0
  223. htmlgraph/orchestration/subprocess_runner.py +36 -0
  224. htmlgraph/orchestration/task_coordination.py +343 -0
  225. htmlgraph/orchestration.md +563 -0
  226. htmlgraph/orchestrator-system-prompt-optimized.txt +863 -0
  227. htmlgraph/orchestrator.py +669 -0
  228. htmlgraph/orchestrator_config.py +357 -0
  229. htmlgraph/orchestrator_mode.py +328 -0
  230. htmlgraph/orchestrator_validator.py +133 -0
  231. htmlgraph/parallel.py +646 -0
  232. htmlgraph/parser.py +160 -35
  233. htmlgraph/path_query.py +608 -0
  234. htmlgraph/pattern_matcher.py +636 -0
  235. htmlgraph/planning.py +147 -52
  236. htmlgraph/pydantic_models.py +476 -0
  237. htmlgraph/quality_gates.py +350 -0
  238. htmlgraph/query_builder.py +109 -72
  239. htmlgraph/query_composer.py +509 -0
  240. htmlgraph/reflection.py +443 -0
  241. htmlgraph/refs.py +344 -0
  242. htmlgraph/repo_hash.py +512 -0
  243. htmlgraph/repositories/__init__.py +292 -0
  244. htmlgraph/repositories/analytics_repository.py +455 -0
  245. htmlgraph/repositories/analytics_repository_standard.py +628 -0
  246. htmlgraph/repositories/feature_repository.py +581 -0
  247. htmlgraph/repositories/feature_repository_htmlfile.py +668 -0
  248. htmlgraph/repositories/feature_repository_memory.py +607 -0
  249. htmlgraph/repositories/feature_repository_sqlite.py +858 -0
  250. htmlgraph/repositories/filter_service.py +620 -0
  251. htmlgraph/repositories/filter_service_standard.py +445 -0
  252. htmlgraph/repositories/shared_cache.py +621 -0
  253. htmlgraph/repositories/shared_cache_memory.py +395 -0
  254. htmlgraph/repositories/track_repository.py +552 -0
  255. htmlgraph/repositories/track_repository_htmlfile.py +619 -0
  256. htmlgraph/repositories/track_repository_memory.py +508 -0
  257. htmlgraph/repositories/track_repository_sqlite.py +711 -0
  258. htmlgraph/routing.py +8 -19
  259. htmlgraph/scripts/deploy.py +1 -2
  260. htmlgraph/sdk/__init__.py +398 -0
  261. htmlgraph/sdk/__init__.pyi +14 -0
  262. htmlgraph/sdk/analytics/__init__.py +19 -0
  263. htmlgraph/sdk/analytics/engine.py +155 -0
  264. htmlgraph/sdk/analytics/helpers.py +178 -0
  265. htmlgraph/sdk/analytics/registry.py +109 -0
  266. htmlgraph/sdk/base.py +484 -0
  267. htmlgraph/sdk/constants.py +216 -0
  268. htmlgraph/sdk/core.pyi +308 -0
  269. htmlgraph/sdk/discovery.py +120 -0
  270. htmlgraph/sdk/help/__init__.py +12 -0
  271. htmlgraph/sdk/help/mixin.py +699 -0
  272. htmlgraph/sdk/mixins/__init__.py +15 -0
  273. htmlgraph/sdk/mixins/attribution.py +113 -0
  274. htmlgraph/sdk/mixins/mixin.py +410 -0
  275. htmlgraph/sdk/operations/__init__.py +12 -0
  276. htmlgraph/sdk/operations/mixin.py +427 -0
  277. htmlgraph/sdk/orchestration/__init__.py +17 -0
  278. htmlgraph/sdk/orchestration/coordinator.py +203 -0
  279. htmlgraph/sdk/orchestration/spawner.py +204 -0
  280. htmlgraph/sdk/planning/__init__.py +19 -0
  281. htmlgraph/sdk/planning/bottlenecks.py +93 -0
  282. htmlgraph/sdk/planning/mixin.py +211 -0
  283. htmlgraph/sdk/planning/parallel.py +186 -0
  284. htmlgraph/sdk/planning/queue.py +210 -0
  285. htmlgraph/sdk/planning/recommendations.py +87 -0
  286. htmlgraph/sdk/planning/smart_planning.py +319 -0
  287. htmlgraph/sdk/session/__init__.py +19 -0
  288. htmlgraph/sdk/session/continuity.py +57 -0
  289. htmlgraph/sdk/session/handoff.py +110 -0
  290. htmlgraph/sdk/session/info.py +309 -0
  291. htmlgraph/sdk/session/manager.py +103 -0
  292. htmlgraph/sdk/strategic/__init__.py +26 -0
  293. htmlgraph/sdk/strategic/mixin.py +563 -0
  294. htmlgraph/server.py +685 -180
  295. htmlgraph/services/__init__.py +10 -0
  296. htmlgraph/services/claiming.py +199 -0
  297. htmlgraph/session_hooks.py +300 -0
  298. htmlgraph/session_manager.py +1392 -175
  299. htmlgraph/session_registry.py +587 -0
  300. htmlgraph/session_state.py +436 -0
  301. htmlgraph/session_warning.py +201 -0
  302. htmlgraph/sessions/__init__.py +23 -0
  303. htmlgraph/sessions/handoff.py +756 -0
  304. htmlgraph/setup.py +34 -17
  305. htmlgraph/spike_index.py +143 -0
  306. htmlgraph/sync_docs.py +12 -15
  307. htmlgraph/system_prompts.py +450 -0
  308. htmlgraph/templates/AGENTS.md.template +366 -0
  309. htmlgraph/templates/CLAUDE.md.template +97 -0
  310. htmlgraph/templates/GEMINI.md.template +87 -0
  311. htmlgraph/templates/orchestration-view.html +350 -0
  312. htmlgraph/track_builder.py +146 -15
  313. htmlgraph/track_manager.py +69 -21
  314. htmlgraph/transcript.py +890 -0
  315. htmlgraph/transcript_analytics.py +699 -0
  316. htmlgraph/types.py +323 -0
  317. htmlgraph/validation.py +115 -0
  318. htmlgraph/watch.py +8 -5
  319. htmlgraph/work_type_utils.py +3 -2
  320. {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/dashboard.html +2406 -307
  321. htmlgraph-0.27.5.data/data/htmlgraph/templates/AGENTS.md.template +366 -0
  322. htmlgraph-0.27.5.data/data/htmlgraph/templates/CLAUDE.md.template +97 -0
  323. htmlgraph-0.27.5.data/data/htmlgraph/templates/GEMINI.md.template +87 -0
  324. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/METADATA +97 -64
  325. htmlgraph-0.27.5.dist-info/RECORD +337 -0
  326. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/entry_points.txt +1 -1
  327. htmlgraph/cli.py +0 -2688
  328. htmlgraph/sdk.py +0 -709
  329. htmlgraph-0.9.3.dist-info/RECORD +0 -61
  330. {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/styles.css +0 -0
  331. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/WHEEL +0 -0
htmlgraph/learning.py ADDED
@@ -0,0 +1,767 @@
+from __future__ import annotations
+
+"""
+Active Learning Persistence Module.
+
+Bridges TranscriptAnalytics to the HtmlGraph for persistent learning.
+Analyzes sessions and persists patterns, insights, and metrics to the graph.
+"""
+
+
+from collections import Counter
+from datetime import datetime
+from typing import TYPE_CHECKING, Any, cast
+
+if TYPE_CHECKING:
+    from htmlgraph.sdk import SDK
+
+
+class LearningPersistence:
+    """Persists analytics insights to the HtmlGraph.
+
+    Example:
+        >>> sdk = SDK(agent="claude")
+        >>> learning = LearningPersistence(sdk)
+        >>> learning.persist_session_insight("sess-123")
+        >>> learning.persist_patterns()
+    """
+
+    def __init__(self, sdk: SDK):
+        self.sdk = sdk
+
+    def persist_session_insight(self, session_id: str) -> str | None:
+        """Analyze a session and persist insight to graph.
+
+        Args:
+            session_id: Session to analyze
+
+        Returns:
+            Insight ID if created, None if session not found
+        """
+        # Use session_manager to get full Session object with activity_log
+        # (sdk.sessions.get returns generic Node without activity_log)
+        session = self.sdk.session_manager.get_session(session_id)
+        if not session:
+            return None
+
+        # Calculate health metrics from activity log
+        health = self._calculate_health(session)
+
+        # Create insight using builder pattern
+        # Add issues and recommendations BEFORE save() since Node objects are immutable
+        builder = (
+            self.sdk.insights.create(f"Session Analysis: {session_id}")
+            .for_session(session_id)
+            .set_health_scores(
+                efficiency=health.get("efficiency", 0.0),
+                retry_rate=health.get("retry_rate", 0.0),
+                context_rebuilds=health.get("context_rebuilds", 0),
+                tool_diversity=health.get("tool_diversity", 0.0),
+                error_recovery=health.get("error_recovery", 0.0),
+            )
+        )
+
+        # Add issues via builder
+        for issue in health.get("issues", []):
+            builder.add_issue(issue)
+
+        # Add recommendations via builder
+        for rec in health.get("recommendations", []):
+            builder.add_recommendation(rec)
+
+        # Save and return
+        insight = builder.save()
+        return cast(str, insight.id)
+
+    def _calculate_health(self, session: Any) -> dict[str, Any]:
+        """Calculate health metrics from session activity log."""
+        health: dict[str, Any] = {
+            "efficiency": 0.8,  # Default reasonable value
+            "retry_rate": 0.0,
+            "context_rebuilds": 0,
+            "tool_diversity": 0.5,
+            "error_recovery": 1.0,
+            "issues": [],
+            "recommendations": [],
+        }
+
+        if not hasattr(session, "activity_log") or not session.activity_log:
+            return health
+
+        activities = session.activity_log
+        total = len(activities)
+
+        if total == 0:
+            return health
+
+        # Count tool usage
+        tools = [
+            a.tool if not isinstance(a, dict) else a.get("tool", "") for a in activities
+        ]
+        tool_counts = Counter(tools)
+        unique_tools = len(tool_counts)
+
+        # Tool diversity (0-1, normalized by 10 expected tools)
+        health["tool_diversity"] = min(unique_tools / 10.0, 1.0)
+
+        # Detect retries (same tool twice in a row)
+        retries = sum(1 for i in range(1, len(tools)) if tools[i] == tools[i - 1])
+        health["retry_rate"] = retries / total if total > 0 else 0.0
+
+        # Detect context rebuilds (Read same file multiple times)
+        reads = [
+            a
+            for a in activities
+            if (hasattr(a, "tool") and a.tool == "Read")
+            or (isinstance(a, dict) and a.get("tool") == "Read")
+        ]
+        if reads:
+            read_targets = [
+                str(
+                    getattr(r, "summary", "")
+                    if hasattr(r, "summary")
+                    else r.get("summary", "")
+                )
+                for r in reads
+            ]
+            rebuild_count = len(read_targets) - len(set(read_targets))
+            health["context_rebuilds"] = rebuild_count
+
+        # Calculate efficiency (inverse of wasted operations)
+        wasted = retries + health["context_rebuilds"]
+        health["efficiency"] = max(0.0, 1.0 - (wasted / total))
+
+        # Generate issues
+        if health["retry_rate"] > 0.2:
+            health["issues"].append(f"High retry rate: {health['retry_rate']:.0%}")
+            health["recommendations"].append(
+                "Consider reading more context before acting"
+            )
+
+        if health["context_rebuilds"] > 2:
+            health["issues"].append(
+                f"Excessive context rebuilds: {health['context_rebuilds']}"
+            )
+            health["recommendations"].append("Cache file contents or take notes")
+
+        if health["tool_diversity"] < 0.3:
+            health["issues"].append("Low tool diversity")
+            health["recommendations"].append("Consider using more specialized tools")
+
+        return health
+
+    def persist_patterns(self, min_count: int = 2) -> list[str]:
+        """Detect and persist workflow patterns IN SESSIONS (not as separate files).
+
+        This refactored version stores patterns inline within session HTML files
+        to avoid creating 2,890+ individual pattern files.
+
+        Args:
+            min_count: Minimum occurrences to persist a pattern
+
+        Returns:
+            List of session IDs that had patterns updated
+        """
+        # Collect tool sequences per session (not globally)
+        session_ids_updated: list[str] = []
+
+        for session in self.sdk.session_manager.session_converter.load_all():
+            if not session.activity_log:
+                continue
+
+            # Extract 3-tool sequences from this session
+            tools = [
+                a.tool if not isinstance(a, dict) else a.get("tool", "")
+                for a in session.activity_log
+            ]
+
+            # Count sequences in this session
+            sequences: list[tuple[Any, ...]] = []
+            for i in range(len(tools) - 2):
+                seq = tools[i : i + 3]
+                if all(seq):  # No empty tools
+                    sequences.append(tuple(seq))
+
+            seq_counts = Counter(sequences)
+
+            # Update session's detected_patterns
+            patterns_updated = False
+            for seq, count in seq_counts.items():  # type: ignore[assignment]
+                if count >= min_count:
+                    # Check if pattern already exists in this session
+                    existing = next(
+                        (
+                            p
+                            for p in session.detected_patterns
+                            if p.get("sequence") == list(seq)
+                        ),
+                        None,
+                    )
+
+                    if existing:
+                        # Update existing pattern
+                        existing["detection_count"] = count
+                        existing["last_detected"] = datetime.now().isoformat()
+                        patterns_updated = True
+                    else:
+                        # Add new pattern to session
+                        pattern_type = self._classify_pattern(list(seq))
+                        now = datetime.now()
+                        session.detected_patterns.append(
+                            {
+                                "sequence": list(seq),
+                                "pattern_type": pattern_type,
+                                "detection_count": count,
+                                "first_detected": now.isoformat(),
+                                "last_detected": now.isoformat(),
+                            }
+                        )
+                        patterns_updated = True
+
+            # Save updated session if patterns were modified
+            if patterns_updated:
+                self.sdk.session_manager.session_converter.save(session)
+                session_ids_updated.append(session.id)
+
+        # Also persist parallel patterns
+        parallel_session_ids = self.persist_parallel_patterns(min_count=min_count)
+        session_ids_updated.extend(parallel_session_ids)
+
+        return session_ids_updated
+
+    def persist_parallel_patterns(self, min_count: int = 2) -> list[str]:
+        """Detect and persist parallel execution patterns IN SESSIONS.
+
+        Identifies when multiple tools are invoked in parallel (same parent_activity_id).
+        This is especially useful for detecting orchestrator patterns like parallel Task delegation.
+
+        Args:
+            min_count: Minimum occurrences to persist a pattern
+
+        Returns:
+            List of session IDs that had parallel patterns updated
+        """
+        from collections import defaultdict
+
+        session_ids_updated: list[str] = []
+
+        for session in self.sdk.session_manager.session_converter.load_all():
+            if not session.activity_log:
+                continue
+
+            # Group activities by parent_activity_id
+            parent_groups: dict[str, list[Any]] = defaultdict(list)
+            for activity in session.activity_log:
+                parent_id = (
+                    activity.parent_activity_id
+                    if not isinstance(activity, dict)
+                    else activity.get("parent_activity_id")
+                )
+                if parent_id:  # Only track activities with a parent
+                    parent_groups[parent_id].append(activity)
+
+            # Collect parallel patterns for this session
+            parallel_patterns: list[tuple[str, ...]] = []
+            for parent_id, activities in parent_groups.items():
+                if len(activities) < 2:
+                    continue
+
+                # Sort by timestamp
+                sorted_activities = sorted(
+                    activities,
+                    key=lambda a: (
+                        a.timestamp
+                        if not isinstance(a, dict)
+                        else a.get("timestamp", datetime.min)
+                    ),
+                )
+
+                # Extract tool sequence
+                tools = tuple(
+                    a.tool if not isinstance(a, dict) else a.get("tool", "")
+                    for a in sorted_activities
+                )
+
+                # Filter out empty tools
+                if all(tools):
+                    parallel_patterns.append(tools)
+
+            # Count parallel patterns in this session
+            pattern_counts = Counter(parallel_patterns)
+
+            # Update session's detected_patterns with parallel patterns
+            patterns_updated = False
+            for tools, count in pattern_counts.items():
+                if count >= min_count:
+                    tool_names = list(tools)
+
+                    # Check if pattern already exists in this session
+                    # Parallel patterns have special naming: "Parallel[N]: tool1 || tool2"
+                    existing = next(
+                        (
+                            p
+                            for p in session.detected_patterns
+                            if p.get("sequence") == tool_names
+                            and p.get("is_parallel", False)
+                        ),
+                        None,
+                    )
+
+                    if existing:
+                        # Update existing parallel pattern
+                        existing["detection_count"] = count
+                        existing["last_detected"] = datetime.now().isoformat()
+                        patterns_updated = True
+                    else:
+                        # Add new parallel pattern to session
+                        pattern_type = self._classify_pattern(
+                            tool_names, is_parallel=True
+                        )
+                        now = datetime.now()
+                        session.detected_patterns.append(
+                            {
+                                "sequence": tool_names,
+                                "pattern_type": pattern_type,
+                                "detection_count": count,
+                                "first_detected": now.isoformat(),
+                                "last_detected": now.isoformat(),
+                                "is_parallel": True,
+                                "parallel_count": len(tools),
+                            }
+                        )
+                        patterns_updated = True
+
+            # Save updated session if patterns were modified
+            if patterns_updated:
+                self.sdk.session_manager.session_converter.save(session)
+                session_ids_updated.append(session.id)
+
+        return session_ids_updated
+
+    def _classify_pattern(self, sequence: list[str], is_parallel: bool = False) -> str:
+        """Classify a pattern as optimal, anti-pattern, or neutral.
+
+        Args:
+            sequence: List of tool names in the pattern
+            is_parallel: Whether this is a parallel execution pattern
+
+        Returns:
+            Pattern classification string
+        """
+        seq = tuple(sequence)
+
+        # Orchestrator patterns (parallel execution)
+        if is_parallel:
+            # Parallel Task delegation is optimal (orchestrator pattern)
+            if all(tool == "Task" for tool in sequence) and len(sequence) >= 2:
+                return "optimal"
+            # Mixed parallel operations can also be optimal
+            if "Task" in sequence:
+                return "optimal"
+            # Other parallel patterns are neutral
+            return "neutral"
+
+        # Sequential anti-patterns for orchestrators
+        # Multiple sequential Tasks without parallelism is an anti-pattern
+        if seq == ("Task", "Task", "Task"):
+            return "anti-pattern"
+
+        # Known optimal patterns (sequential)
+        optimal = [
+            ("Read", "Edit", "Bash"),  # Read, modify, test
+            ("Grep", "Read", "Edit"),  # Search, understand, modify
+            ("Glob", "Read", "Edit"),  # Find, understand, modify
+        ]
+
+        # Known anti-patterns (sequential)
+        anti = [
+            ("Edit", "Edit", "Edit"),  # Too many edits without testing
+            ("Bash", "Bash", "Bash"),  # Command spam
+            ("Read", "Read", "Read"),  # Excessive reading without action
+        ]
+
+        if seq in optimal:
+            return "optimal"
+        elif seq in anti:
+            return "anti-pattern"
+        else:
+            return "neutral"
+
+    def persist_metrics(self, period: str = "weekly") -> str | None:
+        """Aggregate and persist metrics for the current period.
+
+        Args:
+            period: "daily", "weekly", or "monthly"
+
+        Returns:
+            Metric ID if created
+        """
+        from datetime import timedelta
+
+        now = datetime.now()
+
+        # Calculate period boundaries
+        if period == "daily":
+            start = now.replace(hour=0, minute=0, second=0, microsecond=0)
+            end = start + timedelta(days=1)
+        elif period == "weekly":
+            start = now - timedelta(days=now.weekday())
+            start = start.replace(hour=0, minute=0, second=0, microsecond=0)
+            end = start + timedelta(days=7)
+        else:  # monthly
+            start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
+            if now.month == 12:
+                end = start.replace(year=now.year + 1, month=1)
+            else:
+                end = start.replace(month=now.month + 1)
+
+        # Collect insights for this period
+        insights = list(self.sdk.insights.all())
+        period_insights = [
+            i
+            for i in insights
+            if hasattr(i, "analyzed_at")
+            and i.analyzed_at
+            and start <= i.analyzed_at <= end
+        ]
+
+        if not period_insights:
+            # Use all insights if none in period
+            period_insights = insights
+
+        if not period_insights:
+            return None
+
+        # Calculate aggregate metrics
+        efficiency_scores = [
+            getattr(i, "efficiency_score", 0.0)
+            for i in period_insights
+            if getattr(i, "efficiency_score", None)
+        ]
+        avg_efficiency = (
+            sum(efficiency_scores) / len(efficiency_scores)
+            if efficiency_scores
+            else 0.0
+        )
+
+        # Create metric
+        metric = (
+            self.sdk.metrics.create(
+                f"Efficiency Metric: {period} ending {end.strftime('%Y-%m-%d')}"
+            )
+            .set_scope("session")
+            .set_period(period, start, end)
+            .set_metrics(
+                {
+                    "avg_efficiency": avg_efficiency,
+                    "sessions_analyzed": len(period_insights),
+                }
+            )
+            .save()
+        )
+
+        # Note: After save(), metric is a Node object
+        # The sessions_in_period is tracked in metric_values
+        metric.properties = metric.properties or {}
+        metric.properties["data_points_count"] = len(period_insights)
+        metric.properties["sessions_in_period"] = [
+            getattr(i, "session_id", i.id)
+            for i in period_insights
+            if hasattr(i, "session_id") or hasattr(i, "id")
+        ]
+        self.sdk.metrics.update(metric)
+
+        return cast(str, metric.id)
+
+    def analyze_for_orchestrator(self, session_id: str) -> dict[str, Any]:
+        """Analyze session and return compact feedback for orchestrator.
+
+        This method is called on work item completion to surface:
+        - Anti-patterns detected in the session
+        - Errors encountered
+        - Efficiency metrics
+        - Test execution results (pytest)
+        - Actionable recommendations
+
+        Args:
+            session_id: Session to analyze
+
+        Returns:
+            Dict with analysis results for orchestrator feedback
+        """
+        result: dict[str, Any] = {
+            "session_id": session_id,
+            "anti_patterns": [],
+            "errors": [],
+            "error_count": 0,
+            "efficiency": 0.8,
+            "issues": [],
+            "recommendations": [],
+            "test_runs": [],
+            "test_summary": None,
+            "summary": "",
+        }
+
+        session = self.sdk.session_manager.get_session(session_id)
+        if (
+            not session
+            or not hasattr(session, "activity_log")
+            or not session.activity_log
+        ):
+            result["summary"] = "No activity data available for analysis"
+            return result
+
+        activities = session.activity_log
+
+        # Count errors (success=False)
+        errors = []
+        for a in activities:
+            success = a.success if not isinstance(a, dict) else a.get("success", True)
+            if not success:
+                tool = a.tool if not isinstance(a, dict) else a.get("tool", "")
+                summary = a.summary if not isinstance(a, dict) else a.get("summary", "")
+                errors.append({"tool": tool, "summary": summary[:100]})
+
+        result["errors"] = errors[:10]  # Limit to 10 most recent
+        result["error_count"] = len(errors)
+
+        # Detect anti-patterns in this session
+        tools = [
+            a.tool if not isinstance(a, dict) else a.get("tool", "") for a in activities
+        ]
+
+        # Known anti-patterns
+        anti_patterns = [
+            ("Edit", "Edit", "Edit"),
+            ("Bash", "Bash", "Bash"),
+            ("Read", "Read", "Read"),
+        ]
+
+        # Count anti-pattern occurrences
+        anti_pattern_counts: Counter[tuple[str, ...]] = Counter()
+        for i in range(len(tools) - 2):
+            seq = tuple(tools[i : i + 3])
+            if seq in anti_patterns:
+                anti_pattern_counts[seq] += 1
+
+        for seq, count in anti_pattern_counts.most_common():
+            result["anti_patterns"].append(
+                {
+                    "sequence": list(seq),
+                    "count": count,
+                    "description": self._describe_anti_pattern(seq),
+                }
+            )
+
+        # Calculate health metrics
+        health = self._calculate_health(session)
+        result["efficiency"] = health.get("efficiency", 0.8)
+        result["issues"] = health.get("issues", [])
+        result["recommendations"] = health.get("recommendations", [])
+
+        # Analyze test runs (pytest)
+        test_analysis = self._analyze_test_runs(activities)
+        result["test_runs"] = test_analysis["test_runs"]
+        result["test_summary"] = test_analysis["summary"]
+
+        # Add test-related issues and recommendations
+        if test_analysis.get("issues"):
+            result["issues"].extend(test_analysis["issues"])
+        if test_analysis.get("recommendations"):
+            result["recommendations"].extend(test_analysis["recommendations"])
+
+        # Generate summary
+        summary_parts = []
+        if result["error_count"] > 0:
+            summary_parts.append(f"{result['error_count']} errors")
+        if result["anti_patterns"]:
+            total_anti = sum(p["count"] for p in result["anti_patterns"])
+            summary_parts.append(f"{total_anti} anti-pattern occurrences")
+        if result["efficiency"] < 0.7:
+            summary_parts.append(f"low efficiency ({result['efficiency']:.0%})")
+
+        # Include test summary in main summary
+        if result["test_summary"]:
+            summary_parts.append(result["test_summary"])
+
+        if summary_parts:
+            result["summary"] = "⚠️ Issues: " + ", ".join(summary_parts)
+        else:
+            result["summary"] = "✓ Session completed cleanly"
+
+        return result
+
+    def _analyze_test_runs(self, activities: list[Any]) -> dict[str, Any]:
+        """Analyze pytest test runs from activity log.
+
+        Args:
+            activities: List of ActivityEntry objects
+
+        Returns:
+            Dict with test_runs, summary, issues, recommendations
+        """
+        import re
+
+        result: dict[str, Any] = {
+            "test_runs": [],
+            "summary": None,
+            "issues": [],
+            "recommendations": [],
+        }
+
+        # Find all pytest runs in Bash activities
+        for activity in activities:
+            tool = (
+                activity.tool
+                if not isinstance(activity, dict)
+                else activity.get("tool", "")
+            )
+            summary = (
+                activity.summary
+                if not isinstance(activity, dict)
+                else activity.get("summary", "")
+            )
+            success = (
+                activity.success
+                if not isinstance(activity, dict)
+                else activity.get("success", True)
+            )
+
+            # Check if this is a pytest run
+            if tool == "Bash" and (
+                "pytest" in summary.lower() or "py.test" in summary.lower()
+            ):
+                test_run: dict[str, Any] = {
+                    "command": summary,
+                    "success": success,
+                    "passed": None,
+                    "failed": None,
+                    "skipped": None,
+                    "errors": None,
+                }
+
+                # Try to extract test results from payload if available
+                payload = (
+                    activity.payload
+                    if not isinstance(activity, dict)
+                    else activity.get("payload", {})
+                )
+                if payload and isinstance(payload, dict):
+                    output = payload.get("output", "") or payload.get("stdout", "")
+                    if output:
+                        # Parse pytest output for results
+                        # Example: "5 passed, 2 failed, 1 skipped in 2.34s"
+                        # Example: "===== 10 passed in 1.23s ====="
+                        passed_match = re.search(r"(\d+)\s+passed", output)
+                        failed_match = re.search(r"(\d+)\s+failed", output)
+                        skipped_match = re.search(r"(\d+)\s+skipped", output)
+                        error_match = re.search(r"(\d+)\s+error", output)
+
+                        if passed_match:
+                            test_run["passed"] = int(passed_match.group(1))
+                        if failed_match:
+                            test_run["failed"] = int(failed_match.group(1))
+                        if skipped_match:
+                            test_run["skipped"] = int(skipped_match.group(1))
+                        if error_match:
+                            test_run["errors"] = int(error_match.group(1))
+
+                result["test_runs"].append(test_run)
+
+        # Generate summary and recommendations
+        if result["test_runs"]:
+            total_runs = len(result["test_runs"])
+            successful_runs = sum(1 for r in result["test_runs"] if r["success"])
+            failed_runs = total_runs - successful_runs
+
+            # Calculate total test results across all runs
+            total_passed = sum(r["passed"] or 0 for r in result["test_runs"])
+            total_failed = sum(r["failed"] or 0 for r in result["test_runs"])
+            total_errors = sum(r["errors"] or 0 for r in result["test_runs"])
+
+            # Generate summary
+            summary_parts = [f"{total_runs} test run{'s' if total_runs > 1 else ''}"]
+            if total_passed > 0:
+                summary_parts.append(f"{total_passed} passed")
+            if total_failed > 0:
+                summary_parts.append(f"{total_failed} failed")
+            if total_errors > 0:
+                summary_parts.append(f"{total_errors} errors")
+
+            result["summary"] = ", ".join(summary_parts)
+
+            # Add issues and recommendations
+            if failed_runs > 0:
+                result["issues"].append(
+                    f"{failed_runs} test run{'s' if failed_runs > 1 else ''} failed"
+                )
+
+            if total_runs > 5:
+                result["issues"].append(f"High test run count: {total_runs}")
+                result["recommendations"].append(
+                    "Consider fixing tests in one batch to reduce test iterations"
+                )
+
+            if total_failed > 0 and successful_runs == 0:
+                result["recommendations"].append(
+                    "No passing test runs - verify test environment and dependencies"
+                )
+
+            # Positive feedback for good testing practices
+            if successful_runs > 0 and failed_runs == 0:
+                result["summary"] = f"✓ {result['summary']}"
+
+        return result
+
+    def _describe_anti_pattern(self, seq: tuple) -> str:
+        """Return human-readable description of an anti-pattern."""
+        descriptions = {
+            (
+                "Edit",
+                "Edit",
+                "Edit",
+            ): "Multiple edits without testing - run tests between changes",
+            ("Bash", "Bash", "Bash"): "Command spam - plan commands before executing",
+            (
+                "Read",
+                "Read",
+                "Read",
+            ): "Excessive reading - take notes or use grep to find specific content",
+        }
+        return descriptions.get(seq, f"Repeated {seq[0]} without variation")
+
+
+def analyze_on_completion(sdk: SDK, session_id: str) -> dict:
+    """Analyze session on work item completion and return orchestrator feedback.
+
+    This is the main entry point called by complete_feature().
+
+    Returns:
+        Dict with:
+        - anti_patterns: List of detected anti-patterns with counts
+        - errors: List of error summaries
+        - error_count: Total error count
+        - efficiency: Efficiency score (0.0-1.0)
+        - issues: List of detected issues
+        - recommendations: List of recommendations
+        - summary: One-line summary for orchestrator
+    """
+    learning = LearningPersistence(sdk)
+    return learning.analyze_for_orchestrator(session_id)
+
+
+def auto_persist_on_session_end(sdk: SDK, session_id: str) -> dict:
+    """Convenience function to auto-persist learning data when session ends.
+
+    Returns:
+        Dict with insight_id, pattern_ids, metric_id
+    """
+    learning = LearningPersistence(sdk)
+
+    result = {
+        "insight_id": learning.persist_session_insight(session_id),
+        "pattern_ids": learning.persist_patterns(),
+        "metric_id": learning.persist_metrics(),
+    }
+
+    return result
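
For orientation, the two module-level functions above are the intended entry points into the new learning module. A minimal usage sketch, assuming only what the docstrings show (the SDK construction and the "sess-123" session ID come from the class docstring; the surrounding wiring is illustrative, not part of the package):

from htmlgraph.sdk import SDK
from htmlgraph.learning import analyze_on_completion, auto_persist_on_session_end

sdk = SDK(agent="claude")  # construction as shown in LearningPersistence's docstring

# On work-item completion: compact orchestrator feedback (errors, anti-patterns, test summary)
feedback = analyze_on_completion(sdk, "sess-123")
print(feedback["summary"])

# On session end: persist the session insight, inline patterns, and period metrics
persisted = auto_persist_on_session_end(sdk, "sess-123")
print(persisted["insight_id"], persisted["metric_id"])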