htmlgraph 0.9.3__py3-none-any.whl → 0.27.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (331) hide show
  1. htmlgraph/.htmlgraph/.session-warning-state.json +6 -0
  2. htmlgraph/.htmlgraph/agents.json +72 -0
  3. htmlgraph/.htmlgraph/htmlgraph.db +0 -0
  4. htmlgraph/__init__.py +173 -17
  5. htmlgraph/__init__.pyi +123 -0
  6. htmlgraph/agent_detection.py +127 -0
  7. htmlgraph/agent_registry.py +45 -30
  8. htmlgraph/agents.py +160 -107
  9. htmlgraph/analytics/__init__.py +9 -2
  10. htmlgraph/analytics/cli.py +190 -51
  11. htmlgraph/analytics/cost_analyzer.py +391 -0
  12. htmlgraph/analytics/cost_monitor.py +664 -0
  13. htmlgraph/analytics/cost_reporter.py +675 -0
  14. htmlgraph/analytics/cross_session.py +617 -0
  15. htmlgraph/analytics/dependency.py +192 -100
  16. htmlgraph/analytics/pattern_learning.py +771 -0
  17. htmlgraph/analytics/session_graph.py +707 -0
  18. htmlgraph/analytics/strategic/__init__.py +80 -0
  19. htmlgraph/analytics/strategic/cost_optimizer.py +611 -0
  20. htmlgraph/analytics/strategic/pattern_detector.py +876 -0
  21. htmlgraph/analytics/strategic/preference_manager.py +709 -0
  22. htmlgraph/analytics/strategic/suggestion_engine.py +747 -0
  23. htmlgraph/analytics/work_type.py +190 -14
  24. htmlgraph/analytics_index.py +135 -51
  25. htmlgraph/api/__init__.py +3 -0
  26. htmlgraph/api/cost_alerts_websocket.py +416 -0
  27. htmlgraph/api/main.py +2498 -0
  28. htmlgraph/api/static/htmx.min.js +1 -0
  29. htmlgraph/api/static/style-redesign.css +1344 -0
  30. htmlgraph/api/static/style.css +1079 -0
  31. htmlgraph/api/templates/dashboard-redesign.html +1366 -0
  32. htmlgraph/api/templates/dashboard.html +794 -0
  33. htmlgraph/api/templates/partials/activity-feed-hierarchical.html +326 -0
  34. htmlgraph/api/templates/partials/activity-feed.html +1100 -0
  35. htmlgraph/api/templates/partials/agents-redesign.html +317 -0
  36. htmlgraph/api/templates/partials/agents.html +317 -0
  37. htmlgraph/api/templates/partials/event-traces.html +373 -0
  38. htmlgraph/api/templates/partials/features-kanban-redesign.html +509 -0
  39. htmlgraph/api/templates/partials/features.html +578 -0
  40. htmlgraph/api/templates/partials/metrics-redesign.html +346 -0
  41. htmlgraph/api/templates/partials/metrics.html +346 -0
  42. htmlgraph/api/templates/partials/orchestration-redesign.html +443 -0
  43. htmlgraph/api/templates/partials/orchestration.html +198 -0
  44. htmlgraph/api/templates/partials/spawners.html +375 -0
  45. htmlgraph/api/templates/partials/work-items.html +613 -0
  46. htmlgraph/api/websocket.py +538 -0
  47. htmlgraph/archive/__init__.py +24 -0
  48. htmlgraph/archive/bloom.py +234 -0
  49. htmlgraph/archive/fts.py +297 -0
  50. htmlgraph/archive/manager.py +583 -0
  51. htmlgraph/archive/search.py +244 -0
  52. htmlgraph/atomic_ops.py +560 -0
  53. htmlgraph/attribute_index.py +208 -0
  54. htmlgraph/bounded_paths.py +539 -0
  55. htmlgraph/builders/__init__.py +14 -0
  56. htmlgraph/builders/base.py +118 -29
  57. htmlgraph/builders/bug.py +150 -0
  58. htmlgraph/builders/chore.py +119 -0
  59. htmlgraph/builders/epic.py +150 -0
  60. htmlgraph/builders/feature.py +31 -6
  61. htmlgraph/builders/insight.py +195 -0
  62. htmlgraph/builders/metric.py +217 -0
  63. htmlgraph/builders/pattern.py +202 -0
  64. htmlgraph/builders/phase.py +162 -0
  65. htmlgraph/builders/spike.py +52 -19
  66. htmlgraph/builders/track.py +148 -72
  67. htmlgraph/cigs/__init__.py +81 -0
  68. htmlgraph/cigs/autonomy.py +385 -0
  69. htmlgraph/cigs/cost.py +475 -0
  70. htmlgraph/cigs/messages_basic.py +472 -0
  71. htmlgraph/cigs/messaging.py +365 -0
  72. htmlgraph/cigs/models.py +771 -0
  73. htmlgraph/cigs/pattern_storage.py +427 -0
  74. htmlgraph/cigs/patterns.py +503 -0
  75. htmlgraph/cigs/posttool_analyzer.py +234 -0
  76. htmlgraph/cigs/reporter.py +818 -0
  77. htmlgraph/cigs/tracker.py +317 -0
  78. htmlgraph/cli/.htmlgraph/.session-warning-state.json +6 -0
  79. htmlgraph/cli/.htmlgraph/agents.json +72 -0
  80. htmlgraph/cli/.htmlgraph/htmlgraph.db +0 -0
  81. htmlgraph/cli/__init__.py +42 -0
  82. htmlgraph/cli/__main__.py +6 -0
  83. htmlgraph/cli/analytics.py +1424 -0
  84. htmlgraph/cli/base.py +685 -0
  85. htmlgraph/cli/constants.py +206 -0
  86. htmlgraph/cli/core.py +954 -0
  87. htmlgraph/cli/main.py +147 -0
  88. htmlgraph/cli/models.py +475 -0
  89. htmlgraph/cli/templates/__init__.py +1 -0
  90. htmlgraph/cli/templates/cost_dashboard.py +399 -0
  91. htmlgraph/cli/work/__init__.py +239 -0
  92. htmlgraph/cli/work/browse.py +115 -0
  93. htmlgraph/cli/work/features.py +568 -0
  94. htmlgraph/cli/work/orchestration.py +676 -0
  95. htmlgraph/cli/work/report.py +728 -0
  96. htmlgraph/cli/work/sessions.py +466 -0
  97. htmlgraph/cli/work/snapshot.py +559 -0
  98. htmlgraph/cli/work/tracks.py +486 -0
  99. htmlgraph/cli_commands/__init__.py +1 -0
  100. htmlgraph/cli_commands/feature.py +195 -0
  101. htmlgraph/cli_framework.py +115 -0
  102. htmlgraph/collections/__init__.py +18 -0
  103. htmlgraph/collections/base.py +415 -98
  104. htmlgraph/collections/bug.py +53 -0
  105. htmlgraph/collections/chore.py +53 -0
  106. htmlgraph/collections/epic.py +53 -0
  107. htmlgraph/collections/feature.py +12 -26
  108. htmlgraph/collections/insight.py +100 -0
  109. htmlgraph/collections/metric.py +92 -0
  110. htmlgraph/collections/pattern.py +97 -0
  111. htmlgraph/collections/phase.py +53 -0
  112. htmlgraph/collections/session.py +194 -0
  113. htmlgraph/collections/spike.py +56 -16
  114. htmlgraph/collections/task_delegation.py +241 -0
  115. htmlgraph/collections/todo.py +511 -0
  116. htmlgraph/collections/traces.py +487 -0
  117. htmlgraph/config/cost_models.json +56 -0
  118. htmlgraph/config.py +190 -0
  119. htmlgraph/context_analytics.py +344 -0
  120. htmlgraph/converter.py +216 -28
  121. htmlgraph/cost_analysis/__init__.py +5 -0
  122. htmlgraph/cost_analysis/analyzer.py +438 -0
  123. htmlgraph/dashboard.html +2406 -307
  124. htmlgraph/dashboard.html.backup +6592 -0
  125. htmlgraph/dashboard.html.bak +7181 -0
  126. htmlgraph/dashboard.html.bak2 +7231 -0
  127. htmlgraph/dashboard.html.bak3 +7232 -0
  128. htmlgraph/db/__init__.py +38 -0
  129. htmlgraph/db/queries.py +790 -0
  130. htmlgraph/db/schema.py +1788 -0
  131. htmlgraph/decorators.py +317 -0
  132. htmlgraph/dependency_models.py +19 -2
  133. htmlgraph/deploy.py +142 -125
  134. htmlgraph/deployment_models.py +474 -0
  135. htmlgraph/docs/API_REFERENCE.md +841 -0
  136. htmlgraph/docs/HTTP_API.md +750 -0
  137. htmlgraph/docs/INTEGRATION_GUIDE.md +752 -0
  138. htmlgraph/docs/ORCHESTRATION_PATTERNS.md +717 -0
  139. htmlgraph/docs/README.md +532 -0
  140. htmlgraph/docs/__init__.py +77 -0
  141. htmlgraph/docs/docs_version.py +55 -0
  142. htmlgraph/docs/metadata.py +93 -0
  143. htmlgraph/docs/migrations.py +232 -0
  144. htmlgraph/docs/template_engine.py +143 -0
  145. htmlgraph/docs/templates/_sections/cli_reference.md.j2 +52 -0
  146. htmlgraph/docs/templates/_sections/core_concepts.md.j2 +29 -0
  147. htmlgraph/docs/templates/_sections/sdk_basics.md.j2 +69 -0
  148. htmlgraph/docs/templates/base_agents.md.j2 +78 -0
  149. htmlgraph/docs/templates/example_user_override.md.j2 +47 -0
  150. htmlgraph/docs/version_check.py +163 -0
  151. htmlgraph/edge_index.py +182 -27
  152. htmlgraph/error_handler.py +544 -0
  153. htmlgraph/event_log.py +100 -52
  154. htmlgraph/event_migration.py +13 -4
  155. htmlgraph/exceptions.py +49 -0
  156. htmlgraph/file_watcher.py +101 -28
  157. htmlgraph/find_api.py +75 -63
  158. htmlgraph/git_events.py +145 -63
  159. htmlgraph/graph.py +1122 -106
  160. htmlgraph/hooks/.htmlgraph/.session-warning-state.json +6 -0
  161. htmlgraph/hooks/.htmlgraph/agents.json +72 -0
  162. htmlgraph/hooks/.htmlgraph/index.sqlite +0 -0
  163. htmlgraph/hooks/__init__.py +45 -0
  164. htmlgraph/hooks/bootstrap.py +169 -0
  165. htmlgraph/hooks/cigs_pretool_enforcer.py +354 -0
  166. htmlgraph/hooks/concurrent_sessions.py +208 -0
  167. htmlgraph/hooks/context.py +350 -0
  168. htmlgraph/hooks/drift_handler.py +525 -0
  169. htmlgraph/hooks/event_tracker.py +1314 -0
  170. htmlgraph/hooks/git_commands.py +175 -0
  171. htmlgraph/hooks/hooks-config.example.json +12 -0
  172. htmlgraph/hooks/installer.py +343 -0
  173. htmlgraph/hooks/orchestrator.py +674 -0
  174. htmlgraph/hooks/orchestrator_reflector.py +223 -0
  175. htmlgraph/hooks/post-checkout.sh +28 -0
  176. htmlgraph/hooks/post-commit.sh +24 -0
  177. htmlgraph/hooks/post-merge.sh +26 -0
  178. htmlgraph/hooks/post_tool_use_failure.py +273 -0
  179. htmlgraph/hooks/post_tool_use_handler.py +257 -0
  180. htmlgraph/hooks/posttooluse.py +408 -0
  181. htmlgraph/hooks/pre-commit.sh +94 -0
  182. htmlgraph/hooks/pre-push.sh +28 -0
  183. htmlgraph/hooks/pretooluse.py +819 -0
  184. htmlgraph/hooks/prompt_analyzer.py +637 -0
  185. htmlgraph/hooks/session_handler.py +668 -0
  186. htmlgraph/hooks/session_summary.py +395 -0
  187. htmlgraph/hooks/state_manager.py +504 -0
  188. htmlgraph/hooks/subagent_detection.py +202 -0
  189. htmlgraph/hooks/subagent_stop.py +369 -0
  190. htmlgraph/hooks/task_enforcer.py +255 -0
  191. htmlgraph/hooks/task_validator.py +177 -0
  192. htmlgraph/hooks/validator.py +628 -0
  193. htmlgraph/ids.py +41 -27
  194. htmlgraph/index.d.ts +286 -0
  195. htmlgraph/learning.py +767 -0
  196. htmlgraph/mcp_server.py +69 -23
  197. htmlgraph/models.py +1586 -87
  198. htmlgraph/operations/README.md +62 -0
  199. htmlgraph/operations/__init__.py +79 -0
  200. htmlgraph/operations/analytics.py +339 -0
  201. htmlgraph/operations/bootstrap.py +289 -0
  202. htmlgraph/operations/events.py +244 -0
  203. htmlgraph/operations/fastapi_server.py +231 -0
  204. htmlgraph/operations/hooks.py +350 -0
  205. htmlgraph/operations/initialization.py +597 -0
  206. htmlgraph/operations/initialization.py.backup +228 -0
  207. htmlgraph/operations/server.py +303 -0
  208. htmlgraph/orchestration/__init__.py +58 -0
  209. htmlgraph/orchestration/claude_launcher.py +179 -0
  210. htmlgraph/orchestration/command_builder.py +72 -0
  211. htmlgraph/orchestration/headless_spawner.py +281 -0
  212. htmlgraph/orchestration/live_events.py +377 -0
  213. htmlgraph/orchestration/model_selection.py +327 -0
  214. htmlgraph/orchestration/plugin_manager.py +140 -0
  215. htmlgraph/orchestration/prompts.py +137 -0
  216. htmlgraph/orchestration/spawner_event_tracker.py +383 -0
  217. htmlgraph/orchestration/spawners/__init__.py +16 -0
  218. htmlgraph/orchestration/spawners/base.py +194 -0
  219. htmlgraph/orchestration/spawners/claude.py +173 -0
  220. htmlgraph/orchestration/spawners/codex.py +435 -0
  221. htmlgraph/orchestration/spawners/copilot.py +294 -0
  222. htmlgraph/orchestration/spawners/gemini.py +471 -0
  223. htmlgraph/orchestration/subprocess_runner.py +36 -0
  224. htmlgraph/orchestration/task_coordination.py +343 -0
  225. htmlgraph/orchestration.md +563 -0
  226. htmlgraph/orchestrator-system-prompt-optimized.txt +863 -0
  227. htmlgraph/orchestrator.py +669 -0
  228. htmlgraph/orchestrator_config.py +357 -0
  229. htmlgraph/orchestrator_mode.py +328 -0
  230. htmlgraph/orchestrator_validator.py +133 -0
  231. htmlgraph/parallel.py +646 -0
  232. htmlgraph/parser.py +160 -35
  233. htmlgraph/path_query.py +608 -0
  234. htmlgraph/pattern_matcher.py +636 -0
  235. htmlgraph/planning.py +147 -52
  236. htmlgraph/pydantic_models.py +476 -0
  237. htmlgraph/quality_gates.py +350 -0
  238. htmlgraph/query_builder.py +109 -72
  239. htmlgraph/query_composer.py +509 -0
  240. htmlgraph/reflection.py +443 -0
  241. htmlgraph/refs.py +344 -0
  242. htmlgraph/repo_hash.py +512 -0
  243. htmlgraph/repositories/__init__.py +292 -0
  244. htmlgraph/repositories/analytics_repository.py +455 -0
  245. htmlgraph/repositories/analytics_repository_standard.py +628 -0
  246. htmlgraph/repositories/feature_repository.py +581 -0
  247. htmlgraph/repositories/feature_repository_htmlfile.py +668 -0
  248. htmlgraph/repositories/feature_repository_memory.py +607 -0
  249. htmlgraph/repositories/feature_repository_sqlite.py +858 -0
  250. htmlgraph/repositories/filter_service.py +620 -0
  251. htmlgraph/repositories/filter_service_standard.py +445 -0
  252. htmlgraph/repositories/shared_cache.py +621 -0
  253. htmlgraph/repositories/shared_cache_memory.py +395 -0
  254. htmlgraph/repositories/track_repository.py +552 -0
  255. htmlgraph/repositories/track_repository_htmlfile.py +619 -0
  256. htmlgraph/repositories/track_repository_memory.py +508 -0
  257. htmlgraph/repositories/track_repository_sqlite.py +711 -0
  258. htmlgraph/routing.py +8 -19
  259. htmlgraph/scripts/deploy.py +1 -2
  260. htmlgraph/sdk/__init__.py +398 -0
  261. htmlgraph/sdk/__init__.pyi +14 -0
  262. htmlgraph/sdk/analytics/__init__.py +19 -0
  263. htmlgraph/sdk/analytics/engine.py +155 -0
  264. htmlgraph/sdk/analytics/helpers.py +178 -0
  265. htmlgraph/sdk/analytics/registry.py +109 -0
  266. htmlgraph/sdk/base.py +484 -0
  267. htmlgraph/sdk/constants.py +216 -0
  268. htmlgraph/sdk/core.pyi +308 -0
  269. htmlgraph/sdk/discovery.py +120 -0
  270. htmlgraph/sdk/help/__init__.py +12 -0
  271. htmlgraph/sdk/help/mixin.py +699 -0
  272. htmlgraph/sdk/mixins/__init__.py +15 -0
  273. htmlgraph/sdk/mixins/attribution.py +113 -0
  274. htmlgraph/sdk/mixins/mixin.py +410 -0
  275. htmlgraph/sdk/operations/__init__.py +12 -0
  276. htmlgraph/sdk/operations/mixin.py +427 -0
  277. htmlgraph/sdk/orchestration/__init__.py +17 -0
  278. htmlgraph/sdk/orchestration/coordinator.py +203 -0
  279. htmlgraph/sdk/orchestration/spawner.py +204 -0
  280. htmlgraph/sdk/planning/__init__.py +19 -0
  281. htmlgraph/sdk/planning/bottlenecks.py +93 -0
  282. htmlgraph/sdk/planning/mixin.py +211 -0
  283. htmlgraph/sdk/planning/parallel.py +186 -0
  284. htmlgraph/sdk/planning/queue.py +210 -0
  285. htmlgraph/sdk/planning/recommendations.py +87 -0
  286. htmlgraph/sdk/planning/smart_planning.py +319 -0
  287. htmlgraph/sdk/session/__init__.py +19 -0
  288. htmlgraph/sdk/session/continuity.py +57 -0
  289. htmlgraph/sdk/session/handoff.py +110 -0
  290. htmlgraph/sdk/session/info.py +309 -0
  291. htmlgraph/sdk/session/manager.py +103 -0
  292. htmlgraph/sdk/strategic/__init__.py +26 -0
  293. htmlgraph/sdk/strategic/mixin.py +563 -0
  294. htmlgraph/server.py +685 -180
  295. htmlgraph/services/__init__.py +10 -0
  296. htmlgraph/services/claiming.py +199 -0
  297. htmlgraph/session_hooks.py +300 -0
  298. htmlgraph/session_manager.py +1392 -175
  299. htmlgraph/session_registry.py +587 -0
  300. htmlgraph/session_state.py +436 -0
  301. htmlgraph/session_warning.py +201 -0
  302. htmlgraph/sessions/__init__.py +23 -0
  303. htmlgraph/sessions/handoff.py +756 -0
  304. htmlgraph/setup.py +34 -17
  305. htmlgraph/spike_index.py +143 -0
  306. htmlgraph/sync_docs.py +12 -15
  307. htmlgraph/system_prompts.py +450 -0
  308. htmlgraph/templates/AGENTS.md.template +366 -0
  309. htmlgraph/templates/CLAUDE.md.template +97 -0
  310. htmlgraph/templates/GEMINI.md.template +87 -0
  311. htmlgraph/templates/orchestration-view.html +350 -0
  312. htmlgraph/track_builder.py +146 -15
  313. htmlgraph/track_manager.py +69 -21
  314. htmlgraph/transcript.py +890 -0
  315. htmlgraph/transcript_analytics.py +699 -0
  316. htmlgraph/types.py +323 -0
  317. htmlgraph/validation.py +115 -0
  318. htmlgraph/watch.py +8 -5
  319. htmlgraph/work_type_utils.py +3 -2
  320. {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/dashboard.html +2406 -307
  321. htmlgraph-0.27.5.data/data/htmlgraph/templates/AGENTS.md.template +366 -0
  322. htmlgraph-0.27.5.data/data/htmlgraph/templates/CLAUDE.md.template +97 -0
  323. htmlgraph-0.27.5.data/data/htmlgraph/templates/GEMINI.md.template +87 -0
  324. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/METADATA +97 -64
  325. htmlgraph-0.27.5.dist-info/RECORD +337 -0
  326. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/entry_points.txt +1 -1
  327. htmlgraph/cli.py +0 -2688
  328. htmlgraph/sdk.py +0 -709
  329. htmlgraph-0.9.3.dist-info/RECORD +0 -61
  330. {htmlgraph-0.9.3.data → htmlgraph-0.27.5.data}/data/htmlgraph/styles.css +0 -0
  331. {htmlgraph-0.9.3.dist-info → htmlgraph-0.27.5.dist-info}/WHEEL +0 -0
htmlgraph/parallel.py ADDED
@@ -0,0 +1,646 @@
1
+ from __future__ import annotations
2
+
3
+ """
4
+ Parallel workflow execution coordinator for multi-agent task processing.
5
+
6
+ This module provides a comprehensive framework for executing multiple tasks in parallel
7
+ using specialized subagents. It implements a 6-phase workflow that optimizes for
8
+ context efficiency, minimizes conflicts, and provides health monitoring.
9
+
10
+ Available Classes:
11
+ - ParallelWorkflow: Main coordinator implementing the 6-phase parallel execution pattern
12
+ - ParallelAnalysis: Result of pre-flight analysis with parallelization recommendations
13
+ - PreparedTask: A task prepared for parallel execution with cached context
14
+ - AgentResult: Result from a single parallel agent execution
15
+ - AggregateResult: Aggregated results from all parallel agents
16
+
17
+ Six-Phase Workflow:
18
+ 1. Pre-flight Analysis: Assess if parallelization is beneficial
19
+ 2. Context Preparation: Cache shared context to reduce redundant reads
20
+ 3. Dispatch: Generate optimized prompts for Task tool
21
+ 4. Monitor: Track agent health during execution (health tracking)
22
+ 5. Aggregate: Collect and analyze results from all agents
23
+ 6. Validate: Verify execution quality and detect conflicts
24
+
25
+ Key Benefits:
26
+ - Context efficiency: Shared context cached, ~15x token reduction per agent
27
+ - Conflict detection: Identifies file conflicts before they happen
28
+ - Health monitoring: Tracks agent efficiency and anti-patterns
29
+ - Risk assessment: Analyzes if parallelization is worthwhile
30
+ - Cost-benefit analysis: Estimates speedup vs. token cost
31
+
32
+ Usage:
33
+ from htmlgraph.parallel import ParallelWorkflow
34
+ from htmlgraph.sdk import SDK
35
+
36
+ sdk = SDK(agent="claude")
37
+ workflow = ParallelWorkflow(sdk)
38
+
39
+ # Phase 1: Pre-flight analysis
40
+ analysis = workflow.analyze(max_agents=5)
41
+ if analysis.can_parallelize:
42
+ print(f"Recommendation: {analysis.recommendation}")
43
+ print(f"Expected speedup: {analysis.speedup_factor:.1f}x")
44
+
45
+ # Phase 2: Prepare context
46
+ tasks = workflow.prepare_tasks(
47
+ analysis.ready_tasks,
48
+ shared_files=["src/config.py", "src/models.py"]
49
+ )
50
+
51
+ # Phase 3: Generate prompts for Task tool
52
+ prompts = workflow.generate_prompts(tasks)
53
+
54
+ # Phase 4: Execute (use prompts with Task tool)
55
+ # agent_ids = [spawn_agent(p) for p in prompts]
56
+
57
+ # Phase 5: Aggregate results
58
+ results = workflow.aggregate(agent_ids)
59
+ print(f"Success: {results.successful}/{results.total_agents}")
60
+ print(f"Speedup: {results.parallel_speedup:.1f}x")
61
+
62
+ # Phase 6: Validate
63
+ validation = workflow.validate(results)
64
+ if validation["no_conflicts"] and validation["all_successful"]:
65
+ print("Parallel execution successful!")
66
+
67
+ # Link transcripts to features for traceability
68
+ workflow.link_transcripts([
69
+ ("feat-001", "agent-abc123"),
70
+ ("feat-002", "agent-def456")
71
+ ])
72
+
73
+ Best Practices:
74
+ - Only parallelize independent tasks (no shared file edits)
75
+ - Use pre-flight analysis to verify benefit > cost
76
+ - Monitor health scores to catch inefficient agents early
77
+ - Link transcripts for full traceability
78
+ - Limit to 3-5 parallel agents for optimal results
79
+ """
80
+
81
+
82
+ from dataclasses import dataclass, field
83
+ from datetime import datetime
84
+ from typing import TYPE_CHECKING, Any, cast
85
+
86
+ if TYPE_CHECKING:
87
+ from htmlgraph.sdk import SDK
88
+
89
+
90
@dataclass
class ParallelAnalysis:
    """Result of pre-flight analysis for parallel work.

    Produced by ``ParallelWorkflow.analyze`` (phase 1). Captures which
    tasks can run concurrently, the risks involved, and a rough
    cost/benefit estimate of fanning work out to multiple agents.
    """

    can_parallelize: bool  # True only when >=2 ready tasks, parallelism >=2, no risks
    max_parallelism: int
    ready_tasks: list[str]  # Task IDs ready to run (Level 0)
    blocked_tasks: list[str]  # Tasks waiting on dependencies
    bottlenecks: list[dict[str, Any]]  # Blocking issues
    risks: list[str]  # Potential problems

    # Cost-benefit (times are in minutes)
    estimated_sequential_time: float  # minutes
    estimated_parallel_time: float  # minutes
    speedup_factor: float  # sequential / parallel estimate

    # Recommendations
    recommendation: str  # Human-readable guidance for the orchestrator
    warnings: list[str] = field(default_factory=list)
109
+
110
+
111
@dataclass
class PreparedTask:
    """A task prepared for parallel execution.

    Produced by ``ParallelWorkflow.prepare_tasks`` (phase 2) and consumed
    by ``ParallelWorkflow.generate_prompts`` (phase 3).
    """

    task_id: str
    title: str
    priority: str
    assigned_agent: str | None

    # Context
    instructions: str  # Rendered step list / guidance for the agent
    cached_context: dict[str, str]  # file -> summary (dict shared across tasks)
    files_to_read: list[str]
    files_to_avoid: list[str]  # Being edited by other agents

    # Metadata
    estimated_duration: float  # minutes
    capabilities_required: list[str]
129
+
130
+
131
@dataclass
class AgentResult:
    """Result from a single parallel agent execution."""

    agent_id: str
    task_id: str
    status: str  # success, failed, partial
    duration_seconds: float
    files_modified: list[str]
    health_score: float  # presumably 0.0-1.0 (compared against 0.5/0.7 thresholds) — confirm
    anti_patterns: int  # Count of detected anti-pattern occurrences
    summary: str
    errors: list[str] = field(default_factory=list)
144
+
145
+
146
@dataclass
class AggregateResult:
    """Aggregated results from parallel execution.

    Produced by ``ParallelWorkflow.aggregate`` (phase 5) and checked by
    ``ParallelWorkflow.validate`` (phase 6).
    """

    total_agents: int
    successful: int  # Agents whose status == "success"
    failed: int  # Agents whose status == "failed"
    total_duration_seconds: float  # Sum across all agents (sequential estimate)
    parallel_speedup: float  # total duration / slowest single agent
    avg_health_score: float
    total_anti_patterns: int
    files_modified: list[str]  # De-duplicated across agents
    conflicts: list[str]  # Files modified by more than one agent
    recommendations: list[str]
160
+
161
+
162
+ class ParallelWorkflow:
163
+ """
164
+ Coordinator for optimal parallel agent execution.
165
+
166
+ Implements the 6-phase workflow:
167
+ 1. Pre-flight analysis
168
+ 2. Context preparation
169
+ 3. Dispatch (prompt generation)
170
+ 4. Monitor (health tracking)
171
+ 5. Aggregate (result collection)
172
+ 6. Validate (verification)
173
+ """
174
+
175
    # Thresholds from transcript analytics
    # NOTE(review): values look empirically chosen; confirm against
    # htmlgraph.transcript_analytics before tuning.
    RETRY_RATE_THRESHOLD = 0.3
    CONTEXT_REBUILD_THRESHOLD = 5
    TOOL_DIVERSITY_THRESHOLD = 0.3
    MIN_TASK_DURATION_MINUTES = 2.0
    TOKEN_COST_MULTIPLIER = 15  # Parallel uses ~15x tokens

    def __init__(self, sdk: SDK):
        """Bind the workflow coordinator to an SDK instance.

        Args:
            sdk: Project SDK. Its private ``_directory`` attribute is
                kept as the graph directory used by transcript analytics
                in :meth:`aggregate`.
        """
        self.sdk = sdk
        self._graph_dir = sdk._directory
185
+
186
+ def analyze(self, max_agents: int = 5) -> ParallelAnalysis:
187
+ """
188
+ Phase 1: Pre-flight analysis.
189
+
190
+ Determines if parallelization is beneficial and identifies ready tasks.
191
+ """
192
+ # Get parallel opportunities
193
+ try:
194
+ parallel = self.sdk.get_parallel_work(max_agents=max_agents)
195
+ except Exception:
196
+ parallel = {"max_parallelism": 0, "ready_now": [], "blocked": []}
197
+
198
+ ready_tasks = parallel.get("ready_now", [])
199
+ blocked_tasks = parallel.get("blocked", [])
200
+ max_parallelism = parallel.get("max_parallelism", 0)
201
+
202
+ # Get bottlenecks
203
+ try:
204
+ bottlenecks = self.sdk.find_bottlenecks(top_n=3)
205
+ except Exception:
206
+ bottlenecks = []
207
+
208
+ # Assess risks
209
+ risks = self._assess_risks(ready_tasks)
210
+
211
+ # Estimate times
212
+ task_count = len(ready_tasks)
213
+ avg_task_time = 5.0 # minutes (conservative estimate)
214
+ sequential_time = task_count * avg_task_time
215
+ parallel_time = avg_task_time if task_count > 0 else 0
216
+ speedup = sequential_time / parallel_time if parallel_time > 0 else 1.0
217
+
218
+ # Determine if parallelization is worthwhile
219
+ can_parallelize = (
220
+ max_parallelism >= 2 and len(ready_tasks) >= 2 and len(risks) == 0
221
+ )
222
+
223
+ # Generate recommendation
224
+ if not can_parallelize:
225
+ if len(ready_tasks) < 2:
226
+ recommendation = "Not enough independent tasks. Work sequentially."
227
+ elif len(risks) > 0:
228
+ recommendation = f"Risks detected: {', '.join(risks)}. Resolve first."
229
+ else:
230
+ recommendation = "Sequential execution recommended."
231
+ elif speedup < 1.5:
232
+ recommendation = "Marginal benefit. Consider sequential for simplicity."
233
+ can_parallelize = False
234
+ else:
235
+ recommendation = f"Parallelize {min(max_agents, len(ready_tasks))} tasks for {speedup:.1f}x speedup."
236
+
237
+ # Warnings
238
+ warnings = []
239
+ if len(bottlenecks) > 0:
240
+ warnings.append(f"{len(bottlenecks)} bottlenecks blocking downstream work")
241
+ if self.TOKEN_COST_MULTIPLIER * len(ready_tasks) > 50:
242
+ warnings.append(
243
+ f"High token cost: ~{self.TOKEN_COST_MULTIPLIER}x per agent"
244
+ )
245
+
246
+ return ParallelAnalysis(
247
+ can_parallelize=can_parallelize,
248
+ max_parallelism=max_parallelism,
249
+ ready_tasks=ready_tasks,
250
+ blocked_tasks=blocked_tasks,
251
+ bottlenecks=cast(list[dict[str, Any]], bottlenecks),
252
+ risks=risks,
253
+ estimated_sequential_time=sequential_time,
254
+ estimated_parallel_time=parallel_time,
255
+ speedup_factor=speedup,
256
+ recommendation=recommendation,
257
+ warnings=warnings,
258
+ )
259
+
260
+ def prepare_tasks(
261
+ self,
262
+ task_ids: list[str],
263
+ shared_files: list[str] | None = None,
264
+ ) -> list[PreparedTask]:
265
+ """
266
+ Phase 2: Context preparation.
267
+
268
+ Prepares tasks with cached context to reduce redundant reads.
269
+ """
270
+ prepared = []
271
+
272
+ # Generate shared context cache
273
+ cached_context = {}
274
+ if shared_files:
275
+ for file_path in shared_files:
276
+ try:
277
+ # In practice, this would read and summarize
278
+ cached_context[file_path] = f"[Pre-cached summary of {file_path}]"
279
+ except Exception:
280
+ pass
281
+
282
+ # Track which files each agent will edit
283
+ file_assignments: dict[str, str] = {}
284
+
285
+ for task_id in task_ids:
286
+ feature = self.sdk.features.get(task_id)
287
+ if not feature:
288
+ continue
289
+
290
+ # Infer files this task might edit
291
+ likely_files = self._infer_task_files(feature)
292
+
293
+ # Check for conflicts
294
+ files_to_avoid = []
295
+ for file_path in likely_files:
296
+ if file_path in file_assignments:
297
+ files_to_avoid.append(file_path)
298
+ else:
299
+ file_assignments[file_path] = task_id
300
+
301
+ # Generate instructions
302
+ instructions = self._generate_instructions(feature)
303
+
304
+ prepared.append(
305
+ PreparedTask(
306
+ task_id=task_id,
307
+ title=feature.title,
308
+ priority=getattr(feature, "priority", "medium"),
309
+ assigned_agent=getattr(feature, "agent_assigned", None),
310
+ instructions=instructions,
311
+ cached_context=cached_context,
312
+ files_to_read=likely_files,
313
+ files_to_avoid=files_to_avoid,
314
+ estimated_duration=5.0,
315
+ capabilities_required=getattr(feature, "required_capabilities", []),
316
+ )
317
+ )
318
+
319
+ return prepared
320
+
321
    def generate_prompts(self, tasks: list[PreparedTask]) -> list[dict[str, str]]:
        """
        Phase 3: Generate prompts for Task tool.

        Returns list of {prompt, description} dicts ready for Task tool.
        Each dict also carries a "subagent_type" key set to
        "general-purpose".
        """
        prompts = []

        for task in tasks:
            # Build context section: pre-cached file summaries plus files
            # other agents are already editing.
            context_lines = []
            if task.cached_context:
                context_lines.append("## Pre-Cached Context (DO NOT re-read these)")
                for file_path, summary in task.cached_context.items():
                    context_lines.append(f"- {file_path}: {summary}")

            if task.files_to_avoid:
                context_lines.append("")
                context_lines.append("## Files to AVOID (other agents editing)")
                for file_path in task.files_to_avoid:
                    context_lines.append(f"- {file_path}")

            context_section = "\n".join(context_lines)

            # Build efficiency guidelines (fixed boilerplate injected into
            # every agent prompt).
            guidelines = """
## Efficiency Guidelines
- Use Grep before Read (search then read, not read everything)
- Batch Edit operations (multiple changes in one edit)
- Use Glob to find files (not repeated Read attempts)
- Check cached context before reading shared files
- Mark feature file as complete when done
"""

            prompt = f"""Work on feature {task.task_id}: "{task.title}"
Priority: {task.priority}

{task.instructions}

{context_section}

{guidelines}

## Required Output
Return a summary including:
1. What changes were made
2. Files modified
3. Any blockers or issues found
4. Whether the feature is complete
"""

            prompts.append(
                {
                    "prompt": prompt,
                    # Title is truncated to keep the Task-tool label short.
                    "description": f"{task.task_id}: {task.title[:30]}",
                    "subagent_type": "general-purpose",
                }
            )

        return prompts
381
+
382
+ def aggregate(self, agent_ids: list[str]) -> AggregateResult:
383
+ """
384
+ Phase 5: Aggregate results from parallel agents.
385
+
386
+ Analyzes transcripts and collects metrics.
387
+ """
388
+ from htmlgraph.transcript_analytics import TranscriptAnalytics
389
+
390
+ analytics = TranscriptAnalytics(self._graph_dir)
391
+ results: list[AgentResult] = []
392
+
393
+ all_files: list[str] = []
394
+ conflicts: list[str] = []
395
+
396
+ for agent_id in agent_ids:
397
+ health = analytics.calculate_session_health(agent_id)
398
+ anti_patterns = analytics.detect_anti_patterns(agent_id)
399
+
400
+ if health:
401
+ result = AgentResult(
402
+ agent_id=agent_id,
403
+ task_id="", # Would be extracted from transcript
404
+ status="success" if health.overall_score() > 0.5 else "partial",
405
+ duration_seconds=health.duration_seconds,
406
+ files_modified=[], # Would be extracted
407
+ health_score=health.overall_score(),
408
+ anti_patterns=sum(p[0].count for p in anti_patterns),
409
+ summary="",
410
+ )
411
+ results.append(result)
412
+
413
+ # Check for file conflicts
414
+ file_counts: dict[str, int] = {}
415
+ for result in results:
416
+ for file_path in result.files_modified:
417
+ file_counts[file_path] = file_counts.get(file_path, 0) + 1
418
+ if file_counts[file_path] > 1:
419
+ conflicts.append(file_path)
420
+ all_files.append(file_path)
421
+
422
+ # Calculate aggregate metrics
423
+ total_duration = sum(r.duration_seconds for r in results)
424
+ avg_health = (
425
+ sum(r.health_score for r in results) / len(results) if results else 0.0
426
+ )
427
+ total_anti = sum(r.anti_patterns for r in results)
428
+
429
+ # Estimate speedup
430
+ max_duration = max((r.duration_seconds for r in results), default=0)
431
+ sequential_estimate = total_duration
432
+ speedup = sequential_estimate / max_duration if max_duration > 0 else 1.0
433
+
434
+ # Generate recommendations
435
+ recommendations = []
436
+ if avg_health < 0.7:
437
+ recommendations.append("Low average health. Review agent prompts.")
438
+ if total_anti > 5:
439
+ recommendations.append(f"{total_anti} anti-patterns detected. Add caching.")
440
+ if conflicts:
441
+ recommendations.append(f"File conflicts: {', '.join(conflicts)}")
442
+
443
+ return AggregateResult(
444
+ total_agents=len(agent_ids),
445
+ successful=len([r for r in results if r.status == "success"]),
446
+ failed=len([r for r in results if r.status == "failed"]),
447
+ total_duration_seconds=total_duration,
448
+ parallel_speedup=speedup,
449
+ avg_health_score=avg_health,
450
+ total_anti_patterns=total_anti,
451
+ files_modified=list(set(all_files)),
452
+ conflicts=conflicts,
453
+ recommendations=recommendations,
454
+ )
455
+
456
def validate(self, result: AggregateResult) -> dict[str, bool]:
    """
    Phase 6: Validate parallel execution results.

    Args:
        result: Aggregate metrics produced by the aggregation phase.

    Returns:
        Mapping of validation-check name to pass/fail flag.
    """
    checks: dict[str, bool] = {}
    # No two agents touched the same file.
    checks["no_conflicts"] = len(result.conflicts) == 0
    # Every agent finished without a failure status.
    checks["all_successful"] = result.failed == 0
    # Mean health score meets the 0.7 quality bar.
    checks["healthy_execution"] = result.avg_health_score >= 0.7
    # Anti-pattern count stays within tolerance.
    checks["acceptable_anti_patterns"] = result.total_anti_patterns <= 5
    return checks
466
+
467
+ def _assess_risks(self, task_ids: list[str]) -> list[str]:
468
+ """Identify risks that prevent parallelization."""
469
+ risks = []
470
+
471
+ # Check for shared file edits (would need feature analysis)
472
+ # This is a simplified check
473
+ if len(task_ids) > 5:
474
+ risks.append("Many tasks increase conflict risk")
475
+
476
+ return risks
477
+
478
+ def _infer_task_files(self, feature: Any) -> list[str]:
479
+ """Infer which files a task might need to edit."""
480
+ # In practice, this would analyze feature content
481
+ return []
482
+
483
+ def _generate_instructions(self, feature: Any) -> str:
484
+ """Generate task-specific instructions."""
485
+ steps = getattr(feature, "steps", [])
486
+ if steps:
487
+ step_lines = []
488
+ for i, step in enumerate(steps, 1):
489
+ status = "✅" if getattr(step, "completed", False) else "⏳"
490
+ desc = getattr(step, "description", str(step))
491
+ step_lines.append(f"{i}. {status} {desc}")
492
+ return "## Steps\n" + "\n".join(step_lines)
493
+ return "Complete this feature according to its description."
494
+
495
def link_transcripts(
    self,
    feature_transcript_pairs: list[tuple[str, str]],
) -> dict[str, Any]:
    """
    Link Claude Code transcripts to features after parallel execution.

    Enables full traceability from features to the agent sessions that
    implemented them.

    Args:
        feature_transcript_pairs: List of (feature_id, transcript_id) tuples

    Returns:
        Summary of linking results with linked/failed counts and details.

    Example:
        >>> workflow = ParallelWorkflow(sdk)
        >>> results = workflow.link_transcripts([
        ...     ("feat-001", "agent-a91736"),
        ... ])
        >>> print(results["linked_count"])  # 1
    """
    # Go through the SDK's session manager so all linking shares the
    # same graph instances.
    manager = self.sdk.session_manager
    linked: list[dict[str, Any]] = []
    failed: list[dict[str, Any]] = []

    for feat_id, trans_id in feature_transcript_pairs:
        try:
            feature = self.sdk.features.get(feat_id)
            if not feature:
                failed.append(
                    {
                        "feature_id": feat_id,
                        "transcript_id": trans_id,
                        "error": "Feature not found",
                    }
                )
                continue

            graph = manager.features_graph
            manager._link_transcript_to_feature(feature, trans_id, graph)
            graph.update(feature)

            # Metrics written onto the feature by the link step above.
            props = feature.properties
            linked.append(
                {
                    "feature_id": feat_id,
                    "transcript_id": trans_id,
                    "tool_count": props.get("transcript_tool_count", 0),
                    "duration_seconds": props.get(
                        "transcript_duration_seconds", 0
                    ),
                }
            )
        except Exception as exc:
            # Record the failure but keep linking the remaining pairs.
            failed.append(
                {
                    "feature_id": feat_id,
                    "transcript_id": trans_id,
                    "error": str(exc),
                }
            )

    return {
        "linked_count": len(linked),
        "failed_count": len(failed),
        "linked": linked,
        "failed": failed,
    }
570
+
571
def auto_link_by_timestamp(
    self,
    feature_ids: list[str],
    time_window_minutes: int = 30,
) -> dict[str, Any]:
    """
    Auto-link transcripts to features based on completion timestamp matching.

    Finds agent transcripts that ran within the time window of each feature's
    completion and links them.

    Args:
        feature_ids: Features to find transcripts for
        time_window_minutes: Maximum time difference to consider a match

    Returns:
        Summary with linked features and their transcripts
    """
    from datetime import timedelta

    from htmlgraph.transcript import TranscriptReader

    reader = TranscriptReader()
    pairs: list[tuple[str, str]] = []
    unmatched: list[str] = []
    window_seconds = time_window_minutes * 60

    for feat_id in feature_ids:
        feature = self.sdk.features.get(feat_id)
        # Missing feature or missing completion timestamp → cannot match.
        completed_at = feature.properties.get("completed_at") if feature else None
        if not completed_at:
            unmatched.append(feat_id)
            continue

        try:
            completion_time = datetime.fromisoformat(completed_at)
        except (ValueError, TypeError):
            # Unparseable timestamp — skip rather than guess.
            unmatched.append(feat_id)
            continue

        # Only consider sessions that started within the lookback window.
        since = completion_time - timedelta(minutes=time_window_minutes)
        matched = False
        for session in reader.list_sessions(since=since):
            # Only sub-agent sessions, not the main session.
            if not session.session_id.startswith("agent-"):
                continue
            if not session.ended_at:
                continue
            delta = abs((session.ended_at - completion_time).total_seconds())
            if delta <= window_seconds:
                pairs.append((feat_id, session.session_id))
                matched = True
                break
        if not matched:
            unmatched.append(feat_id)

    if pairs:
        summary = self.link_transcripts(pairs)
        summary["unmatched_features"] = unmatched
        return summary

    return {
        "linked_count": 0,
        "failed_count": 0,
        "linked": [],
        "failed": [],
        "unmatched_features": unmatched,
    }