htmlgraph 0.26.25__py3-none-any.whl → 0.27.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- htmlgraph/__init__.py +23 -1
- htmlgraph/__init__.pyi +123 -0
- htmlgraph/agent_registry.py +2 -1
- htmlgraph/analytics/cli.py +3 -3
- htmlgraph/analytics/cost_analyzer.py +5 -1
- htmlgraph/analytics/cost_monitor.py +664 -0
- htmlgraph/analytics/cross_session.py +13 -9
- htmlgraph/analytics/dependency.py +10 -6
- htmlgraph/analytics/strategic/__init__.py +80 -0
- htmlgraph/analytics/strategic/cost_optimizer.py +611 -0
- htmlgraph/analytics/strategic/pattern_detector.py +876 -0
- htmlgraph/analytics/strategic/preference_manager.py +709 -0
- htmlgraph/analytics/strategic/suggestion_engine.py +747 -0
- htmlgraph/analytics/work_type.py +15 -11
- htmlgraph/analytics_index.py +2 -1
- htmlgraph/api/cost_alerts_websocket.py +416 -0
- htmlgraph/api/main.py +167 -62
- htmlgraph/api/websocket.py +538 -0
- htmlgraph/attribute_index.py +2 -1
- htmlgraph/builders/base.py +2 -1
- htmlgraph/builders/bug.py +2 -1
- htmlgraph/builders/chore.py +2 -1
- htmlgraph/builders/epic.py +2 -1
- htmlgraph/builders/feature.py +2 -1
- htmlgraph/builders/insight.py +2 -1
- htmlgraph/builders/metric.py +2 -1
- htmlgraph/builders/pattern.py +2 -1
- htmlgraph/builders/phase.py +2 -1
- htmlgraph/builders/spike.py +2 -1
- htmlgraph/builders/track.py +2 -1
- htmlgraph/cli/analytics.py +2 -1
- htmlgraph/cli/base.py +2 -1
- htmlgraph/cli/core.py +2 -1
- htmlgraph/cli/main.py +2 -1
- htmlgraph/cli/models.py +2 -1
- htmlgraph/cli/templates/cost_dashboard.py +2 -1
- htmlgraph/cli/work/__init__.py +2 -1
- htmlgraph/cli/work/browse.py +2 -1
- htmlgraph/cli/work/features.py +2 -1
- htmlgraph/cli/work/orchestration.py +2 -1
- htmlgraph/cli/work/report.py +2 -1
- htmlgraph/cli/work/sessions.py +2 -1
- htmlgraph/cli/work/snapshot.py +2 -1
- htmlgraph/cli/work/tracks.py +2 -1
- htmlgraph/collections/base.py +10 -5
- htmlgraph/collections/bug.py +2 -1
- htmlgraph/collections/chore.py +2 -1
- htmlgraph/collections/epic.py +2 -1
- htmlgraph/collections/feature.py +2 -1
- htmlgraph/collections/insight.py +2 -1
- htmlgraph/collections/metric.py +2 -1
- htmlgraph/collections/pattern.py +2 -1
- htmlgraph/collections/phase.py +2 -1
- htmlgraph/collections/session.py +12 -7
- htmlgraph/collections/spike.py +6 -1
- htmlgraph/collections/task_delegation.py +7 -2
- htmlgraph/collections/todo.py +2 -1
- htmlgraph/collections/traces.py +15 -10
- htmlgraph/config/cost_models.json +56 -0
- htmlgraph/context_analytics.py +2 -1
- htmlgraph/db/schema.py +67 -6
- htmlgraph/dependency_models.py +2 -1
- htmlgraph/edge_index.py +2 -1
- htmlgraph/event_log.py +83 -64
- htmlgraph/event_migration.py +2 -1
- htmlgraph/file_watcher.py +12 -8
- htmlgraph/find_api.py +2 -1
- htmlgraph/git_events.py +6 -2
- htmlgraph/hooks/cigs_pretool_enforcer.py +5 -1
- htmlgraph/hooks/drift_handler.py +3 -3
- htmlgraph/hooks/event_tracker.py +40 -61
- htmlgraph/hooks/installer.py +5 -1
- htmlgraph/hooks/orchestrator.py +4 -0
- htmlgraph/hooks/orchestrator_reflector.py +4 -0
- htmlgraph/hooks/post_tool_use_failure.py +7 -3
- htmlgraph/hooks/posttooluse.py +4 -0
- htmlgraph/hooks/prompt_analyzer.py +5 -5
- htmlgraph/hooks/session_handler.py +2 -1
- htmlgraph/hooks/session_summary.py +6 -2
- htmlgraph/hooks/validator.py +8 -4
- htmlgraph/ids.py +2 -1
- htmlgraph/learning.py +2 -1
- htmlgraph/mcp_server.py +2 -1
- htmlgraph/operations/analytics.py +2 -1
- htmlgraph/operations/bootstrap.py +2 -1
- htmlgraph/operations/events.py +2 -1
- htmlgraph/operations/fastapi_server.py +2 -1
- htmlgraph/operations/hooks.py +2 -1
- htmlgraph/operations/initialization.py +2 -1
- htmlgraph/operations/server.py +2 -1
- htmlgraph/orchestration/claude_launcher.py +23 -20
- htmlgraph/orchestration/command_builder.py +2 -1
- htmlgraph/orchestration/headless_spawner.py +6 -2
- htmlgraph/orchestration/model_selection.py +7 -3
- htmlgraph/orchestration/plugin_manager.py +24 -19
- htmlgraph/orchestration/spawners/claude.py +5 -2
- htmlgraph/orchestration/spawners/codex.py +12 -19
- htmlgraph/orchestration/spawners/copilot.py +13 -18
- htmlgraph/orchestration/spawners/gemini.py +12 -19
- htmlgraph/orchestration/subprocess_runner.py +6 -3
- htmlgraph/orchestration/task_coordination.py +16 -8
- htmlgraph/orchestrator.py +2 -1
- htmlgraph/parallel.py +2 -1
- htmlgraph/query_builder.py +2 -1
- htmlgraph/reflection.py +2 -1
- htmlgraph/refs.py +2 -1
- htmlgraph/repo_hash.py +2 -1
- htmlgraph/repositories/__init__.py +292 -0
- htmlgraph/repositories/analytics_repository.py +455 -0
- htmlgraph/repositories/analytics_repository_standard.py +628 -0
- htmlgraph/repositories/feature_repository.py +581 -0
- htmlgraph/repositories/feature_repository_htmlfile.py +668 -0
- htmlgraph/repositories/feature_repository_memory.py +607 -0
- htmlgraph/repositories/feature_repository_sqlite.py +858 -0
- htmlgraph/repositories/filter_service.py +620 -0
- htmlgraph/repositories/filter_service_standard.py +445 -0
- htmlgraph/repositories/shared_cache.py +621 -0
- htmlgraph/repositories/shared_cache_memory.py +395 -0
- htmlgraph/repositories/track_repository.py +552 -0
- htmlgraph/repositories/track_repository_htmlfile.py +619 -0
- htmlgraph/repositories/track_repository_memory.py +508 -0
- htmlgraph/repositories/track_repository_sqlite.py +711 -0
- htmlgraph/sdk/__init__.py +398 -0
- htmlgraph/sdk/__init__.pyi +14 -0
- htmlgraph/sdk/analytics/__init__.py +19 -0
- htmlgraph/sdk/analytics/engine.py +155 -0
- htmlgraph/sdk/analytics/helpers.py +178 -0
- htmlgraph/sdk/analytics/registry.py +109 -0
- htmlgraph/sdk/base.py +484 -0
- htmlgraph/sdk/constants.py +216 -0
- htmlgraph/sdk/core.pyi +308 -0
- htmlgraph/sdk/discovery.py +120 -0
- htmlgraph/sdk/help/__init__.py +12 -0
- htmlgraph/sdk/help/mixin.py +699 -0
- htmlgraph/sdk/mixins/__init__.py +15 -0
- htmlgraph/sdk/mixins/attribution.py +113 -0
- htmlgraph/sdk/mixins/mixin.py +410 -0
- htmlgraph/sdk/operations/__init__.py +12 -0
- htmlgraph/sdk/operations/mixin.py +427 -0
- htmlgraph/sdk/orchestration/__init__.py +17 -0
- htmlgraph/sdk/orchestration/coordinator.py +203 -0
- htmlgraph/sdk/orchestration/spawner.py +204 -0
- htmlgraph/sdk/planning/__init__.py +19 -0
- htmlgraph/sdk/planning/bottlenecks.py +93 -0
- htmlgraph/sdk/planning/mixin.py +211 -0
- htmlgraph/sdk/planning/parallel.py +186 -0
- htmlgraph/sdk/planning/queue.py +210 -0
- htmlgraph/sdk/planning/recommendations.py +87 -0
- htmlgraph/sdk/planning/smart_planning.py +319 -0
- htmlgraph/sdk/session/__init__.py +19 -0
- htmlgraph/sdk/session/continuity.py +57 -0
- htmlgraph/sdk/session/handoff.py +110 -0
- htmlgraph/sdk/session/info.py +309 -0
- htmlgraph/sdk/session/manager.py +103 -0
- htmlgraph/sdk/strategic/__init__.py +26 -0
- htmlgraph/sdk/strategic/mixin.py +563 -0
- htmlgraph/server.py +21 -17
- htmlgraph/session_warning.py +2 -1
- htmlgraph/sessions/handoff.py +4 -3
- htmlgraph/system_prompts.py +2 -1
- htmlgraph/track_builder.py +2 -1
- htmlgraph/transcript.py +2 -1
- htmlgraph/watch.py +2 -1
- htmlgraph/work_type_utils.py +2 -1
- {htmlgraph-0.26.25.dist-info → htmlgraph-0.27.1.dist-info}/METADATA +1 -1
- htmlgraph-0.27.1.dist-info/RECORD +332 -0
- htmlgraph/sdk.py +0 -3500
- htmlgraph-0.26.25.dist-info/RECORD +0 -274
- {htmlgraph-0.26.25.data → htmlgraph-0.27.1.data}/data/htmlgraph/dashboard.html +0 -0
- {htmlgraph-0.26.25.data → htmlgraph-0.27.1.data}/data/htmlgraph/styles.css +0 -0
- {htmlgraph-0.26.25.data → htmlgraph-0.27.1.data}/data/htmlgraph/templates/AGENTS.md.template +0 -0
- {htmlgraph-0.26.25.data → htmlgraph-0.27.1.data}/data/htmlgraph/templates/CLAUDE.md.template +0 -0
- {htmlgraph-0.26.25.data → htmlgraph-0.27.1.data}/data/htmlgraph/templates/GEMINI.md.template +0 -0
- {htmlgraph-0.26.25.dist-info → htmlgraph-0.27.1.dist-info}/WHEEL +0 -0
- {htmlgraph-0.26.25.dist-info → htmlgraph-0.27.1.dist-info}/entry_points.txt +0 -0
htmlgraph/sdk.py
DELETED
|
@@ -1,3500 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
HtmlGraph SDK - AI-Friendly Interface
|
|
3
|
-
|
|
4
|
-
Provides a fluent, ergonomic API for AI agents with:
|
|
5
|
-
- Auto-discovery of .htmlgraph directory
|
|
6
|
-
- Method chaining for all operations
|
|
7
|
-
- Context managers for auto-save
|
|
8
|
-
- Batch operations
|
|
9
|
-
- Minimal boilerplate
|
|
10
|
-
|
|
11
|
-
Example:
|
|
12
|
-
from htmlgraph import SDK
|
|
13
|
-
|
|
14
|
-
# Auto-discovers .htmlgraph directory
|
|
15
|
-
sdk = SDK(agent="claude")
|
|
16
|
-
|
|
17
|
-
# Fluent feature creation
|
|
18
|
-
feature = sdk.features.create(
|
|
19
|
-
title="User Authentication",
|
|
20
|
-
track="auth"
|
|
21
|
-
).add_steps([
|
|
22
|
-
"Create login endpoint",
|
|
23
|
-
"Add JWT middleware",
|
|
24
|
-
"Write tests"
|
|
25
|
-
]).set_priority("high").save()
|
|
26
|
-
|
|
27
|
-
# Work on a feature
|
|
28
|
-
with sdk.features.get("feature-001") as feature:
|
|
29
|
-
feature.start()
|
|
30
|
-
feature.complete_step(0)
|
|
31
|
-
# Auto-saves on exit
|
|
32
|
-
|
|
33
|
-
# Query
|
|
34
|
-
todos = sdk.features.where(status="todo", priority="high")
|
|
35
|
-
|
|
36
|
-
# Batch operations
|
|
37
|
-
sdk.features.mark_done(["feat-001", "feat-002", "feat-003"])
|
|
38
|
-
"""
|
|
39
|
-
|
|
40
|
-
from __future__ import annotations
|
|
41
|
-
|
|
42
|
-
import os
|
|
43
|
-
from pathlib import Path
|
|
44
|
-
from typing import Any, cast
|
|
45
|
-
|
|
46
|
-
from htmlgraph.agent_detection import detect_agent_name
|
|
47
|
-
from htmlgraph.agents import AgentInterface
|
|
48
|
-
from htmlgraph.analytics import Analytics, CrossSessionAnalytics, DependencyAnalytics
|
|
49
|
-
from htmlgraph.collections import (
|
|
50
|
-
BaseCollection,
|
|
51
|
-
BugCollection,
|
|
52
|
-
ChoreCollection,
|
|
53
|
-
EpicCollection,
|
|
54
|
-
FeatureCollection,
|
|
55
|
-
PhaseCollection,
|
|
56
|
-
SpikeCollection,
|
|
57
|
-
TaskDelegationCollection,
|
|
58
|
-
TodoCollection,
|
|
59
|
-
)
|
|
60
|
-
from htmlgraph.collections.insight import InsightCollection
|
|
61
|
-
from htmlgraph.collections.metric import MetricCollection
|
|
62
|
-
from htmlgraph.collections.pattern import PatternCollection
|
|
63
|
-
from htmlgraph.collections.session import SessionCollection
|
|
64
|
-
from htmlgraph.context_analytics import ContextAnalytics
|
|
65
|
-
from htmlgraph.db.schema import HtmlGraphDB
|
|
66
|
-
from htmlgraph.graph import HtmlGraph
|
|
67
|
-
from htmlgraph.models import Node, Step
|
|
68
|
-
from htmlgraph.session_manager import SessionManager
|
|
69
|
-
from htmlgraph.session_warning import check_and_show_warning
|
|
70
|
-
from htmlgraph.system_prompts import SystemPromptManager
|
|
71
|
-
from htmlgraph.track_builder import TrackCollection
|
|
72
|
-
from htmlgraph.types import (
|
|
73
|
-
ActiveWorkItem,
|
|
74
|
-
BottleneckDict,
|
|
75
|
-
SessionStartInfo,
|
|
76
|
-
)
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
class SDK:
|
|
80
|
-
"""
|
|
81
|
-
Main SDK interface for AI agents.
|
|
82
|
-
|
|
83
|
-
Auto-discovers .htmlgraph directory and provides fluent API for all collections.
|
|
84
|
-
|
|
85
|
-
Available Collections:
|
|
86
|
-
- features: Feature work items with builder support
|
|
87
|
-
- bugs: Bug reports
|
|
88
|
-
- chores: Maintenance and chore tasks
|
|
89
|
-
- spikes: Investigation and research spikes
|
|
90
|
-
- epics: Large bodies of work
|
|
91
|
-
- phases: Project phases
|
|
92
|
-
- sessions: Agent sessions
|
|
93
|
-
- tracks: Work tracks
|
|
94
|
-
- agents: Agent information
|
|
95
|
-
- todos: Persistent task tracking (mirrors TodoWrite API)
|
|
96
|
-
- patterns: Workflow patterns (optimal/anti-pattern)
|
|
97
|
-
- insights: Session health insights
|
|
98
|
-
- metrics: Aggregated time-series metrics
|
|
99
|
-
|
|
100
|
-
System Prompt Management:
|
|
101
|
-
sdk.system_prompts - Manage system prompts
|
|
102
|
-
.get_active() - Get active prompt (project override OR plugin default)
|
|
103
|
-
.get_default() - Get plugin default system prompt
|
|
104
|
-
.get_project() - Get project-level override if exists
|
|
105
|
-
.create(template) - Create project-level override
|
|
106
|
-
.validate() - Validate prompt token count
|
|
107
|
-
.delete() - Delete project override (fall back to default)
|
|
108
|
-
.get_stats() - Get prompt statistics
|
|
109
|
-
|
|
110
|
-
Analytics & Decision Support:
|
|
111
|
-
sdk.dep_analytics - Dependency analysis
|
|
112
|
-
.find_bottlenecks(top_n=5) - Find blocking tasks
|
|
113
|
-
.get_parallel_work(max_agents=5) - Find parallelizable work
|
|
114
|
-
.recommend_next_tasks(agent_count=1) - Smart recommendations
|
|
115
|
-
.assess_dependency_risk() - Check for circular deps
|
|
116
|
-
.impact_analysis(node_id) - See what task unlocks
|
|
117
|
-
|
|
118
|
-
sdk.analytics - Work analytics
|
|
119
|
-
.get_work_type_distribution() - Breakdown by type
|
|
120
|
-
.get_spike_to_feature_ratio() - Investigation vs implementation
|
|
121
|
-
.get_maintenance_burden() - Chore vs feature ratio
|
|
122
|
-
|
|
123
|
-
sdk.context - Context tracking
|
|
124
|
-
.get_context_usage() - Session context metrics
|
|
125
|
-
.get_context_efficiency() - Efficiency score
|
|
126
|
-
|
|
127
|
-
Discovery & Help:
|
|
128
|
-
sdk.help() - Get structured help for all operations
|
|
129
|
-
sdk.help('analytics') - Get analytics-specific help
|
|
130
|
-
sdk.help('features') - Get feature collection help
|
|
131
|
-
|
|
132
|
-
Error Handling Patterns
|
|
133
|
-
=======================
|
|
134
|
-
|
|
135
|
-
SDK methods follow consistent error handling patterns by operation type:
|
|
136
|
-
|
|
137
|
-
LOOKUP OPERATIONS (Return None):
|
|
138
|
-
Single-item lookups return None when not found.
|
|
139
|
-
Always check the result before using.
|
|
140
|
-
|
|
141
|
-
>>> feature = sdk.features.get("nonexistent")
|
|
142
|
-
>>> if feature:
|
|
143
|
-
... print(feature.title)
|
|
144
|
-
... else:
|
|
145
|
-
... print("Not found")
|
|
146
|
-
|
|
147
|
-
QUERY OPERATIONS (Return Empty List):
|
|
148
|
-
Queries return empty list when no matches or on error.
|
|
149
|
-
Safe to iterate without checking.
|
|
150
|
-
|
|
151
|
-
>>> results = sdk.features.where(status="impossible")
|
|
152
|
-
>>> for r in results: # Empty iteration is safe
|
|
153
|
-
... print(r.title)
|
|
154
|
-
|
|
155
|
-
EDIT OPERATIONS (Raise Exception):
|
|
156
|
-
Edit operations raise NodeNotFoundError when target missing.
|
|
157
|
-
Use try/except to handle gracefully.
|
|
158
|
-
|
|
159
|
-
>>> from htmlgraph.exceptions import NodeNotFoundError
|
|
160
|
-
>>> try:
|
|
161
|
-
... with sdk.features.edit("nonexistent") as f:
|
|
162
|
-
... f.status = "done"
|
|
163
|
-
... except NodeNotFoundError:
|
|
164
|
-
... print("Feature not found")
|
|
165
|
-
|
|
166
|
-
CREATE OPERATIONS (Raise on Validation):
|
|
167
|
-
Create operations raise ValidationError on invalid input.
|
|
168
|
-
|
|
169
|
-
>>> try:
|
|
170
|
-
... sdk.features.create("") # Empty title
|
|
171
|
-
... except ValidationError:
|
|
172
|
-
... print("Title required")
|
|
173
|
-
|
|
174
|
-
BATCH OPERATIONS (Return Results Dict):
|
|
175
|
-
Batch operations return dict with success_count, failed_ids, and warnings.
|
|
176
|
-
Provides detailed feedback for partial failures.
|
|
177
|
-
|
|
178
|
-
>>> result = sdk.features.mark_done(["feat-1", "missing", "feat-2"])
|
|
179
|
-
>>> print(f"Completed {result['success_count']} of 3")
|
|
180
|
-
>>> if result['failed_ids']:
|
|
181
|
-
... print(f"Failed: {result['failed_ids']}")
|
|
182
|
-
... print(f"Reasons: {result['warnings']}")
|
|
183
|
-
|
|
184
|
-
Pattern Summary:
|
|
185
|
-
| Operation Type | Error Behavior | Example Method |
|
|
186
|
-
|----------------|--------------------|-----------------------|
|
|
187
|
-
| Lookup | Return None | .get(id) |
|
|
188
|
-
| Query | Return [] | .where(), .all() |
|
|
189
|
-
| Edit | Raise Exception | .edit(id) |
|
|
190
|
-
| Create | Raise on Invalid | .create(title) |
|
|
191
|
-
| Batch | Return Results Dict| .mark_done([ids]) |
|
|
192
|
-
| Delete | Return Bool | .delete(id) |
|
|
193
|
-
|
|
194
|
-
Available Exceptions:
|
|
195
|
-
- NodeNotFoundError: Node with ID not found
|
|
196
|
-
- ValidationError: Invalid input parameters
|
|
197
|
-
- ClaimConflictError: Node already claimed by another agent
|
|
198
|
-
|
|
199
|
-
Example:
|
|
200
|
-
sdk = SDK(agent="claude")
|
|
201
|
-
|
|
202
|
-
# Work with features (has builder support)
|
|
203
|
-
feature = sdk.features.create("User Auth")
|
|
204
|
-
.set_priority("high")
|
|
205
|
-
.add_steps(["Login", "Logout"])
|
|
206
|
-
.save()
|
|
207
|
-
|
|
208
|
-
# Work with bugs
|
|
209
|
-
high_bugs = sdk.bugs.where(status="todo", priority="high")
|
|
210
|
-
with sdk.bugs.edit("bug-001") as bug:
|
|
211
|
-
bug.status = "in-progress"
|
|
212
|
-
|
|
213
|
-
# Work with any collection
|
|
214
|
-
all_spikes = sdk.spikes.all()
|
|
215
|
-
sdk.chores.mark_done(["chore-001", "chore-002"])
|
|
216
|
-
sdk.epics.assign(["epic-001"], agent="claude")
|
|
217
|
-
"""
|
|
218
|
-
|
|
219
|
-
def __init__(
|
|
220
|
-
self,
|
|
221
|
-
directory: Path | str | None = None,
|
|
222
|
-
agent: str | None = None,
|
|
223
|
-
parent_session: str | None = None,
|
|
224
|
-
db_path: str | None = None,
|
|
225
|
-
):
|
|
226
|
-
"""
|
|
227
|
-
Initialize SDK.
|
|
228
|
-
|
|
229
|
-
Args:
|
|
230
|
-
directory: Path to .htmlgraph directory (auto-discovered if not provided)
|
|
231
|
-
agent: REQUIRED - Agent identifier for operations.
|
|
232
|
-
Used to attribute work items (features, spikes, bugs, etc) to the agent.
|
|
233
|
-
Examples: agent='explorer', agent='coder', agent='tester'
|
|
234
|
-
Critical for: Work attribution, result retrieval, orchestrator tracking
|
|
235
|
-
Falls back to: CLAUDE_AGENT_NAME env var, then detect_agent_name()
|
|
236
|
-
Raises ValueError if not provided and cannot be detected
|
|
237
|
-
parent_session: Parent session ID to log activities to (for nested contexts)
|
|
238
|
-
db_path: Path to SQLite database file (optional, defaults to ~/.htmlgraph/htmlgraph.db)
|
|
239
|
-
"""
|
|
240
|
-
if directory is None:
|
|
241
|
-
directory = self._discover_htmlgraph()
|
|
242
|
-
|
|
243
|
-
if agent is None:
|
|
244
|
-
# Try environment variable fallback
|
|
245
|
-
agent = os.getenv("CLAUDE_AGENT_NAME")
|
|
246
|
-
|
|
247
|
-
if agent is None:
|
|
248
|
-
# Try automatic detection
|
|
249
|
-
detected = detect_agent_name()
|
|
250
|
-
if detected and detected != "cli":
|
|
251
|
-
# Only accept detected if it's not the default fallback
|
|
252
|
-
agent = detected
|
|
253
|
-
else:
|
|
254
|
-
# No valid agent found - fail fast with helpful error message
|
|
255
|
-
raise ValueError(
|
|
256
|
-
"Agent identifier is required for work attribution. "
|
|
257
|
-
"Pass agent='name' to SDK() initialization. "
|
|
258
|
-
"Examples: SDK(agent='explorer'), SDK(agent='coder'), SDK(agent='tester')\n"
|
|
259
|
-
"Alternatively, set CLAUDE_AGENT_NAME environment variable.\n"
|
|
260
|
-
"Critical for: Work attribution, result retrieval, orchestrator tracking"
|
|
261
|
-
)
|
|
262
|
-
|
|
263
|
-
self._directory = Path(directory)
|
|
264
|
-
self._agent_id = agent
|
|
265
|
-
self._parent_session = parent_session or os.getenv("HTMLGRAPH_PARENT_SESSION")
|
|
266
|
-
|
|
267
|
-
# Initialize SQLite database (Phase 2)
|
|
268
|
-
self._db = HtmlGraphDB(
|
|
269
|
-
db_path or str(Path.home() / ".htmlgraph" / "htmlgraph.db")
|
|
270
|
-
)
|
|
271
|
-
self._db.connect()
|
|
272
|
-
self._db.create_tables()
|
|
273
|
-
|
|
274
|
-
# Initialize underlying HtmlGraphs first (for backward compatibility and sharing)
|
|
275
|
-
# These are shared with SessionManager to avoid double-loading features
|
|
276
|
-
self._graph = HtmlGraph(self._directory / "features")
|
|
277
|
-
self._bugs_graph = HtmlGraph(self._directory / "bugs")
|
|
278
|
-
|
|
279
|
-
# Initialize SessionManager with shared graph instances to avoid double-loading
|
|
280
|
-
self.session_manager = SessionManager(
|
|
281
|
-
self._directory,
|
|
282
|
-
features_graph=self._graph,
|
|
283
|
-
bugs_graph=self._bugs_graph,
|
|
284
|
-
)
|
|
285
|
-
|
|
286
|
-
# Agent interface (for backward compatibility)
|
|
287
|
-
self._agent_interface = AgentInterface(
|
|
288
|
-
self._directory / "features", agent_id=agent
|
|
289
|
-
)
|
|
290
|
-
|
|
291
|
-
# Collection interfaces - all work item types (all with builder support)
|
|
292
|
-
self.features = FeatureCollection(self)
|
|
293
|
-
self.bugs = BugCollection(self)
|
|
294
|
-
self.chores = ChoreCollection(self)
|
|
295
|
-
self.spikes = SpikeCollection(self)
|
|
296
|
-
self.epics = EpicCollection(self)
|
|
297
|
-
self.phases = PhaseCollection(self)
|
|
298
|
-
|
|
299
|
-
# Non-work collections
|
|
300
|
-
self.sessions: SessionCollection = SessionCollection(self)
|
|
301
|
-
self.tracks: TrackCollection = TrackCollection(
|
|
302
|
-
self
|
|
303
|
-
) # Use specialized collection with builder support
|
|
304
|
-
self.agents: BaseCollection = BaseCollection(self, "agents", "agent")
|
|
305
|
-
|
|
306
|
-
# Learning collections (Active Learning Persistence)
|
|
307
|
-
self.patterns = PatternCollection(self)
|
|
308
|
-
self.insights = InsightCollection(self)
|
|
309
|
-
self.metrics = MetricCollection(self)
|
|
310
|
-
|
|
311
|
-
# Todo collection (persistent task tracking)
|
|
312
|
-
self.todos = TodoCollection(self)
|
|
313
|
-
|
|
314
|
-
# Task delegation collection (observability for spawned agents)
|
|
315
|
-
self.task_delegations = TaskDelegationCollection(self)
|
|
316
|
-
|
|
317
|
-
# Create learning directories if needed
|
|
318
|
-
(self._directory / "patterns").mkdir(exist_ok=True)
|
|
319
|
-
(self._directory / "insights").mkdir(exist_ok=True)
|
|
320
|
-
(self._directory / "metrics").mkdir(exist_ok=True)
|
|
321
|
-
(self._directory / "todos").mkdir(exist_ok=True)
|
|
322
|
-
(self._directory / "task-delegations").mkdir(exist_ok=True)
|
|
323
|
-
|
|
324
|
-
# Initialize RefManager and set on all collections
|
|
325
|
-
from htmlgraph.refs import RefManager
|
|
326
|
-
|
|
327
|
-
self.refs = RefManager(self._directory)
|
|
328
|
-
|
|
329
|
-
# Set ref manager on all work item collections
|
|
330
|
-
self.features.set_ref_manager(self.refs)
|
|
331
|
-
self.bugs.set_ref_manager(self.refs)
|
|
332
|
-
self.chores.set_ref_manager(self.refs)
|
|
333
|
-
self.spikes.set_ref_manager(self.refs)
|
|
334
|
-
self.epics.set_ref_manager(self.refs)
|
|
335
|
-
self.phases.set_ref_manager(self.refs)
|
|
336
|
-
self.tracks.set_ref_manager(self.refs)
|
|
337
|
-
self.todos.set_ref_manager(self.refs)
|
|
338
|
-
|
|
339
|
-
# Analytics interface (Phase 2: Work Type Analytics)
|
|
340
|
-
self.analytics = Analytics(self)
|
|
341
|
-
|
|
342
|
-
# Dependency analytics interface (Advanced graph analytics)
|
|
343
|
-
self.dep_analytics = DependencyAnalytics(self._graph)
|
|
344
|
-
|
|
345
|
-
# Cross-session analytics interface (Git commit-based analytics)
|
|
346
|
-
self.cross_session_analytics = CrossSessionAnalytics(self)
|
|
347
|
-
|
|
348
|
-
# Context analytics interface (Context usage tracking)
|
|
349
|
-
self.context = ContextAnalytics(self)
|
|
350
|
-
|
|
351
|
-
# Pattern learning interface (Phase 2: Behavior Pattern Learning)
|
|
352
|
-
from htmlgraph.analytics.pattern_learning import PatternLearner
|
|
353
|
-
|
|
354
|
-
self.pattern_learning = PatternLearner(self._directory)
|
|
355
|
-
|
|
356
|
-
# Lazy-loaded orchestrator for subagent management
|
|
357
|
-
self._orchestrator: Any = None
|
|
358
|
-
|
|
359
|
-
# System prompt manager (lazy-loaded)
|
|
360
|
-
self._system_prompts: SystemPromptManager | None = None
|
|
361
|
-
|
|
362
|
-
# Session warning system (workaround for Claude Code hook bug #10373)
|
|
363
|
-
# Shows orchestrator instructions on first SDK usage per session
|
|
364
|
-
self._session_warning = check_and_show_warning(
|
|
365
|
-
self._directory,
|
|
366
|
-
agent=self._agent_id,
|
|
367
|
-
session_id=None, # Will be set by session manager if available
|
|
368
|
-
)
|
|
369
|
-
|
|
370
|
-
@staticmethod
|
|
371
|
-
def _discover_htmlgraph() -> Path:
|
|
372
|
-
"""
|
|
373
|
-
Auto-discover .htmlgraph directory.
|
|
374
|
-
|
|
375
|
-
Searches current directory and parents.
|
|
376
|
-
"""
|
|
377
|
-
current = Path.cwd()
|
|
378
|
-
|
|
379
|
-
# Check current directory
|
|
380
|
-
if (current / ".htmlgraph").exists():
|
|
381
|
-
return current / ".htmlgraph"
|
|
382
|
-
|
|
383
|
-
# Check parent directories
|
|
384
|
-
for parent in current.parents:
|
|
385
|
-
if (parent / ".htmlgraph").exists():
|
|
386
|
-
return parent / ".htmlgraph"
|
|
387
|
-
|
|
388
|
-
# Default to current directory
|
|
389
|
-
return current / ".htmlgraph"
|
|
390
|
-
|
|
391
|
-
@property
|
|
392
|
-
def agent(self) -> str | None:
|
|
393
|
-
"""Get current agent ID."""
|
|
394
|
-
return self._agent_id
|
|
395
|
-
|
|
396
|
-
@property
|
|
397
|
-
def system_prompts(self) -> SystemPromptManager:
|
|
398
|
-
"""
|
|
399
|
-
Access system prompt management.
|
|
400
|
-
|
|
401
|
-
Provides methods to:
|
|
402
|
-
- Get active prompt (project override OR plugin default)
|
|
403
|
-
- Create/delete project-level overrides
|
|
404
|
-
- Validate token counts
|
|
405
|
-
- Get prompt statistics
|
|
406
|
-
|
|
407
|
-
Lazy-loaded on first access.
|
|
408
|
-
|
|
409
|
-
Returns:
|
|
410
|
-
SystemPromptManager instance
|
|
411
|
-
|
|
412
|
-
Example:
|
|
413
|
-
>>> sdk = SDK(agent="claude")
|
|
414
|
-
|
|
415
|
-
# Get active prompt
|
|
416
|
-
>>> prompt = sdk.system_prompts.get_active()
|
|
417
|
-
|
|
418
|
-
# Create project override
|
|
419
|
-
>>> sdk.system_prompts.create("## Custom prompt\\n...")
|
|
420
|
-
|
|
421
|
-
# Validate token count
|
|
422
|
-
>>> result = sdk.system_prompts.validate()
|
|
423
|
-
>>> print(result['message'])
|
|
424
|
-
|
|
425
|
-
# Get statistics
|
|
426
|
-
>>> stats = sdk.system_prompts.get_stats()
|
|
427
|
-
>>> print(f"Source: {stats['source']}")
|
|
428
|
-
"""
|
|
429
|
-
if self._system_prompts is None:
|
|
430
|
-
self._system_prompts = SystemPromptManager(self._directory)
|
|
431
|
-
return self._system_prompts
|
|
432
|
-
|
|
433
|
-
def dismiss_session_warning(self) -> bool:
|
|
434
|
-
"""
|
|
435
|
-
Dismiss the session warning after reading it.
|
|
436
|
-
|
|
437
|
-
IMPORTANT: Call this as your FIRST action after seeing the orchestrator
|
|
438
|
-
warning. This confirms you've read the instructions.
|
|
439
|
-
|
|
440
|
-
Returns:
|
|
441
|
-
True if warning was dismissed, False if already dismissed
|
|
442
|
-
|
|
443
|
-
Example:
|
|
444
|
-
sdk = SDK(agent="claude")
|
|
445
|
-
# Warning shown automatically...
|
|
446
|
-
|
|
447
|
-
# First action: dismiss to confirm you read it
|
|
448
|
-
sdk.dismiss_session_warning()
|
|
449
|
-
|
|
450
|
-
# Now proceed with orchestration
|
|
451
|
-
sdk.spawn_coder(feature_id="feat-123", ...)
|
|
452
|
-
"""
|
|
453
|
-
if self._session_warning:
|
|
454
|
-
return self._session_warning.dismiss(
|
|
455
|
-
agent=self._agent_id,
|
|
456
|
-
session_id=None,
|
|
457
|
-
)
|
|
458
|
-
return False
|
|
459
|
-
|
|
460
|
-
def get_warning_status(self) -> dict[str, Any]:
|
|
461
|
-
"""
|
|
462
|
-
Get current session warning status.
|
|
463
|
-
|
|
464
|
-
Returns:
|
|
465
|
-
Dict with dismissed status, timestamp, and show count
|
|
466
|
-
"""
|
|
467
|
-
if self._session_warning:
|
|
468
|
-
return self._session_warning.get_status()
|
|
469
|
-
return {"dismissed": True, "show_count": 0}
|
|
470
|
-
|
|
471
|
-
def ref(self, short_ref: str) -> Node | None:
|
|
472
|
-
"""
|
|
473
|
-
Resolve a short ref to a Node object.
|
|
474
|
-
|
|
475
|
-
Short refs are stable identifiers like @f1, @t2, @b5 that map to
|
|
476
|
-
full node IDs. This method resolves the short ref and fetches the
|
|
477
|
-
corresponding node from the appropriate collection.
|
|
478
|
-
|
|
479
|
-
Args:
|
|
480
|
-
short_ref: Short ref like "@f1", "@t2", "@b5", etc.
|
|
481
|
-
|
|
482
|
-
Returns:
|
|
483
|
-
Node object or None if not found
|
|
484
|
-
|
|
485
|
-
Example:
|
|
486
|
-
>>> sdk = SDK(agent="claude")
|
|
487
|
-
>>> feature = sdk.ref("@f1")
|
|
488
|
-
>>> if feature:
|
|
489
|
-
... print(feature.title)
|
|
490
|
-
... feature.status = "done"
|
|
491
|
-
... sdk.features.update(feature)
|
|
492
|
-
"""
|
|
493
|
-
# Resolve short ref to full ID
|
|
494
|
-
full_id = self.refs.resolve_ref(short_ref)
|
|
495
|
-
if not full_id:
|
|
496
|
-
return None
|
|
497
|
-
|
|
498
|
-
# Determine type from ref prefix and fetch from appropriate collection
|
|
499
|
-
if len(short_ref) < 2:
|
|
500
|
-
return None
|
|
501
|
-
|
|
502
|
-
prefix = short_ref[1] # Get letter after @
|
|
503
|
-
|
|
504
|
-
# Map prefix to collection
|
|
505
|
-
collection_map = {
|
|
506
|
-
"f": self.features,
|
|
507
|
-
"t": self.tracks,
|
|
508
|
-
"b": self.bugs,
|
|
509
|
-
"s": self.spikes,
|
|
510
|
-
"c": self.chores,
|
|
511
|
-
"e": self.epics,
|
|
512
|
-
"d": self.todos,
|
|
513
|
-
"p": self.phases,
|
|
514
|
-
}
|
|
515
|
-
|
|
516
|
-
collection = collection_map.get(prefix)
|
|
517
|
-
if not collection:
|
|
518
|
-
return None
|
|
519
|
-
|
|
520
|
-
# Get node from collection
|
|
521
|
-
if hasattr(collection, "get"):
|
|
522
|
-
return cast(Node | None, collection.get(full_id))
|
|
523
|
-
|
|
524
|
-
return None
|
|
525
|
-
|
|
526
|
-
# =========================================================================
|
|
527
|
-
# SQLite Database Integration (Phase 2)
|
|
528
|
-
# =========================================================================
|
|
529
|
-
|
|
530
|
-
def db(self) -> HtmlGraphDB:
|
|
531
|
-
"""
|
|
532
|
-
Get the SQLite database instance.
|
|
533
|
-
|
|
534
|
-
Returns:
|
|
535
|
-
HtmlGraphDB instance for executing queries
|
|
536
|
-
|
|
537
|
-
Example:
|
|
538
|
-
>>> sdk = SDK(agent="claude")
|
|
539
|
-
>>> db = sdk.db()
|
|
540
|
-
>>> events = db.get_session_events("sess-123")
|
|
541
|
-
>>> features = db.get_features_by_status("todo")
|
|
542
|
-
"""
|
|
543
|
-
return self._db
|
|
544
|
-
|
|
545
|
-
def query(self, sql: str, params: tuple = ()) -> list[dict[str, Any]]:
|
|
546
|
-
"""
|
|
547
|
-
Execute a raw SQL query on the SQLite database.
|
|
548
|
-
|
|
549
|
-
Args:
|
|
550
|
-
sql: SQL query string
|
|
551
|
-
params: Query parameters (for safe parameterized queries)
|
|
552
|
-
|
|
553
|
-
Returns:
|
|
554
|
-
List of result dictionaries
|
|
555
|
-
|
|
556
|
-
Example:
|
|
557
|
-
>>> sdk = SDK(agent="claude")
|
|
558
|
-
>>> results = sdk.query(
|
|
559
|
-
... "SELECT * FROM features WHERE status = ? AND priority = ?",
|
|
560
|
-
... ("todo", "high")
|
|
561
|
-
... )
|
|
562
|
-
>>> for row in results:
|
|
563
|
-
... print(row["title"])
|
|
564
|
-
"""
|
|
565
|
-
if not self._db.connection:
|
|
566
|
-
self._db.connect()
|
|
567
|
-
|
|
568
|
-
cursor = self._db.connection.cursor() # type: ignore[union-attr]
|
|
569
|
-
cursor.execute(sql, params)
|
|
570
|
-
rows = cursor.fetchall()
|
|
571
|
-
return [dict(row) for row in rows]
|
|
572
|
-
|
|
573
|
-
def execute_query_builder(
|
|
574
|
-
self, sql: str, params: tuple = ()
|
|
575
|
-
) -> list[dict[str, Any]]:
|
|
576
|
-
"""
|
|
577
|
-
Execute a query using the Queries builder.
|
|
578
|
-
|
|
579
|
-
Args:
|
|
580
|
-
sql: SQL query from Queries builder
|
|
581
|
-
params: Parameters from Queries builder
|
|
582
|
-
|
|
583
|
-
Returns:
|
|
584
|
-
List of result dictionaries
|
|
585
|
-
|
|
586
|
-
Example:
|
|
587
|
-
>>> sdk = SDK(agent="claude")
|
|
588
|
-
>>> sql, params = Queries.get_features_by_status("todo", limit=5)
|
|
589
|
-
>>> results = sdk.execute_query_builder(sql, params)
|
|
590
|
-
"""
|
|
591
|
-
return self.query(sql, params)
|
|
592
|
-
|
|
593
|
-
def export_to_html(
|
|
594
|
-
self,
|
|
595
|
-
output_dir: str | None = None,
|
|
596
|
-
include_features: bool = True,
|
|
597
|
-
include_sessions: bool = True,
|
|
598
|
-
include_events: bool = False,
|
|
599
|
-
) -> dict[str, int]:
|
|
600
|
-
"""
|
|
601
|
-
Export SQLite data to HTML files for backward compatibility.
|
|
602
|
-
|
|
603
|
-
Args:
|
|
604
|
-
output_dir: Directory to export to (defaults to .htmlgraph)
|
|
605
|
-
include_features: Export features
|
|
606
|
-
include_sessions: Export sessions
|
|
607
|
-
include_events: Export events (detailed, use with care)
|
|
608
|
-
|
|
609
|
-
Returns:
|
|
610
|
-
Dict with export counts: {"features": int, "sessions": int, "events": int}
|
|
611
|
-
|
|
612
|
-
Example:
|
|
613
|
-
>>> sdk = SDK(agent="claude")
|
|
614
|
-
>>> result = sdk.export_to_html()
|
|
615
|
-
>>> print(f"Exported {result['features']} features")
|
|
616
|
-
"""
|
|
617
|
-
if output_dir is None:
|
|
618
|
-
output_dir = str(self._directory)
|
|
619
|
-
|
|
620
|
-
output_path = Path(output_dir)
|
|
621
|
-
counts = {"features": 0, "sessions": 0, "events": 0}
|
|
622
|
-
|
|
623
|
-
if include_features:
|
|
624
|
-
# Export all features from SQLite to HTML
|
|
625
|
-
features_dir = output_path / "features"
|
|
626
|
-
features_dir.mkdir(parents=True, exist_ok=True)
|
|
627
|
-
|
|
628
|
-
try:
|
|
629
|
-
cursor = self._db.connection.cursor() # type: ignore[union-attr]
|
|
630
|
-
cursor.execute("SELECT * FROM features")
|
|
631
|
-
rows = cursor.fetchall()
|
|
632
|
-
|
|
633
|
-
for row in rows:
|
|
634
|
-
feature_dict = dict(row)
|
|
635
|
-
feature_id = feature_dict["id"]
|
|
636
|
-
# Write HTML file (simplified export)
|
|
637
|
-
html_file = features_dir / f"{feature_id}.html"
|
|
638
|
-
html_file.write_text(
|
|
639
|
-
f"<h1>{feature_dict['title']}</h1>"
|
|
640
|
-
f"<p>Status: {feature_dict['status']}</p>"
|
|
641
|
-
f"<p>Type: {feature_dict['type']}</p>"
|
|
642
|
-
)
|
|
643
|
-
counts["features"] += 1
|
|
644
|
-
except Exception as e:
|
|
645
|
-
import logging
|
|
646
|
-
|
|
647
|
-
logging.error(f"Error exporting features: {e}")
|
|
648
|
-
|
|
649
|
-
if include_sessions:
|
|
650
|
-
# Export all sessions from SQLite to HTML
|
|
651
|
-
sessions_dir = output_path / "sessions"
|
|
652
|
-
sessions_dir.mkdir(parents=True, exist_ok=True)
|
|
653
|
-
|
|
654
|
-
try:
|
|
655
|
-
cursor = self._db.connection.cursor() # type: ignore[union-attr]
|
|
656
|
-
cursor.execute("SELECT * FROM sessions")
|
|
657
|
-
rows = cursor.fetchall()
|
|
658
|
-
|
|
659
|
-
for row in rows:
|
|
660
|
-
session_dict = dict(row)
|
|
661
|
-
session_id = session_dict["session_id"]
|
|
662
|
-
# Write HTML file (simplified export)
|
|
663
|
-
html_file = sessions_dir / f"{session_id}.html"
|
|
664
|
-
html_file.write_text(
|
|
665
|
-
f"<h1>Session {session_id}</h1>"
|
|
666
|
-
f"<p>Agent: {session_dict['agent_assigned']}</p>"
|
|
667
|
-
f"<p>Status: {session_dict['status']}</p>"
|
|
668
|
-
)
|
|
669
|
-
counts["sessions"] += 1
|
|
670
|
-
except Exception as e:
|
|
671
|
-
import logging
|
|
672
|
-
|
|
673
|
-
logging.error(f"Error exporting sessions: {e}")
|
|
674
|
-
|
|
675
|
-
return counts
|
|
676
|
-
|
|
677
|
-
def _log_event(
|
|
678
|
-
self,
|
|
679
|
-
event_type: str,
|
|
680
|
-
tool_name: str | None = None,
|
|
681
|
-
input_summary: str | None = None,
|
|
682
|
-
output_summary: str | None = None,
|
|
683
|
-
context: dict[str, Any] | None = None,
|
|
684
|
-
cost_tokens: int = 0,
|
|
685
|
-
) -> bool:
|
|
686
|
-
"""
|
|
687
|
-
Log an event to the SQLite database with parent-child linking.
|
|
688
|
-
|
|
689
|
-
Internal method used by collections to track operations.
|
|
690
|
-
Automatically creates a session if one doesn't exist.
|
|
691
|
-
Reads parent event ID from HTMLGRAPH_PARENT_ACTIVITY env var for hierarchical tracking.
|
|
692
|
-
|
|
693
|
-
Args:
|
|
694
|
-
event_type: Type of event (tool_call, completion, error, etc.)
|
|
695
|
-
tool_name: Tool that was called
|
|
696
|
-
input_summary: Summary of input
|
|
697
|
-
output_summary: Summary of output
|
|
698
|
-
context: Additional context metadata
|
|
699
|
-
cost_tokens: Token cost estimate
|
|
700
|
-
|
|
701
|
-
Returns:
|
|
702
|
-
True if logged successfully, False otherwise
|
|
703
|
-
|
|
704
|
-
Example (internal use):
|
|
705
|
-
>>> sdk._log_event(
|
|
706
|
-
... event_type="tool_call",
|
|
707
|
-
... tool_name="Edit",
|
|
708
|
-
... input_summary="Edit file.py",
|
|
709
|
-
... cost_tokens=100
|
|
710
|
-
... )
|
|
711
|
-
"""
|
|
712
|
-
from uuid import uuid4
|
|
713
|
-
|
|
714
|
-
event_id = f"evt-{uuid4().hex[:12]}"
|
|
715
|
-
session_id = self._parent_session or "cli-session"
|
|
716
|
-
|
|
717
|
-
# Read parent event ID from environment variable for hierarchical linking
|
|
718
|
-
parent_event_id = os.getenv("HTMLGRAPH_PARENT_ACTIVITY")
|
|
719
|
-
|
|
720
|
-
# Ensure session exists before logging event
|
|
721
|
-
try:
|
|
722
|
-
self._ensure_session_exists(session_id, parent_event_id=parent_event_id)
|
|
723
|
-
except Exception as e:
|
|
724
|
-
import logging
|
|
725
|
-
|
|
726
|
-
logging.debug(f"Failed to ensure session exists: {e}")
|
|
727
|
-
# Continue anyway - session creation failure shouldn't block event logging
|
|
728
|
-
|
|
729
|
-
return self._db.insert_event(
|
|
730
|
-
event_id=event_id,
|
|
731
|
-
agent_id=self._agent_id,
|
|
732
|
-
event_type=event_type,
|
|
733
|
-
session_id=session_id,
|
|
734
|
-
tool_name=tool_name,
|
|
735
|
-
input_summary=input_summary,
|
|
736
|
-
output_summary=output_summary,
|
|
737
|
-
context=context,
|
|
738
|
-
parent_event_id=parent_event_id,
|
|
739
|
-
cost_tokens=cost_tokens,
|
|
740
|
-
)
|
|
741
|
-
|
|
742
|
-
def _ensure_session_exists(
|
|
743
|
-
self, session_id: str, parent_event_id: str | None = None
|
|
744
|
-
) -> None:
|
|
745
|
-
"""
|
|
746
|
-
Create a session record if it doesn't exist.
|
|
747
|
-
|
|
748
|
-
Args:
|
|
749
|
-
session_id: Session ID to ensure exists
|
|
750
|
-
parent_event_id: Event that spawned this session (optional)
|
|
751
|
-
"""
|
|
752
|
-
if not self._db.connection:
|
|
753
|
-
self._db.connect()
|
|
754
|
-
|
|
755
|
-
cursor = self._db.connection.cursor() # type: ignore[union-attr]
|
|
756
|
-
cursor.execute(
|
|
757
|
-
"SELECT COUNT(*) FROM sessions WHERE session_id = ?", (session_id,)
|
|
758
|
-
)
|
|
759
|
-
exists = cursor.fetchone()[0] > 0
|
|
760
|
-
|
|
761
|
-
if not exists:
|
|
762
|
-
# Create session record
|
|
763
|
-
self._db.insert_session(
|
|
764
|
-
session_id=session_id,
|
|
765
|
-
agent_assigned=self._agent_id,
|
|
766
|
-
is_subagent=self._parent_session is not None,
|
|
767
|
-
parent_session_id=self._parent_session,
|
|
768
|
-
parent_event_id=parent_event_id,
|
|
769
|
-
)
|
|
770
|
-
|
|
771
|
-
def reload(self) -> None:
    """Refresh in-memory state from disk.

    Reloads the graph and the agent interface in that order. The
    SessionManager needs no explicit reload: it picks up changes lazily
    through its converters/graphs on next access.
    """
    for component in (self._graph, self._agent_interface):
        component.reload()
|
|
776
|
-
|
|
777
|
-
def summary(self, max_items: int = 10) -> str:
    """Compact project overview for AI agent orientation.

    Args:
        max_items: Cap on the number of items shown per section.

    Returns:
        Human-readable summary string.
    """
    return self._agent_interface.get_summary(max_items)
|
|
785
|
-
|
|
786
|
-
def my_work(self) -> dict[str, Any]:
    """Return the current agent's workload.

    Returns:
        Dict with in-progress and completed counts.

    Raises:
        ValueError: If the SDK has no agent ID configured.
    """
    agent = self._agent_id
    if not agent:
        raise ValueError("No agent ID set")
    return self._agent_interface.get_workload(agent)
|
|
796
|
-
|
|
797
|
-
def next_task(
    self, priority: str | None = None, auto_claim: bool = True
) -> Node | None:
    """Fetch (and by default claim) the next available task for this agent.

    Args:
        priority: Optional priority filter.
        auto_claim: Claim the task automatically when True.

    Returns:
        Next available Node, or None when nothing is ready.
    """
    # Only feature nodes are served through this shortcut.
    return self._agent_interface.get_next_task(
        agent_id=self._agent_id,
        priority=priority,
        node_type="feature",
        auto_claim=auto_claim,
    )
|
|
816
|
-
|
|
817
|
-
def set_session_handoff(
|
|
818
|
-
self,
|
|
819
|
-
handoff_notes: str | None = None,
|
|
820
|
-
recommended_next: str | None = None,
|
|
821
|
-
blockers: list[str] | None = None,
|
|
822
|
-
session_id: str | None = None,
|
|
823
|
-
) -> Any:
|
|
824
|
-
"""
|
|
825
|
-
Set handoff context on a session.
|
|
826
|
-
|
|
827
|
-
Args:
|
|
828
|
-
handoff_notes: Notes for next session/agent
|
|
829
|
-
recommended_next: Suggested next steps
|
|
830
|
-
blockers: List of blockers
|
|
831
|
-
session_id: Specific session ID (defaults to active session)
|
|
832
|
-
|
|
833
|
-
Returns:
|
|
834
|
-
Updated Session or None if not found
|
|
835
|
-
"""
|
|
836
|
-
if not session_id:
|
|
837
|
-
if self._agent_id:
|
|
838
|
-
active = self.session_manager.get_active_session_for_agent(
|
|
839
|
-
self._agent_id
|
|
840
|
-
)
|
|
841
|
-
else:
|
|
842
|
-
active = self.session_manager.get_active_session()
|
|
843
|
-
if not active:
|
|
844
|
-
return None
|
|
845
|
-
session_id = active.id
|
|
846
|
-
|
|
847
|
-
return self.session_manager.set_session_handoff(
|
|
848
|
-
session_id=session_id,
|
|
849
|
-
handoff_notes=handoff_notes,
|
|
850
|
-
recommended_next=recommended_next,
|
|
851
|
-
blockers=blockers,
|
|
852
|
-
)
|
|
853
|
-
|
|
854
|
-
def continue_from_last(
|
|
855
|
-
self,
|
|
856
|
-
agent: str | None = None,
|
|
857
|
-
auto_create_session: bool = True,
|
|
858
|
-
) -> tuple[Any, Any]:
|
|
859
|
-
"""
|
|
860
|
-
Continue work from the last completed session.
|
|
861
|
-
|
|
862
|
-
Loads context from previous session including handoff notes,
|
|
863
|
-
recommended files, blockers, and recent commits.
|
|
864
|
-
|
|
865
|
-
Args:
|
|
866
|
-
agent: Filter by agent (None = current SDK agent)
|
|
867
|
-
auto_create_session: Create new session if True
|
|
868
|
-
|
|
869
|
-
Returns:
|
|
870
|
-
Tuple of (new_session, resume_info) or (None, None)
|
|
871
|
-
|
|
872
|
-
Example:
|
|
873
|
-
>>> sdk = SDK(agent="claude")
|
|
874
|
-
>>> session, resume = sdk.continue_from_last()
|
|
875
|
-
>>> if resume:
|
|
876
|
-
... print(resume.summary)
|
|
877
|
-
... print(resume.next_focus)
|
|
878
|
-
... for file in resume.recommended_files:
|
|
879
|
-
... print(f" - {file}")
|
|
880
|
-
"""
|
|
881
|
-
if not agent:
|
|
882
|
-
agent = self._agent_id
|
|
883
|
-
|
|
884
|
-
return self.session_manager.continue_from_last(
|
|
885
|
-
agent=agent,
|
|
886
|
-
auto_create_session=auto_create_session,
|
|
887
|
-
)
|
|
888
|
-
|
|
889
|
-
def end_session_with_handoff(
|
|
890
|
-
self,
|
|
891
|
-
session_id: str | None = None,
|
|
892
|
-
summary: str | None = None,
|
|
893
|
-
next_focus: str | None = None,
|
|
894
|
-
blockers: list[str] | None = None,
|
|
895
|
-
keep_context: list[str] | None = None,
|
|
896
|
-
auto_recommend_context: bool = True,
|
|
897
|
-
) -> Any:
|
|
898
|
-
"""
|
|
899
|
-
End session with handoff information for next session.
|
|
900
|
-
|
|
901
|
-
Args:
|
|
902
|
-
session_id: Session to end (None = active session)
|
|
903
|
-
summary: What was accomplished
|
|
904
|
-
next_focus: What should be done next
|
|
905
|
-
blockers: List of blockers
|
|
906
|
-
keep_context: List of files to keep context for
|
|
907
|
-
auto_recommend_context: Auto-recommend files from git
|
|
908
|
-
|
|
909
|
-
Returns:
|
|
910
|
-
Updated Session or None
|
|
911
|
-
|
|
912
|
-
Example:
|
|
913
|
-
>>> sdk.end_session_with_handoff(
|
|
914
|
-
... summary="Completed OAuth integration",
|
|
915
|
-
... next_focus="Implement JWT token refresh",
|
|
916
|
-
... blockers=["Waiting for security review"],
|
|
917
|
-
... keep_context=["src/auth/oauth.py"]
|
|
918
|
-
... )
|
|
919
|
-
"""
|
|
920
|
-
if not session_id:
|
|
921
|
-
if self._agent_id:
|
|
922
|
-
active = self.session_manager.get_active_session_for_agent(
|
|
923
|
-
self._agent_id
|
|
924
|
-
)
|
|
925
|
-
else:
|
|
926
|
-
active = self.session_manager.get_active_session()
|
|
927
|
-
if not active:
|
|
928
|
-
return None
|
|
929
|
-
session_id = active.id
|
|
930
|
-
|
|
931
|
-
return self.session_manager.end_session_with_handoff(
|
|
932
|
-
session_id=session_id,
|
|
933
|
-
summary=summary,
|
|
934
|
-
next_focus=next_focus,
|
|
935
|
-
blockers=blockers,
|
|
936
|
-
keep_context=keep_context,
|
|
937
|
-
auto_recommend_context=auto_recommend_context,
|
|
938
|
-
)
|
|
939
|
-
|
|
940
|
-
def start_session(
|
|
941
|
-
self,
|
|
942
|
-
session_id: str | None = None,
|
|
943
|
-
title: str | None = None,
|
|
944
|
-
agent: str | None = None,
|
|
945
|
-
) -> Any:
|
|
946
|
-
"""
|
|
947
|
-
Start a new session.
|
|
948
|
-
|
|
949
|
-
Args:
|
|
950
|
-
session_id: Optional session ID
|
|
951
|
-
title: Optional session title
|
|
952
|
-
agent: Optional agent override (defaults to SDK agent)
|
|
953
|
-
|
|
954
|
-
Returns:
|
|
955
|
-
New Session instance
|
|
956
|
-
"""
|
|
957
|
-
return self.session_manager.start_session(
|
|
958
|
-
session_id=session_id,
|
|
959
|
-
agent=agent or self._agent_id or "cli",
|
|
960
|
-
title=title,
|
|
961
|
-
parent_session_id=self._parent_session,
|
|
962
|
-
)
|
|
963
|
-
|
|
964
|
-
def end_session(
|
|
965
|
-
self,
|
|
966
|
-
session_id: str,
|
|
967
|
-
handoff_notes: str | None = None,
|
|
968
|
-
recommended_next: str | None = None,
|
|
969
|
-
blockers: list[str] | None = None,
|
|
970
|
-
) -> Any:
|
|
971
|
-
"""
|
|
972
|
-
End a session.
|
|
973
|
-
|
|
974
|
-
Args:
|
|
975
|
-
session_id: Session ID to end
|
|
976
|
-
handoff_notes: Optional handoff notes
|
|
977
|
-
recommended_next: Optional recommendations
|
|
978
|
-
blockers: Optional blockers
|
|
979
|
-
|
|
980
|
-
Returns:
|
|
981
|
-
Ended Session instance
|
|
982
|
-
"""
|
|
983
|
-
return self.session_manager.end_session(
|
|
984
|
-
session_id=session_id,
|
|
985
|
-
handoff_notes=handoff_notes,
|
|
986
|
-
recommended_next=recommended_next,
|
|
987
|
-
blockers=blockers,
|
|
988
|
-
)
|
|
989
|
-
|
|
990
|
-
def get_status(self) -> dict[str, Any]:
    """Project status snapshot.

    Returns:
        Dict with status metrics (WIP, counts, etc.).
    """
    return self.session_manager.get_status()
|
|
998
|
-
|
|
999
|
-
def dedupe_sessions(
    self,
    max_events: int = 1,
    move_dir_name: str = "_orphans",
    dry_run: bool = False,
    stale_extra_active: bool = True,
) -> dict[str, int]:
    """Move low-signal sessions (e.g. SessionStart-only) out of the main dir.

    Args:
        max_events: Sessions with at most this many events are moved.
        move_dir_name: Directory name to move orphaned sessions into.
        dry_run: Report only; do not move files.
        stale_extra_active: Also mark extra active sessions as stale.

    Returns:
        Counts dict: scanned / moved / missing / staled_active / kept_active.

    Example:
        >>> result = sdk.dedupe_sessions(max_events=1, dry_run=False)
        >>> print(f"Scanned: {result['scanned']}, Moved: {result['moved']}")
    """
    return self.session_manager.dedupe_orphan_sessions(
        max_events=max_events,
        move_dir_name=move_dir_name,
        dry_run=dry_run,
        stale_extra_active=stale_extra_active,
    )
|
|
1029
|
-
|
|
1030
|
-
def track_activity(
|
|
1031
|
-
self,
|
|
1032
|
-
tool: str,
|
|
1033
|
-
summary: str,
|
|
1034
|
-
file_paths: list[str] | None = None,
|
|
1035
|
-
success: bool = True,
|
|
1036
|
-
feature_id: str | None = None,
|
|
1037
|
-
session_id: str | None = None,
|
|
1038
|
-
parent_activity_id: str | None = None,
|
|
1039
|
-
payload: dict[str, Any] | None = None,
|
|
1040
|
-
) -> Any:
|
|
1041
|
-
"""
|
|
1042
|
-
Track an activity in the current or specified session.
|
|
1043
|
-
|
|
1044
|
-
Args:
|
|
1045
|
-
tool: Tool name (Edit, Bash, Read, etc.)
|
|
1046
|
-
summary: Human-readable summary of the activity
|
|
1047
|
-
file_paths: Files involved in this activity
|
|
1048
|
-
success: Whether the tool call succeeded
|
|
1049
|
-
feature_id: Explicit feature ID (skips attribution if provided)
|
|
1050
|
-
session_id: Session ID (defaults to parent session if available, then active session)
|
|
1051
|
-
parent_activity_id: ID of parent activity (e.g., Skill/Task invocation)
|
|
1052
|
-
payload: Optional rich payload data
|
|
1053
|
-
|
|
1054
|
-
Returns:
|
|
1055
|
-
Created ActivityEntry with attribution
|
|
1056
|
-
|
|
1057
|
-
Example:
|
|
1058
|
-
>>> sdk = SDK(agent="claude")
|
|
1059
|
-
>>> entry = sdk.track_activity(
|
|
1060
|
-
... tool="CustomTool",
|
|
1061
|
-
... summary="Performed custom analysis",
|
|
1062
|
-
... file_paths=["src/main.py"],
|
|
1063
|
-
... success=True
|
|
1064
|
-
... )
|
|
1065
|
-
>>> print(f"Tracked: [{entry.tool}] {entry.summary}")
|
|
1066
|
-
"""
|
|
1067
|
-
# Determine target session: explicit parameter > parent_session > active > none
|
|
1068
|
-
if not session_id:
|
|
1069
|
-
# Priority 1: Parent session (explicitly provided or from env var)
|
|
1070
|
-
if self._parent_session:
|
|
1071
|
-
session_id = self._parent_session
|
|
1072
|
-
else:
|
|
1073
|
-
# Priority 2: Active session for this agent
|
|
1074
|
-
active = self.session_manager.get_active_session(agent=self._agent_id)
|
|
1075
|
-
if active:
|
|
1076
|
-
session_id = active.id
|
|
1077
|
-
else:
|
|
1078
|
-
raise ValueError(
|
|
1079
|
-
"No active session. Start one with sdk.start_session()"
|
|
1080
|
-
)
|
|
1081
|
-
|
|
1082
|
-
# Get parent activity ID from environment if not provided
|
|
1083
|
-
if not parent_activity_id:
|
|
1084
|
-
parent_activity_id = os.getenv("HTMLGRAPH_PARENT_ACTIVITY")
|
|
1085
|
-
|
|
1086
|
-
return self.session_manager.track_activity(
|
|
1087
|
-
session_id=session_id,
|
|
1088
|
-
tool=tool,
|
|
1089
|
-
summary=summary,
|
|
1090
|
-
file_paths=file_paths,
|
|
1091
|
-
success=success,
|
|
1092
|
-
feature_id=feature_id,
|
|
1093
|
-
parent_activity_id=parent_activity_id,
|
|
1094
|
-
payload=payload,
|
|
1095
|
-
)
|
|
1096
|
-
|
|
1097
|
-
# =========================================================================
|
|
1098
|
-
# Strategic Planning & Analytics (Agent-Friendly Interface)
|
|
1099
|
-
# =========================================================================
|
|
1100
|
-
|
|
1101
|
-
def find_bottlenecks(self, top_n: int = 5) -> list[BottleneckDict]:
    """Identify tasks blocking the most downstream work.

    Note: prefer sdk.dep_analytics.find_bottlenecks() directly; this
    wrapper preserves the legacy dict shape for backward compatibility.

    Args:
        top_n: Maximum number of bottlenecks to return.

    Returns:
        List of bottleneck summaries with impact metrics.

    Example:
        >>> for bn in sdk.find_bottlenecks(top_n=3):
        ...     print(f"{bn['title']} blocks {bn['blocks_count']} tasks")
    """

    def _legacy_shape(bn: Any) -> BottleneckDict:
        # Flatten the analytics object into the historical dict layout.
        return {
            "id": bn.id,
            "title": bn.title,
            "status": bn.status,
            "priority": bn.priority,
            "blocks_count": bn.transitive_blocking,
            "impact_score": bn.weighted_impact,
            "blocked_tasks": bn.blocked_nodes[:5],
        }

    return [
        _legacy_shape(bn)
        for bn in self.dep_analytics.find_bottlenecks(top_n=top_n)
    ]
|
|
1138
|
-
|
|
1139
|
-
def get_parallel_work(self, max_agents: int = 5) -> dict[str, Any]:
    """Find tasks that can be worked on simultaneously.

    Note: prefer sdk.dep_analytics.find_parallelizable_work() directly;
    this wrapper exists for backward compatibility.

    Args:
        max_agents: Maximum number of parallel agents to plan for.

    Returns:
        Dict describing parallelization opportunities.

    Example:
        >>> parallel = sdk.get_parallel_work(max_agents=3)
        >>> print(f"Can work on {parallel['max_parallelism']} tasks at once")
    """
    report = self.dep_analytics.find_parallelizable_work(status="todo")

    levels = report.dependency_levels
    ready_now = levels[0].nodes if levels else []
    next_level = levels[1].nodes if len(levels) > 1 else []

    return {
        "max_parallelism": report.max_parallelism,
        "ready_now": ready_now[:max_agents],
        "total_ready": len(ready_now),
        "level_count": len(levels),
        "next_level": next_level,
    }
|
|
1176
|
-
|
|
1177
|
-
def recommend_next_work(self, agent_count: int = 1) -> list[dict[str, Any]]:
    """Smart recommendations for what to work on next.

    Note: prefer sdk.dep_analytics.recommend_next_tasks() directly; this
    wrapper preserves the legacy dict shape for backward compatibility.
    Considers priority, dependencies, and transitive impact.

    Args:
        agent_count: Number of agents/tasks to recommend.

    Returns:
        List of recommended tasks with reasoning.

    Example:
        >>> for rec in sdk.recommend_next_work(agent_count=3):
        ...     print(f"{rec['title']} (score: {rec['score']})")
    """
    result = self.dep_analytics.recommend_next_tasks(
        agent_count=agent_count, lookahead=5
    )

    def _flatten(rec: Any) -> dict[str, Any]:
        # Legacy dict layout expected by older callers.
        return {
            "id": rec.id,
            "title": rec.title,
            "priority": rec.priority,
            "score": rec.score,
            "reasons": rec.reasons,
            "estimated_hours": rec.estimated_effort,
            "unlocks_count": len(rec.unlocks),
            "unlocks": rec.unlocks[:3],
        }

    return [_flatten(rec) for rec in result.recommendations]
|
|
1219
|
-
|
|
1220
|
-
def assess_risks(self) -> dict[str, Any]:
    """Assess dependency-related risks in the project.

    Note: prefer sdk.dep_analytics.assess_dependency_risk() directly; this
    wrapper exists for backward compatibility. Surfaces single points of
    failure, circular dependencies, and orphaned tasks.

    Returns:
        Dict with risk assessment results.

    Example:
        >>> risks = sdk.assess_risks()
        >>> if risks['high_risk_count'] > 0:
        ...     print(f"Warning: {risks['high_risk_count']} high-risk tasks")
    """
    risk = self.dep_analytics.assess_dependency_risk()

    high_risk_tasks = [
        {
            "id": node.id,
            "title": node.title,
            "risk_score": node.risk_score,
            "risk_factors": [factor.description for factor in node.risk_factors],
        }
        for node in risk.high_risk
    ]

    return {
        "high_risk_count": len(risk.high_risk),
        "high_risk_tasks": high_risk_tasks,
        "circular_dependencies": risk.circular_dependencies,
        "orphaned_count": len(risk.orphaned_nodes),
        "orphaned_tasks": risk.orphaned_nodes[:5],
        "recommendations": risk.recommendations,
    }
|
|
1260
|
-
|
|
1261
|
-
def analyze_impact(self, node_id: str) -> dict[str, Any]:
    """Summarize what completing *node_id* would unlock.

    Note: prefer sdk.dep_analytics.impact_analysis() directly; this
    wrapper exists for backward compatibility.

    Args:
        node_id: Task to analyze.

    Returns:
        Dict with direct/transitive dependents, completion impact, and a
        preview of affected tasks.

    Example:
        >>> impact = sdk.analyze_impact("feature-001")
        >>> print(f"Completing this unlocks {impact['unlocks_count']} tasks")
    """
    impact = self.dep_analytics.impact_analysis(node_id)
    affected = impact.affected_nodes

    return {
        "node_id": node_id,
        "direct_dependents": impact.direct_dependents,
        "total_impact": impact.transitive_dependents,
        "completion_impact": impact.completion_impact,
        "unlocks_count": len(affected),
        "affected_tasks": affected[:10],
    }
|
|
1292
|
-
|
|
1293
|
-
def get_work_queue(
    self, agent_id: str | None = None, limit: int = 10, min_score: float = 0.0
) -> list[dict[str, Any]]:
    """Prioritized view of recommended, active, and blocked work.

    Combines strategic recommendation scores with capability-based routing
    scores, annotates each item with its dependency edges, and returns the
    top entries sorted by score (descending).

    Args:
        agent_id: Agent to build the queue for (defaults to the SDK agent).
        limit: Maximum number of items to return (default: 10).
        min_score: Minimum score threshold (default: 0.0).

    Returns:
        Score-sorted list of dicts with task metadata: task_id, title,
        status, priority, score, type, complexity, effort, agent_assigned,
        blocks_count, blocked_by.

    Example:
        >>> for item in sdk.get_work_queue(limit=5):
        ...     print(f"{item['score']:.1f} - {item['title']}")
    """
    from htmlgraph.routing import AgentCapabilityRegistry, CapabilityMatcher

    agent = agent_id or self._agent_id or "cli"

    # Gather every todo or blocked work item across all collections.
    candidates: list[Any] = []
    for collection_name in ["features", "bugs", "spikes", "chores", "epics"]:
        collection = getattr(self, collection_name, None)
        if not collection:
            continue
        for status in ("todo", "blocked"):
            for item in collection.where(status=status):
                candidates.append(item)

    if not candidates:
        return []

    # Strategic scores take precedence over plain routing fit.
    recommendations = self.recommend_next_work(agent_count=limit * 2)
    strategic_scores = {rec["id"]: rec["score"] for rec in recommendations}

    registry = AgentCapabilityRegistry()
    registry.register_agent(agent, capabilities=[], wip_limit=5)
    # NOTE(review): WIP is counted from features only, as in the original;
    # other collections are not included in the WIP tally.
    wip_count = len(self.features.where(status="in-progress", agent_assigned=agent))
    registry.set_wip(agent, wip_count)

    agent_profile = registry.get_agent(agent)

    queue_items: list[dict[str, Any]] = []
    for item in candidates:
        if item.id in strategic_scores:
            score = strategic_scores[item.id]
        elif agent_profile:
            # Fallback: capability-based routing fit.
            score = CapabilityMatcher.score_agent_task_fit(agent_profile, item)
        else:
            score = 0.0

        if score < min_score:
            continue

        entry: dict[str, Any] = {
            "task_id": item.id,
            "title": item.title,
            "status": item.status,
            "priority": item.priority,
            "score": score,
            "type": item.type,
            "complexity": getattr(item, "complexity", None),
            "effort": getattr(item, "estimated_effort", None),
            "agent_assigned": getattr(item, "agent_assigned", None),
            "blocks_count": 0,
            "blocked_by": [],
        }

        # Attach dependency edges when the node carries them.
        if hasattr(item, "edges"):
            entry["blocks_count"] = len(item.edges.get("blocks", []))
            entry["blocked_by"] = item.edges.get("blocked_by", [])

        queue_items.append(entry)

    # Highest score first, capped at the requested size.
    queue_items.sort(key=lambda entry: entry["score"], reverse=True)
    return queue_items[:limit]
|
|
1414
|
-
|
|
1415
|
-
def work_next(
    self,
    agent_id: str | None = None,
    auto_claim: bool = False,
    min_score: float = 0.0,
) -> Node | None:
    """
    Get the next best task for an agent using smart routing.

    Uses both strategic analytics and capability-based routing (via
    ``get_work_queue``) to find the optimal next task.

    Args:
        agent_id: Agent to get task for (defaults to SDK agent)
        auto_claim: Automatically claim the task (default: False)
        min_score: Minimum score threshold (default: 0.0)

    Returns:
        Next best Node or None if no suitable task found

    Example:
        >>> sdk = SDK(agent="claude")
        >>> task = sdk.work_next(auto_claim=True)
        >>> if task:
        ...     print(f"Working on: {task.title}")
        ...     # Task is automatically claimed and assigned
    """
    # Fall back to the SDK's own identity, then to a generic "cli" agent.
    agent = agent_id or self._agent_id or "cli"

    # Get work queue - ask for more items (20) than we need, since only
    # "todo" entries are actionable and the queue may contain others.
    queue = self.get_work_queue(agent_id=agent, limit=20, min_score=min_score)

    if not queue:
        return None

    # Find the first actionable (todo) task - blocked tasks are not actionable
    top_item = None
    for item in queue:
        if item["status"] == "todo":
            top_item = item
            break

    if top_item is None:
        return None

    # Fetch the actual node: the queue only holds IDs, so probe each
    # work-item collection until one of them resolves the ID.
    task = None
    for collection_name in ["features", "bugs", "spikes", "chores", "epics"]:
        collection = getattr(self, collection_name, None)
        if collection:
            try:
                task = collection.get(top_item["task_id"])
                if task:
                    break
            except (ValueError, FileNotFoundError):
                # ID not in this collection (or backing file missing): try next.
                continue

    if not task:
        return None

    # Auto-claim if requested. NOTE: `collection` is the loop variable left
    # bound by the lookup above; when `task` was found we broke out of the
    # loop, so it still refers to the collection that owns the task.
    if auto_claim and task.status == "todo" and collection is not None:
        # Claim the task
        # collection.edit returns context manager or None
        task_editor: Any = collection.edit(task.id)
        if task_editor is not None:
            # collection.edit returns context manager; exiting it persists
            # the status/assignment change.
            with task_editor as t:
                t.status = "in-progress"
                t.agent_assigned = agent

    # NOTE(review): `task` was fetched before the claim; its in-memory
    # status/agent_assigned may be stale relative to the edit above —
    # confirm whether callers rely on the returned node reflecting the claim.
    result: Node | None = task
    return result
|
|
1488
|
-
|
|
1489
|
-
# =========================================================================
|
|
1490
|
-
# Planning Workflow Integration
|
|
1491
|
-
# =========================================================================
|
|
1492
|
-
|
|
1493
|
-
def start_planning_spike(
    self,
    title: str,
    context: str = "",
    timebox_hours: float = 4.0,
    auto_start: bool = True,
) -> Node:
    """
    Create a timeboxed planning spike for research/design ahead of a track.

    The spike is pre-seeded with a standard five-step research checklist
    and added to the graph immediately.

    Args:
        title: Spike title (e.g., "Plan User Authentication System")
        context: Background information, rendered as the spike's HTML content
        timebox_hours: Time limit for the spike (default: 4 hours)
        auto_start: Start the spike right away when an agent identity exists
            (default: True)

    Returns:
        The created spike Node.
    """
    from htmlgraph.ids import generate_id
    from htmlgraph.models import Spike, SpikeType

    # The spike is claimed immediately only when auto_start is requested
    # AND this SDK instance actually has an agent identity.
    claimed = bool(auto_start and self._agent_id)

    # Standard planning checklist every planning spike starts with.
    checklist = [
        "Research existing solutions and patterns",
        "Define requirements and constraints",
        "Design high-level architecture",
        "Identify dependencies and risks",
        "Create implementation plan",
    ]

    # Built directly from the model (SpikeBuilder doesn't exist yet).
    spike = Spike(
        id=generate_id(node_type="spike", title=title),
        title=title,
        type="spike",
        status="in-progress" if claimed else "todo",
        spike_type=SpikeType.ARCHITECTURAL,
        timebox_hours=int(timebox_hours),
        agent_assigned=self._agent_id if claimed else None,
        steps=[Step(description=step) for step in checklist],
        content=f"<p>{context}</p>" if context else "",
        edges={},
        properties={},
    )

    self._graph.add(spike)
    return spike
|
|
1549
|
-
|
|
1550
|
-
def create_track_from_plan(
    self,
    title: str,
    description: str,
    spike_id: str | None = None,
    priority: str = "high",
    requirements: list[str | tuple[str, str]] | None = None,
    phases: list[tuple[str, list[str]]] | None = None,
) -> dict[str, Any]:
    """
    Create a track with spec and plan from planning results.

    Args:
        title: Track title
        description: Track description
        spike_id: Optional spike ID that led to this track
        priority: Track priority (default: "high")
        requirements: List of requirements (strings or (req, priority) tuples);
            bare strings default to "must-have" priority
        phases: List of (phase_name, tasks) tuples for the plan

    Returns:
        Dict with track, spec, and plan details

    Example:
        >>> sdk = SDK(agent="claude")
        >>> track_info = sdk.create_track_from_plan(
        ...     title="User Authentication System",
        ...     description="OAuth 2.0 with JWT tokens",
        ...     requirements=[
        ...         ("OAuth 2.0 integration", "must-have"),
        ...         ("JWT token management", "must-have"),
        ...         "Password reset flow"
        ...     ],
        ...     phases=[
        ...         ("Phase 1: OAuth", ["Setup providers (2h)", "Callback (2h)"]),
        ...         ("Phase 2: JWT", ["Token signing (2h)", "Refresh (1.5h)"])
        ...     ]
        ... )
    """
    builder = (
        self.tracks.builder()
        .title(title)
        .description(description)
        .priority(priority)
    )

    # Add reference to planning spike if provided (traceability).
    if spike_id:
        # NOTE: the track builder has no public hook for arbitrary
        # properties yet, so we reach into its internal data dict.
        data: dict[str, Any] = builder._data  # type: ignore[attr-defined]
        data["properties"]["planning_spike"] = spike_id

    # Add spec if requirements provided.
    if requirements:
        # Normalize: bare strings become (requirement, "must-have") tuples.
        req_list: list[str | tuple[str, str]] = [
            (req, "must-have") if isinstance(req, str) else req
            for req in requirements
        ]

        builder.with_spec(
            overview=description,
            context=f"Track created from planning spike: {spike_id}"
            if spike_id
            else "",
            requirements=req_list,
            acceptance_criteria=[],
        )

    # Add plan if phases provided.
    if phases:
        builder.with_plan_phases(phases)

    track = builder.create()

    return {
        "track_id": track.id,
        "title": track.title,
        "has_spec": bool(requirements),
        "has_plan": bool(phases),
        "spike_id": spike_id,
        "priority": priority,
    }
|
|
1636
|
-
|
|
1637
|
-
def smart_plan(
    self,
    description: str,
    create_spike: bool = True,
    timebox_hours: float = 4.0,
    research_completed: bool = False,
    research_findings: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """
    Smart planning workflow: analyzes project context and creates spike or track.

    This is the main entry point for planning new work. It:
    1. Checks current project state
    2. Provides context from strategic analytics
    3. Creates a planning spike or track as appropriate

    **IMPORTANT: Research Phase Required**
    For complex features, you should complete research BEFORE planning:
    1. Use /htmlgraph:research or WebSearch to gather best practices
    2. Document findings (libraries, patterns, anti-patterns)
    3. Pass research_completed=True and research_findings to this method
    4. This ensures planning is informed by industry best practices

    Research-first workflow:
    1. /htmlgraph:research "{topic}" → Gather external knowledge
    2. sdk.smart_plan(..., research_completed=True) → Plan with context
    3. Complete spike steps → Design solution
    4. Create track from plan → Structure implementation

    Args:
        description: What you want to plan (e.g., "User authentication system")
        create_spike: Create a spike for research (default: True)
        timebox_hours: If creating spike, time limit (default: 4 hours)
        research_completed: Whether research was performed (default: False)
        research_findings: Structured research findings (optional)

    Returns:
        Dict with planning context and created spike/track info. Includes a
        "warnings" key only when a complex feature is planned unresearched.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> # WITH research (recommended for complex work)
        >>> research = {
        ...     "topic": "OAuth 2.0 best practices",
        ...     "sources_count": 5,
        ...     "recommended_library": "authlib",
        ...     "key_insights": ["Use PKCE", "Implement token rotation"]
        ... }
        >>> plan = sdk.smart_plan(
        ...     "User authentication system",
        ...     create_spike=True,
        ...     research_completed=True,
        ...     research_findings=research
        ... )
        >>> print(f"Created: {plan['spike_id']}")
        >>> print(f"Research informed: {plan['research_informed']}")
    """
    # Get project context from strategic analytics
    bottlenecks = self.find_bottlenecks(top_n=3)
    risks = self.assess_risks()
    parallel = self.get_parallel_work(max_agents=5)

    # Machine-readable context, echoed back in the result dict.
    context = {
        "bottlenecks_count": len(bottlenecks),
        "high_risk_count": risks["high_risk_count"],
        "parallel_capacity": parallel["max_parallelism"],
        "description": description,
    }

    # Build context string with research info; becomes the spike's content.
    context_str = f"Project context:\n- {len(bottlenecks)} bottlenecks\n- {risks['high_risk_count']} high-risk items\n- {parallel['max_parallelism']} parallel capacity"

    if research_completed and research_findings:
        context_str += f"\n\nResearch completed:\n- Topic: {research_findings.get('topic', description)}"
        if "sources_count" in research_findings:
            context_str += f"\n- Sources: {research_findings['sources_count']}"
        if "recommended_library" in research_findings:
            context_str += (
                f"\n- Recommended: {research_findings['recommended_library']}"
            )

    # Validation: warn if complex work planned without research.
    # Keyword heuristic over the description — intentionally coarse.
    is_complex = any(
        [
            "auth" in description.lower(),
            "security" in description.lower(),
            "real-time" in description.lower(),
            "websocket" in description.lower(),
            "oauth" in description.lower(),
            "performance" in description.lower(),
            "integration" in description.lower(),
        ]
    )

    warnings = []
    if is_complex and not research_completed:
        warnings.append(
            "⚠️ Complex feature detected without research. "
            "Consider using /htmlgraph:research first to gather best practices."
        )

    if create_spike:
        spike = self.start_planning_spike(
            title=f"Plan: {description}",
            context=context_str,
            timebox_hours=timebox_hours,
        )

        # Store research metadata in spike properties if provided.
        # The spike was already added to the graph, so mutate then re-persist.
        if research_completed and research_findings:
            spike.properties["research_completed"] = True
            spike.properties["research_findings"] = research_findings
            self._graph.update(spike)

        result = {
            "type": "spike",
            "spike_id": spike.id,
            "title": spike.title,
            "status": spike.status,
            "timebox_hours": timebox_hours,
            "project_context": context,
            "research_informed": research_completed,
            "next_steps": [
                "Research and design the solution"
                if not research_completed
                else "Design solution using research findings",
                "Complete spike steps",
                "Use SDK.create_track_from_plan() to create track",
            ],
        }

        if warnings:
            result["warnings"] = warnings

        return result
    else:
        # Direct track creation (for when you already know what to do)
        track_info = self.create_track_from_plan(
            title=description, description=f"Planned with context: {context}"
        )

        result = {
            "type": "track",
            **track_info,
            "project_context": context,
            "research_informed": research_completed,
            "next_steps": [
                "Create features from track plan",
                "Link features to track",
                "Start implementation",
            ],
        }

        if warnings:
            result["warnings"] = warnings

        return result
|
|
1794
|
-
|
|
1795
|
-
def plan_parallel_work(
    self,
    max_agents: int = 5,
    shared_files: list[str] | None = None,
) -> dict[str, Any]:
    """
    Plan and prepare parallel work execution.

    Drives the first three phases of the ParallelWorkflow:
    1. Pre-flight analysis (dependencies, risks)
    2. Context preparation (shared file caching)
    3. Prompt generation (for the Task tool)

    Args:
        max_agents: Maximum parallel agents (default: 5)
        shared_files: Files to pre-cache for all agents

    Returns:
        Execution-plan dict containing the analysis fields, ``prompts``
        (empty when parallelism isn't viable, plus a ``reason``), and —
        when parallelizable — ``task_count`` and dispatch ``guidelines``.

    Example:
        >>> sdk = SDK(agent="orchestrator")
        >>> plan = sdk.plan_parallel_work(max_agents=3)
        >>> if plan["can_parallelize"]:
        ...     for p in plan["prompts"]:
        ...         Task(prompt=p["prompt"], description=p["description"])
    """
    from htmlgraph.parallel import ParallelWorkflow

    workflow = ParallelWorkflow(self)

    # Phase 1: pre-flight dependency/risk analysis.
    analysis = workflow.analyze(max_agents=max_agents)

    plan: dict[str, Any] = {
        "can_parallelize": analysis.can_parallelize,
        "max_parallelism": analysis.max_parallelism,
        "ready_tasks": analysis.ready_tasks,
        "blocked_tasks": analysis.blocked_tasks,
        "speedup_factor": analysis.speedup_factor,
        "recommendation": analysis.recommendation,
        "warnings": analysis.warnings,
        "prompts": [],
    }

    # Bail out early when the analysis says parallel dispatch won't help.
    if not analysis.can_parallelize:
        plan["reason"] = analysis.recommendation
        return plan

    # Phases 2 & 3: cache shared context, then render Task-tool prompts.
    prepared = workflow.prepare_tasks(
        analysis.ready_tasks[:max_agents],
        shared_files=shared_files,
    )
    rendered = workflow.generate_prompts(prepared)

    plan["prompts"] = rendered
    plan["task_count"] = len(rendered)

    # Efficiency guidance for whoever dispatches the prompts.
    plan["guidelines"] = {
        "dispatch": "Send ALL Task calls in a SINGLE message for true parallelism",
        "patterns": [
            "Grep → Read (search before reading)",
            "Read → Edit → Bash (read, modify, test)",
            "Glob → Read (find files first)",
        ],
        "avoid": [
            "Sequential Task calls (loses parallelism)",
            "Read → Read → Read (cache instead)",
            "Edit → Edit → Edit (batch edits)",
        ],
    }

    return plan
|
|
1876
|
-
|
|
1877
|
-
def aggregate_parallel_results(
    self,
    agent_ids: list[str],
) -> dict[str, Any]:
    """
    Aggregate results from parallel agent execution.

    Runs phases 5 (aggregate) and 6 (validate) of the ParallelWorkflow to
    collect health metrics, detect anti-patterns, surface file conflicts,
    and produce recommendations.

    Args:
        agent_ids: List of agent/transcript IDs to analyze

    Returns:
        Dict with aggregated metrics, the per-check ``validation`` map,
        and ``all_passed`` (True when every validation check succeeded).

    Example:
        >>> results = sdk.aggregate_parallel_results([
        ...     "agent-abc123",
        ...     "agent-def456",
        ...     "agent-ghi789",
        ... ])
        >>> print(f"Health: {results['avg_health_score']:.0%}")
        >>> print(f"Conflicts: {results['conflicts']}")
    """
    from htmlgraph.parallel import ParallelWorkflow

    workflow = ParallelWorkflow(self)

    # Phase 5: collect per-agent outcomes into one summary object.
    summary = workflow.aggregate(agent_ids)

    # Phase 6: run validation checks against that summary.
    checks = workflow.validate(summary)

    report: dict[str, Any] = {
        "total_agents": summary.total_agents,
        "successful": summary.successful,
        "failed": summary.failed,
        "total_duration_seconds": summary.total_duration_seconds,
        "parallel_speedup": summary.parallel_speedup,
        "avg_health_score": summary.avg_health_score,
        "total_anti_patterns": summary.total_anti_patterns,
        "files_modified": summary.files_modified,
        "conflicts": summary.conflicts,
        "recommendations": summary.recommendations,
        "validation": checks,
        "all_passed": all(checks.values()),
    }
    return report
|
|
1930
|
-
|
|
1931
|
-
# =========================================================================
|
|
1932
|
-
# Subagent Orchestration
|
|
1933
|
-
# =========================================================================
|
|
1934
|
-
|
|
1935
|
-
@property
def orchestrator(self) -> Any:
    """
    The subagent orchestrator for spawning explorer/coder agents.

    Constructed lazily on first access and cached for the SDK's lifetime.

    Returns:
        SubagentOrchestrator instance

    Example:
        >>> sdk = SDK(agent="claude")
        >>> explorer = sdk.orchestrator.spawn_explorer(
        ...     task="Find all API endpoints",
        ...     scope="src/"
        ... )
    """
    if self._orchestrator is not None:
        return self._orchestrator

    # First access: import here to avoid paying the cost at module load.
    from htmlgraph.orchestrator import SubagentOrchestrator

    self._orchestrator = SubagentOrchestrator(self)  # type: ignore[assignment]
    return self._orchestrator
|
|
1957
|
-
|
|
1958
|
-
def spawn_explorer(
    self,
    task: str,
    scope: str | None = None,
    patterns: list[str] | None = None,
    questions: list[str] | None = None,
) -> dict[str, Any]:
    """
    Spawn an explorer subagent for codebase discovery.

    Explorer agents are read-only: they find files, search patterns, and
    map code without modifying anything. This is a thin delegation to the
    orchestrator that converts its prompt object into Task-tool kwargs.

    Args:
        task: What to explore/discover
        scope: Directory scope (e.g., "src/")
        patterns: Glob patterns to focus on
        questions: Specific questions to answer

    Returns:
        Dict with 'prompt', 'description', 'subagent_type' keys, ready for
        the Task tool. Empty dict if spawning fails.

    Example:
        >>> prompt = sdk.spawn_explorer(
        ...     task="Find all database models",
        ...     scope="src/models/",
        ...     questions=["What ORM is used?"]
        ... )
        >>> Task(prompt=prompt["prompt"], description=prompt["description"])

    See also:
        spawn_coder: Spawn implementation agent with feature context
        orchestrate: Full exploration + implementation workflow
    """
    prompt_spec = self.orchestrator.spawn_explorer(
        task=task,
        scope=scope,
        patterns=patterns,
        questions=questions,
    )
    # Flatten the prompt object into kwargs the Task tool accepts.
    task_kwargs: dict[str, Any] = prompt_spec.to_task_kwargs()
    return task_kwargs
|
|
2005
|
-
|
|
2006
|
-
def spawn_coder(
    self,
    feature_id: str,
    context: str | None = None,
    files_to_modify: list[str] | None = None,
    test_command: str | None = None,
) -> dict[str, Any]:
    """
    Spawn a coder subagent for implementing changes.

    Coder agents read, modify, and test code. This is a thin delegation to
    the orchestrator that converts its prompt object into Task-tool kwargs.

    Args:
        feature_id: Feature being implemented (must be a valid feature ID)
        context: Results from explorer (string summary)
        files_to_modify: Specific files to change
        test_command: Command to verify changes

    Returns:
        Dict with 'prompt', 'description', 'subagent_type' keys, ready for
        the Task tool. Empty dict if the feature is not found.

    Example:
        >>> prompt = sdk.spawn_coder(
        ...     feature_id="feat-add-auth",
        ...     context=explorer_results,
        ...     test_command="uv run pytest tests/auth/"
        ... )
        >>> Task(prompt=prompt["prompt"], description=prompt["description"])

    See also:
        spawn_explorer: Explore codebase before implementation
        orchestrate: Full exploration + implementation workflow
    """
    prompt_spec = self.orchestrator.spawn_coder(
        feature_id=feature_id,
        context=context,
        files_to_modify=files_to_modify,
        test_command=test_command,
    )
    # Flatten the prompt object into kwargs the Task tool accepts.
    task_kwargs: dict[str, Any] = prompt_spec.to_task_kwargs()
    return task_kwargs
|
|
2051
|
-
|
|
2052
|
-
def orchestrate(
    self,
    feature_id: str,
    exploration_scope: str | None = None,
    test_command: str | None = None,
) -> dict[str, Any]:
    """
    Orchestrate full feature implementation with explorer and coder.

    Produces prompts for a two-phase workflow:
    1. Explorer discovers relevant code and patterns
    2. Coder implements the feature based on explorer findings

    Args:
        feature_id: Feature to implement
        exploration_scope: Directory to explore
        test_command: Test command for verification

    Returns:
        Dict with 'explorer' and 'coder' Task-tool kwargs plus a
        'workflow' checklist describing the execution order.

    Example:
        >>> prompts = sdk.orchestrate(
        ...     "feat-add-caching",
        ...     exploration_scope="src/cache/",
        ...     test_command="uv run pytest tests/cache/"
        ... )
        >>> # Phase 1: Run explorer
        >>> Task(prompt=prompts["explorer"]["prompt"], ...)
        >>> # Phase 2: Run coder with explorer results
        >>> Task(prompt=prompts["coder"]["prompt"], ...)

    See also:
        spawn_explorer: Just the exploration phase
        spawn_coder: Just the implementation phase
    """
    phase_prompts = self.orchestrator.orchestrate_feature(
        feature_id=feature_id,
        exploration_scope=exploration_scope,
        test_command=test_command,
    )

    # Human-readable checklist shipped alongside the prompts.
    workflow_steps = [
        "1. Execute explorer Task and collect results",
        "2. Parse explorer results for files and patterns",
        "3. Execute coder Task with explorer context",
        "4. Verify coder results and update feature status",
    ]

    return {
        "explorer": phase_prompts["explorer"].to_task_kwargs(),
        "coder": phase_prompts["coder"].to_task_kwargs(),
        "workflow": workflow_steps,
    }
|
|
2103
|
-
|
|
2104
|
-
# =========================================================================
|
|
2105
|
-
# Session Management Optimization
|
|
2106
|
-
# =========================================================================
|
|
2107
|
-
|
|
2108
|
-
def get_session_start_info(
    self,
    include_git_log: bool = True,
    git_log_count: int = 5,
    analytics_top_n: int = 3,
    analytics_max_agents: int = 3,
) -> SessionStartInfo:
    """
    Get comprehensive session start information in a single call.

    Consolidates all information needed for session start into one method,
    reducing context usage from 6+ tool calls to 1.

    Args:
        include_git_log: Include recent git commits (default: True)
        git_log_count: Number of recent commits to include (default: 5)
        analytics_top_n: Number of bottlenecks/recommendations (default: 3)
        analytics_max_agents: Max agents for parallel work analysis (default: 3)

    Returns:
        Dict with comprehensive session start context:
        - status: Project status (nodes, collections, WIP)
        - active_work: Current active work item (if any, else None)
        - features: List of features with status
        - sessions: Recent sessions (up to 20)
        - git_log: Recent commits (if include_git_log=True; [] on failure
          or when the repository has no commits)
        - analytics: Strategic insights (bottlenecks, recommendations, parallel)

    Note:
        Always check for expected keys before accessing.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> info = sdk.get_session_start_info()
        >>> print(f"Project: {info['status']['total_nodes']} nodes")
        >>> if info.get('active_work'):
        ...     print(f"Active: {info['active_work']['title']}")
        >>> for bn in info['analytics']['bottlenecks']:
        ...     print(f"Bottleneck: {bn['title']}")
    """
    import subprocess

    result = {}

    # 1. Project status (node counts, collections, WIP).
    result["status"] = self.get_status()

    # 2. Active work item (validation status) - always include, even if None.
    result["active_work"] = self.get_active_work_item()  # type: ignore[assignment]

    # 3. Features, reduced to the fields a session-start summary needs.
    features_list: list[dict[str, object]] = [
        {
            "id": feature.id,
            "title": feature.title,
            "status": feature.status,
            "priority": feature.priority,
            "steps_total": len(feature.steps),
            "steps_completed": sum(1 for s in feature.steps if s.completed),
        }
        for feature in self.features.all()
    ]
    result["features"] = features_list  # type: ignore[assignment]

    # 4. Most recent 20 sessions.
    sessions_list: list[dict[str, Any]] = [
        {
            "id": session.id,
            "status": session.status,
            "agent": session.properties.get("agent", "unknown"),
            "event_count": session.properties.get("event_count", 0),
            "started": session.created.isoformat()
            if hasattr(session, "created")
            else None,
        }
        for session in self.sessions.all()[:20]
    ]
    result["sessions"] = sessions_list  # type: ignore[assignment]

    # 5. Git log (best-effort: missing git binary or non-repo yields []).
    if include_git_log:
        try:
            git_result = subprocess.run(
                ["git", "log", "--oneline", f"-{git_log_count}"],
                capture_output=True,
                text=True,
                check=True,
                cwd=self._directory.parent,
            )
            # splitlines() (unlike split("\n")) yields [] for empty output,
            # so a repo with no commits reports an empty log rather than [""].
            git_lines: list[str] = git_result.stdout.strip().splitlines()
            result["git_log"] = git_lines  # type: ignore[assignment]
        except (subprocess.CalledProcessError, FileNotFoundError):
            result["git_log"] = []  # type: ignore[assignment]

    # 6. Strategic analytics snapshot.
    result["analytics"] = {
        "bottlenecks": self.find_bottlenecks(top_n=analytics_top_n),
        "recommendations": self.recommend_next_work(agent_count=analytics_top_n),
        "parallel": self.get_parallel_work(max_agents=analytics_max_agents),
    }

    return result  # type: ignore[return-value]
|
|
2215
|
-
|
|
2216
|
-
def get_active_work_item(
    self,
    agent: str | None = None,
    filter_by_agent: bool = False,
    work_types: list[str] | None = None,
) -> ActiveWorkItem | None:
    """
    Return the currently active (in-progress) work item, if any.

    Used by the PreToolUse validation hook to verify that code changes can
    be attributed to an active work item. Real work items (features, bugs,
    chores, epics, and user-created spikes) always take precedence over
    auto-generated tracking spikes.

    Args:
        agent: Agent ID used when filtering (optional; falls back to
            ``self._agent_id``).
        filter_by_agent: When True, only consider items assigned to the
            agent. When False (default), any active item matches.
        work_types: Collection names to search. Defaults to all of:
            features, bugs, spikes, chores, epics.

    Returns:
        Dict describing the work item (id, title, type, status, agent,
        steps_total, steps_completed; plus auto_generated/spike_subtype
        for spikes), or None when nothing is in progress.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> active = sdk.get_active_work_item()
        >>> if active:
        ...     print(f"Working on: {active['title']}")
        >>> mine = sdk.get_active_work_item(filter_by_agent=True)
    """
    # Default to searching every work-item collection.
    search_types = (
        work_types
        if work_types is not None
        else ["features", "bugs", "spikes", "chores", "epics"]
    )

    # Real items and temporary auto-generated tracking spikes are kept
    # separate so real work can be prioritized at the end.
    real_items: list[dict[str, Any]] = []
    tracking_spikes: list[dict[str, Any]] = []

    for type_name in search_types:
        collection = getattr(self, type_name, None)
        if collection is None:
            continue

        for item in collection.where(status="in-progress"):
            if filter_by_agent:
                wanted = agent or self._agent_id
                # Items without an agent_assigned attribute are never
                # filtered out (same as the original behavior).
                if (
                    wanted
                    and hasattr(item, "agent_assigned")
                    and item.agent_assigned != wanted
                ):
                    continue

            has_steps = hasattr(item, "steps")
            record = {
                "id": item.id,
                "title": item.title,
                "type": item.type,
                "status": item.status,
                "agent": getattr(item, "agent_assigned", None),
                "steps_total": len(item.steps) if has_steps else 0,
                "steps_completed": (
                    sum(1 for s in item.steps if s.completed) if has_steps else 0
                ),
            }

            if item.type != "spike":
                # Features, bugs, chores, epics are always real work.
                real_items.append(record)
                continue

            # Spike-specific fields drive the auto-spike classification.
            record["auto_generated"] = getattr(item, "auto_generated", False)
            record["spike_subtype"] = getattr(item, "spike_subtype", None)

            # Auto-spikes are temporary tracking items created by hooks.
            if record["auto_generated"] and record["spike_subtype"] in (
                "session-init",
                "transition",
                "conversation-init",
            ):
                tracking_spikes.append(record)
            else:
                # A user-created spike counts as real work.
                real_items.append(record)

    # Real work wins; auto-spikes surface only when nothing else is active.
    if real_items:
        return real_items[0]  # type: ignore[return-value]
    if tracking_spikes:
        return tracking_spikes[0]  # type: ignore[return-value]
    return None
|
|
2322
|
-
# =========================================================================
|
|
2323
|
-
# Operations Layer - Server, Hooks, Events, Analytics
|
|
2324
|
-
# =========================================================================
|
|
2325
|
-
|
|
2326
|
-
def start_server(
    self,
    port: int = 8080,
    host: str = "localhost",
    watch: bool = True,
    auto_port: bool = False,
) -> Any:
    """
    Start the HtmlGraph web server for browsing the graph.

    Args:
        port: Port to listen on (default: 8080).
        host: Host to bind to (default: "localhost").
        watch: Enable file watching for auto-reload (default: True).
        auto_port: Fall back to the next free port when `port` is taken
            (default: False).

    Returns:
        ServerStartResult carrying the handle, warnings, and the config used.

    Raises:
        PortInUseError: Port is taken and auto_port is False.
        ServerStartError: The server failed to start.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.start_server(port=8080, watch=True)
        >>> print(f"Server running at {result.handle.url}")
        >>> sdk.stop_server(result.handle)

    See also:
        stop_server, get_server_status
    """
    from htmlgraph.operations import server

    config = {
        "port": port,
        "graph_dir": self._directory,
        # The project root (parent of the graph dir) serves index.html.
        "static_dir": self._directory.parent,
        "host": host,
        "watch": watch,
        "auto_port": auto_port,
    }
    return server.start_server(**config)
|
|
2373
|
-
def stop_server(self, handle: Any) -> None:
    """
    Shut down a running HtmlGraph server.

    Args:
        handle: The ServerHandle obtained from start_server().

    Raises:
        ServerStartError: Shutdown failed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.start_server()
        >>> sdk.stop_server(result.handle)
    """
    # Delegated to the operations layer; no return value.
    from htmlgraph.operations import server

    server.stop_server(handle)
|
|
2393
|
-
def get_server_status(self, handle: Any | None = None) -> Any:
    """
    Report whether the HtmlGraph server is running.

    Args:
        handle: Optional ServerHandle to check.

    Returns:
        ServerStatus describing the server's run state.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.start_server()
        >>> status = sdk.get_server_status(result.handle)
        >>> print(f"Running: {status.running}")
    """
    from htmlgraph.operations import server

    status = server.get_server_status(handle)
    return status
|
|
2413
|
-
def install_hooks(self, use_copy: bool = False) -> Any:
    """
    Install Git hooks that automatically track sessions, activities,
    and features as you work.

    Args:
        use_copy: Copy hook files instead of symlinking them
            (default: False).

    Returns:
        HookInstallResult with installation details (installed, skipped,
        warnings).

    Raises:
        HookInstallError: Installation failed.
        HookConfigError: Hook configuration is invalid.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.install_hooks()
        >>> print(f"Installed: {result.installed}")
        >>> print(f"Skipped: {result.skipped}")
        >>> if result.warnings:
        ...     print(f"Warnings: {result.warnings}")

    See also:
        list_hooks, validate_hook_config
    """
    from htmlgraph.operations import hooks

    # Hooks live at the project root (parent of the graph directory).
    return hooks.install_hooks(project_dir=self._directory.parent, use_copy=use_copy)
|
|
2449
|
-
def list_hooks(self) -> Any:
    """
    List the status of Git hooks (enabled / disabled / missing).

    Returns:
        HookListResult with `enabled`, `disabled`, and `missing` hooks.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.list_hooks()
        >>> print(f"Enabled: {result.enabled}")
        >>> print(f"Disabled: {result.disabled}")
        >>> print(f"Missing: {result.missing}")
    """
    from htmlgraph.operations import hooks

    # Hooks are resolved relative to the project root.
    project_root = self._directory.parent
    return hooks.list_hooks(project_dir=project_root)
|
|
2467
|
-
def validate_hook_config(self) -> Any:
    """
    Validate the hook configuration for this project.

    Returns:
        HookValidationResult with `valid`, `errors`, and `warnings`.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.validate_hook_config()
        >>> if not result.valid:
        ...     print(f"Errors: {result.errors}")
        >>> if result.warnings:
        ...     print(f"Warnings: {result.warnings}")
    """
    from htmlgraph.operations import hooks

    # Configuration is checked against the project root, not the graph dir.
    project_root = self._directory.parent
    return hooks.validate_hook_config(project_dir=project_root)
|
|
2486
|
-
def export_sessions(self, overwrite: bool = False) -> Any:
    """
    Export legacy session HTML logs to JSONL events.

    Converts HTML session files into JSONL so they can be queried
    efficiently.

    Args:
        overwrite: Overwrite existing JSONL files (default: False).

    Returns:
        EventExportResult with counts of written, skipped, and failed files.

    Raises:
        EventOperationError: The export failed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.export_sessions()
        >>> print(f"Exported {result.written} sessions")
        >>> print(f"Skipped {result.skipped} (already exist)")
        >>> if result.failed > 0:
        ...     print(f"Failed {result.failed} sessions")

    See also:
        rebuild_event_index, query_events
    """
    from htmlgraph.operations import events

    return events.export_sessions(graph_dir=self._directory, overwrite=overwrite)
|
|
2520
|
-
def rebuild_event_index(self) -> Any:
    """
    Rebuild the SQLite analytics index from JSONL event logs.

    Produces an optimized SQLite index for fast analytics queries.

    Returns:
        EventRebuildResult with `db_path` plus counts of inserted and
        skipped (duplicate) events.

    Raises:
        EventOperationError: The rebuild failed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.rebuild_event_index()
        >>> print(f"Rebuilt index: {result.db_path}")
        >>> print(f"Inserted {result.inserted} events")
        >>> print(f"Skipped {result.skipped} (duplicates)")

    See also:
        export_sessions: Export HTML sessions to JSONL first.
    """
    from htmlgraph.operations import events

    result = events.rebuild_index(graph_dir=self._directory)
    return result
|
|
2546
|
-
def query_events(
    self,
    session_id: str | None = None,
    tool: str | None = None,
    feature_id: str | None = None,
    since: str | None = None,
    limit: int | None = None,
) -> Any:
    """
    Query JSONL event logs, applying any combination of filters.

    Args:
        session_id: Restrict to one session (None = all sessions).
        tool: Restrict to one tool name (e.g. 'Bash', 'Edit').
        feature_id: Restrict to events attributed to a feature.
        since: Only events after this ISO timestamp string.
        limit: Cap on the number of events returned.

    Returns:
        EventQueryResult with the matching events and a total count.

    Raises:
        EventOperationError: The query failed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.query_events(session_id="sess-123")
        >>> print(f"Found {result.total} events")
        >>> recent = sdk.query_events(tool="Bash", since="2025-01-01T00:00:00Z", limit=10)
        >>> for event in recent.events:
        ...     print(f"{event['timestamp']}: {event['summary']}")

    See also:
        export_sessions, get_event_stats
    """
    from htmlgraph.operations import events

    filters = {
        "session_id": session_id,
        "tool": tool,
        "feature_id": feature_id,
        "since": since,
        "limit": limit,
    }
    return events.query_events(graph_dir=self._directory, **filters)
|
|
2600
|
-
def get_event_stats(self) -> Any:
    """
    Summarize the events stored in the system.

    Returns:
        EventStats with totals for events, sessions, and JSONL files.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> stats = sdk.get_event_stats()
        >>> print(f"Total events: {stats.total_events}")
        >>> print(f"Sessions: {stats.session_count}")
        >>> print(f"JSONL files: {stats.file_count}")
    """
    from htmlgraph.operations import events

    stats = events.get_event_stats(graph_dir=self._directory)
    return stats
|
|
2618
|
-
def analyze_session(self, session_id: str) -> Any:
    """
    Compute detailed analytics for one session.

    Covers work distribution, spike-to-feature ratio, maintenance burden,
    transition metrics, and more.

    Args:
        session_id: ID of the session to analyze.

    Returns:
        AnalyticsSessionResult with session metrics and warnings.

    Raises:
        AnalyticsOperationError: The session could not be analyzed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.analyze_session("sess-123")
        >>> print(f"Primary work type: {result.metrics['primary_work_type']}")
        >>> print(f"Total events: {result.metrics['total_events']}")
        >>> print(f"Work distribution: {result.metrics['work_distribution']}")
        >>> if result.warnings:
        ...     print(f"Warnings: {result.warnings}")

    See also:
        analyze_project: Analyze the entire project.
    """
    from htmlgraph.operations import analytics

    return analytics.analyze_session(graph_dir=self._directory, session_id=session_id)
|
|
2653
|
-
def analyze_project(self) -> Any:
    """
    Compute project-wide analytics.

    Examines every session: work distribution, spike-to-feature ratios,
    maintenance burden, and session types across the whole project.

    Returns:
        AnalyticsProjectResult with project metrics and warnings.

    Raises:
        AnalyticsOperationError: The project could not be analyzed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.analyze_project()
        >>> print(f"Total sessions: {result.metrics['total_sessions']}")
        >>> print(f"Work distribution: {result.metrics['work_distribution']}")
        >>> print(f"Spike-to-feature ratio: {result.metrics['spike_to_feature_ratio']}")
        >>> for session in result.metrics['recent_sessions']:
        ...     print(f"  {session['session_id']}: {session['primary_work_type']}")

    See also:
        analyze_session, get_work_recommendations
    """
    from htmlgraph.operations import analytics

    report = analytics.analyze_project(graph_dir=self._directory)
    return report
|
|
2684
|
-
def get_work_recommendations(self) -> Any:
    """
    Produce smart work recommendations from the current project state.

    Uses dependency analytics to rank next tasks by priority,
    dependencies, and impact.

    Returns:
        RecommendationsResult with recommendations, reasoning, and warnings.

    Raises:
        AnalyticsOperationError: Recommendations could not be generated.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.get_work_recommendations()
        >>> for rec in result.recommendations:
        ...     print(f"{rec['title']} (score: {rec['score']})")
        ...     print(f"  Reasons: {rec['reasons']}")
        ...     print(f"  Unlocks: {rec['unlocks']}")
        >>> print(f"Reasoning: {result.reasoning}")

    See also:
        recommend_next_work (legacy), get_work_queue
    """
    from htmlgraph.operations import analytics

    recs = analytics.get_recommendations(graph_dir=self._directory)
    return recs
|
|
2714
|
-
# =========================================================================
|
|
2715
|
-
# Task Attribution - Subagent Work Tracking
|
|
2716
|
-
# =========================================================================
|
|
2717
|
-
|
|
2718
|
-
def get_task_attribution(self, task_id: str) -> dict[str, Any]:
    """
    Get attribution — which subagent did what work in this task?

    Queries the database for all events associated with a Claude Code
    task, showing which subagent executed each tool call. Best-effort:
    on any failure (including an unavailable database module) it returns
    an empty attribution dict with an "error" key instead of raising.

    Args:
        task_id: Claude Code's internal task ID (from the Task() response).

    Returns:
        Dict with keys:
            - task_id: Echo of the requested task ID.
            - by_subagent: Mapping of subagent_type -> list of event dicts
              (tool, summary, timestamp, event_id, success).
            - total_events: Number of events found.
            - error: Present only when the lookup failed.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> result = sdk.get_task_attribution("task-abc123-xyz789")
        >>> for subagent, events in result['by_subagent'].items():
        ...     print(f"{subagent}:")
        ...     for event in events:
        ...         print(f"  - {event['tool']}: {event['summary']}")
        >>> print(f"Total events: {result['total_events']}")

    See also:
        get_subagent_work: All work grouped by subagent in a session.
    """
    try:
        # Imports live inside the try so that an ImportError also takes
        # the best-effort error path instead of escaping to the caller.
        from htmlgraph.config import get_database_path
        from htmlgraph.db.schema import HtmlGraphDB

        db = HtmlGraphDB(str(get_database_path()))
        events = db.get_events_for_task(task_id)

        # Group events by the subagent that executed them; events with no
        # subagent_type are attributed to the orchestrator.
        by_subagent: dict[str, list[dict[str, Any]]] = {}
        for event in events:
            agent = event.get("subagent_type", "orchestrator")
            by_subagent.setdefault(agent, []).append(
                {
                    "tool": event.get("tool_name"),
                    "summary": event.get("input_summary"),
                    "timestamp": event.get("created_at"),
                    "event_id": event.get("event_id"),
                    "success": not event.get("is_error", False),
                }
            )

        return {
            "task_id": task_id,
            "by_subagent": by_subagent,
            "total_events": len(events),
        }
    except Exception as e:
        # Attribution is informational only — never raise.
        return {
            "task_id": task_id,
            "by_subagent": {},
            "total_events": 0,
            "error": str(e),
        }
|
|
2779
|
-
def get_subagent_work(self, session_id: str) -> dict[str, list[dict[str, Any]]]:
    """
    Get all work in a session grouped by the subagent that did it.

    Shows which subagent (researcher, general-purpose, etc.) executed
    each tool call within a session. Best-effort: on any failure
    (including an unavailable database module) it returns an empty dict
    instead of raising.

    Args:
        session_id: Session ID to analyze.

    Returns:
        Mapping of subagent_type -> list of event dicts. Each event
        includes: tool_name, input_summary, output_summary, created_at,
        event_id.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> work = sdk.get_subagent_work("sess-123")
        >>> for subagent, events in work.items():
        ...     print(f"{subagent} ({len(events)} events):")
        ...     for event in events:
        ...         print(f"  - {event['tool_name']}: {event['input_summary']}")

    See also:
        get_task_attribution: Work for a specific Claude Code task.
        analyze_session: Session metrics and analytics.
    """
    try:
        # Imports live inside the try so that an ImportError also takes
        # the best-effort empty-dict path instead of escaping to the caller.
        from htmlgraph.config import get_database_path
        from htmlgraph.db.schema import HtmlGraphDB

        db = HtmlGraphDB(str(get_database_path()))
        return db.get_subagent_work(session_id)
    except Exception:
        # Best-effort contract: never raise, just report nothing found.
        return {}
|
|
2814
|
-
# =========================================================================
|
|
2815
|
-
# Help & Documentation
|
|
2816
|
-
# =========================================================================
|
|
2817
|
-
|
|
2818
|
-
def help(self, topic: str | None = None) -> str:
    """
    Get help on SDK usage.

    Args:
        topic: Optional topic name (e.g. 'features', 'sessions',
            'analytics', 'orchestration'). With no topic, an index of
            all topics is returned.

    Returns:
        Formatted help text.

    Example:
        >>> sdk = SDK(agent="claude")
        >>> print(sdk.help())             # list all topics
        >>> print(sdk.help('features'))   # feature collection help
        >>> print(sdk.help('analytics'))  # analytics help

    See also:
        Python's built-in help(sdk) for full API documentation.
        sdk.features, sdk.bugs, sdk.spikes for work item managers.
    """
    # No topic -> overview index; otherwise topic-specific text.
    return self._help_index() if topic is None else self._help_topic(topic)
|
|
2842
|
-
def _help_index(self) -> str:
    """Return the top-level help overview listing all methods/collections."""
    # NOTE(review): the exact column alignment of the original help text
    # could not be recovered from the source; entries are re-aligned
    # consistently here — confirm against rendered output if it matters.
    return """HtmlGraph SDK - Quick Reference

COLLECTIONS (Work Items):
  sdk.features   - Feature work items with builder support
  sdk.bugs       - Bug reports
  sdk.spikes     - Investigation and research spikes
  sdk.chores     - Maintenance and chore tasks
  sdk.epics      - Large bodies of work
  sdk.phases     - Project phases

COLLECTIONS (Non-Work):
  sdk.sessions   - Agent sessions
  sdk.tracks     - Work tracks with builder support
  sdk.agents     - Agent information

LEARNING (Active Learning):
  sdk.patterns   - Workflow patterns (optimal/anti-pattern)
  sdk.insights   - Session health insights
  sdk.metrics    - Aggregated time-series metrics

CORE METHODS:
  sdk.summary()   - Get project summary
  sdk.my_work()   - Get current agent's workload
  sdk.next_task() - Get next available task
  sdk.reload()    - Reload all data from disk

SESSION MANAGEMENT:
  sdk.start_session()   - Start a new session
  sdk.end_session()     - End a session
  sdk.track_activity()  - Track activity in session
  sdk.dedupe_sessions() - Clean up low-signal sessions
  sdk.get_status()      - Get project status

STRATEGIC ANALYTICS:
  sdk.find_bottlenecks()    - Identify blocking tasks
  sdk.recommend_next_work() - Get smart recommendations
  sdk.get_parallel_work()   - Find parallelizable work
  sdk.assess_risks()        - Assess dependency risks
  sdk.analyze_impact()      - Analyze task impact

WORK QUEUE:
  sdk.get_work_queue() - Get prioritized work queue
  sdk.work_next()      - Get next best task (smart routing)

PLANNING WORKFLOW:
  sdk.smart_plan()                 - Smart planning with research
  sdk.start_planning_spike()       - Create planning spike
  sdk.create_track_from_plan()     - Create track from plan
  sdk.plan_parallel_work()         - Plan parallel execution
  sdk.aggregate_parallel_results() - Aggregate parallel results

ORCHESTRATION:
  sdk.spawn_explorer() - Spawn explorer subagent
  sdk.spawn_coder()    - Spawn coder subagent
  sdk.orchestrate()    - Orchestrate feature implementation

SESSION OPTIMIZATION:
  sdk.get_session_start_info() - Get comprehensive session start info
  sdk.get_active_work_item()   - Get currently active work item

ANALYTICS INTERFACES:
  sdk.analytics     - Work type analytics
  sdk.dep_analytics - Dependency analytics
  sdk.context       - Context analytics

OPERATIONS (Server, Hooks, Events):
  sdk.start_server()             - Start web server for graph browsing
  sdk.stop_server()              - Stop running server
  sdk.install_hooks()            - Install Git hooks for tracking
  sdk.list_hooks()               - List Git hooks status
  sdk.export_sessions()          - Export HTML sessions to JSONL
  sdk.rebuild_event_index()      - Rebuild SQLite index from events
  sdk.query_events()             - Query JSONL event logs
  sdk.get_event_stats()          - Get event statistics
  sdk.analyze_session()          - Analyze single session metrics
  sdk.analyze_project()          - Analyze project-wide metrics
  sdk.get_work_recommendations() - Get work recommendations

ERROR HANDLING:
  Lookup (.get)      - Returns None if not found
  Query (.where)     - Returns empty list on no matches
  Edit (.edit)       - Raises NodeNotFoundError if missing
  Batch (.mark_done) - Returns dict with success_count, failed_ids, warnings

For detailed help on a topic:
  sdk.help('features')      - Feature collection methods
  sdk.help('analytics')     - Analytics methods
  sdk.help('sessions')      - Session management
  sdk.help('orchestration') - Subagent orchestration
  sdk.help('planning')      - Planning workflow
  sdk.help('operations')    - Server, hooks, events operations
"""
|
|
2937
|
-
def __dir__(self) -> list[str]:
    """List attributes with the most useful ones first for discoverability."""
    # Hand-curated order: collections, then the most common workflows.
    priority = [
        # Work item managers
        "features",
        "bugs",
        "spikes",
        "chores",
        "epics",
        "phases",
        # Non-work collections
        "tracks",
        "sessions",
        "agents",
        # Learning collections
        "patterns",
        "insights",
        "metrics",
        # Orchestration
        "spawn_explorer",
        "spawn_coder",
        "orchestrate",
        # Session management
        "get_session_start_info",
        "start_session",
        "end_session",
        # Strategic analytics
        "find_bottlenecks",
        "recommend_next_work",
        "get_parallel_work",
        # Work queue
        "get_work_queue",
        "work_next",
        # Operations
        "start_server",
        "install_hooks",
        "export_sessions",
        "analyze_project",
        # Help
        "help",
    ]
    promoted = set(priority)
    everything = object.__dir__(self)
    # Bucket the remaining names: public ones follow the curated list,
    # private/dunder names go last.
    public_rest = [
        name
        for name in everything
        if not name.startswith("_") and name not in promoted
    ]
    private = [name for name in everything if name.startswith("_")]
    return priority + public_rest + private
|
|
2986
|
-
def _help_topic(self, topic: str) -> str:
    """Return specific help for topic.

    Args:
        topic: Topic name or alias (case-insensitive), e.g. "features",
            "bugs", "planning", "orchestration".

    Returns:
        A multi-line help string for the topic, or a fallback message
        listing all available topics when the topic is unknown.
    """
    # Normalize so callers can pass any casing (e.g. "Features").
    topic = topic.lower()

    # --- Work-item collections -------------------------------------
    if topic in ["feature", "features"]:
        return """FEATURES COLLECTION

Create and manage feature work items with builder support.

COMMON METHODS:
    sdk.features.create(title) - Create new feature (returns builder)
    sdk.features.get(id) - Get feature by ID
    sdk.features.all() - Get all features
    sdk.features.where(**filters) - Query features
    sdk.features.edit(id) - Edit feature (context manager)
    sdk.features.mark_done(ids) - Mark features as done
    sdk.features.assign(ids, agent) - Assign features to agent

BUILDER PATTERN:
    feature = (sdk.features.create("User Auth")
        .set_priority("high")
        .add_steps(["Login", "Logout", "Reset password"])
        .add_edge("blocked_by", "feat-database")
        .save())

QUERIES:
    high_priority = sdk.features.where(status="todo", priority="high")
    my_features = sdk.features.where(agent_assigned="claude")
    blocked = sdk.features.where(status="blocked")

CONTEXT MANAGER:
    with sdk.features.edit("feat-001") as f:
        f.status = "in-progress"
        f.complete_step(0)
        # Auto-saves on exit

BATCH OPERATIONS:
    result = sdk.features.mark_done(["feat-001", "feat-002"])
    print(f"Completed {result['success_count']} features")
    if result['failed_ids']:
        print(f"Failed: {result['failed_ids']}")

COMMON MISTAKES:
    ❌ sdk.features.mark_complete([ids]) → ✅ sdk.features.mark_done([ids])
    ❌ sdk.feature.create(...) → ✅ sdk.features.create(...)
    ❌ claim(id, agent_id=...) → ✅ claim(id, agent=...)
    ❌ builder.status = "done" → ✅ builder.save(); then edit()

See also: sdk.help('bugs'), sdk.help('spikes'), sdk.help('chores')
"""

    elif topic in ["bug", "bugs"]:
        return """BUGS COLLECTION

Create and manage bug reports.

COMMON METHODS:
    sdk.bugs.create(title) - Create new bug (returns builder)
    sdk.bugs.get(id) - Get bug by ID
    sdk.bugs.all() - Get all bugs
    sdk.bugs.where(**filters) - Query bugs
    sdk.bugs.edit(id) - Edit bug (context manager)

BUILDER PATTERN:
    bug = (sdk.bugs.create("Login fails on Safari")
        .set_priority("critical")
        .add_steps(["Reproduce", "Fix", "Test"])
        .save())

QUERIES:
    critical = sdk.bugs.where(priority="critical", status="todo")
    my_bugs = sdk.bugs.where(agent_assigned="claude")

See also: sdk.help('features'), sdk.help('spikes')
"""

    elif topic in ["spike", "spikes"]:
        return """SPIKES COLLECTION

Create and manage investigation/research spikes.

COMMON METHODS:
    sdk.spikes.create(title) - Create new spike (returns builder)
    sdk.spikes.get(id) - Get spike by ID
    sdk.spikes.all() - Get all spikes
    sdk.spikes.where(**filters) - Query spikes

BUILDER PATTERN:
    spike = (sdk.spikes.create("Research OAuth providers")
        .set_priority("high")
        .add_steps(["Research", "Document findings"])
        .save())

PLANNING SPIKES:
    spike = sdk.start_planning_spike(
        "Plan User Auth",
        context="Users need login",
        timebox_hours=4.0
    )

See also: sdk.help('planning'), sdk.help('features')
"""

    elif topic in ["chore", "chores"]:
        return """CHORES COLLECTION

Create and manage maintenance and chore tasks.

COMMON METHODS:
    sdk.chores.create(title) - Create new chore (returns builder)
    sdk.chores.get(id) - Get chore by ID
    sdk.chores.all() - Get all chores
    sdk.chores.where(**filters) - Query chores

BUILDER PATTERN:
    chore = (sdk.chores.create("Update dependencies")
        .set_priority("medium")
        .add_steps(["Run uv update", "Test", "Commit"])
        .save())

See also: sdk.help('features'), sdk.help('bugs')
"""

    elif topic in ["epic", "epics"]:
        return """EPICS COLLECTION

Create and manage large bodies of work.

COMMON METHODS:
    sdk.epics.create(title) - Create new epic (returns builder)
    sdk.epics.get(id) - Get epic by ID
    sdk.epics.all() - Get all epics
    sdk.epics.where(**filters) - Query epics

BUILDER PATTERN:
    epic = (sdk.epics.create("Authentication System")
        .set_priority("critical")
        .add_steps(["Design", "Implement", "Test", "Deploy"])
        .save())

See also: sdk.help('features'), sdk.help('tracks')
"""

    # --- Non-work collections --------------------------------------
    elif topic in ["track", "tracks"]:
        return """TRACKS COLLECTION

Create and manage work tracks with builder support.

COMMON METHODS:
    sdk.tracks.create(title) - Create new track (returns builder)
    sdk.tracks.builder() - Get track builder
    sdk.tracks.get(id) - Get track by ID
    sdk.tracks.all() - Get all tracks
    sdk.tracks.where(**filters) - Query tracks

BUILDER PATTERN:
    track = (sdk.tracks.builder()
        .title("User Authentication")
        .description("OAuth 2.0 system")
        .priority("high")
        .with_spec(
            overview="OAuth integration",
            requirements=[("OAuth 2.0", "must-have")],
            acceptance_criteria=["Login works"]
        )
        .with_plan_phases([
            ("Phase 1", ["Setup (2h)", "Config (1h)"]),
            ("Phase 2", ["Testing (2h)"])
        ])
        .create())

FROM PLANNING:
    track_info = sdk.create_track_from_plan(
        title="User Auth",
        description="OAuth system",
        requirements=[("OAuth", "must-have")],
        phases=[("Phase 1", ["Setup", "Config"])]
    )

See also: sdk.help('planning'), sdk.help('features')
"""

    elif topic in ["session", "sessions"]:
        return """SESSION MANAGEMENT

Create and manage agent sessions.

SESSION METHODS:
    sdk.start_session(title=...) - Start new session
    sdk.end_session(id) - End session
    sdk.track_activity(...) - Track activity in session
    sdk.dedupe_sessions(...) - Clean up low-signal sessions
    sdk.get_status() - Get project status

SESSION COLLECTION:
    sdk.sessions.get(id) - Get session by ID
    sdk.sessions.all() - Get all sessions
    sdk.sessions.where(**filters) - Query sessions

TYPICAL WORKFLOW:
    # Session start hook handles this automatically
    session = sdk.start_session(title="Fix login bug")

    # Track activities (handled by hooks)
    sdk.track_activity(
        tool="Edit",
        summary="Fixed auth logic",
        file_paths=["src/auth.py"],
        success=True
    )

    # End session
    sdk.end_session(
        session.id,
        handoff_notes="Login bug fixed, needs testing"
    )

CLEANUP:
    # Remove orphaned sessions (<=1 event)
    result = sdk.dedupe_sessions(max_events=1, dry_run=False)

See also: sdk.help('analytics')
"""

    # --- Analytics & routing ---------------------------------------
    elif topic in ["analytic", "analytics", "strategic"]:
        return """STRATEGIC ANALYTICS

Find bottlenecks, recommend work, and assess risks.

DEPENDENCY ANALYTICS:
    bottlenecks = sdk.find_bottlenecks(top_n=5)
    # Returns tasks blocking the most work

    parallel = sdk.get_parallel_work(max_agents=5)
    # Returns tasks that can run simultaneously

    recs = sdk.recommend_next_work(agent_count=3)
    # Returns smart recommendations with scoring

    risks = sdk.assess_risks()
    # Returns high-risk tasks and circular deps

    impact = sdk.analyze_impact("feat-001")
    # Returns what unlocks if you complete this task

DIRECT ACCESS (preferred):
    sdk.dep_analytics.find_bottlenecks(top_n=5)
    sdk.dep_analytics.recommend_next_tasks(agent_count=3)
    sdk.dep_analytics.find_parallelizable_work(status="todo")
    sdk.dep_analytics.assess_dependency_risk()
    sdk.dep_analytics.impact_analysis("feat-001")

WORK TYPE ANALYTICS:
    sdk.analytics.get_wip_by_type()
    sdk.analytics.get_completion_rates()
    sdk.analytics.get_agent_workload()

CONTEXT ANALYTICS:
    sdk.context.track_usage(...)
    sdk.context.get_usage_report()

See also: sdk.help('planning'), sdk.help('work_queue')
"""

    elif topic in ["queue", "work_queue", "routing"]:
        return """WORK QUEUE & ROUTING

Get prioritized work using smart routing.

WORK QUEUE:
    queue = sdk.get_work_queue(limit=10, min_score=0.0)
    # Returns prioritized list with scores

    for item in queue:
        print(f"{item['score']:.1f} - {item['title']}")
        if item.get('blocked_by'):
            print(f"  Blocked by: {item['blocked_by']}")

SMART ROUTING:
    task = sdk.work_next(auto_claim=True, min_score=0.5)
    # Returns next best task using analytics + capabilities

    if task:
        print(f"Working on: {task.title}")
        # Task is auto-claimed and assigned

SIMPLE NEXT TASK:
    task = sdk.next_task(priority="high", auto_claim=True)
    # Simpler version without smart routing

See also: sdk.help('analytics')
"""

    # --- Planning & orchestration ----------------------------------
    elif topic in ["plan", "planning", "workflow"]:
        return """PLANNING WORKFLOW

Research, plan, and create tracks for new work.

SMART PLANNING:
    plan = sdk.smart_plan(
        "User authentication system",
        create_spike=True,
        timebox_hours=4.0,
        research_completed=True,  # IMPORTANT: Do research first!
        research_findings={
            "topic": "OAuth 2.0 best practices",
            "recommended_library": "authlib",
            "key_insights": ["Use PKCE", "Token rotation"]
        }
    )

PLANNING SPIKE:
    spike = sdk.start_planning_spike(
        "Plan Real-time Notifications",
        context="Users need live updates",
        timebox_hours=3.0
    )

CREATE TRACK FROM PLAN:
    track_info = sdk.create_track_from_plan(
        title="User Authentication",
        description="OAuth 2.0 with JWT",
        requirements=[
            ("OAuth 2.0 integration", "must-have"),
            ("JWT token management", "must-have")
        ],
        phases=[
            ("Phase 1: OAuth", ["Setup (2h)", "Callback (2h)"]),
            ("Phase 2: JWT", ["Token signing (2h)"])
        ]
    )

PARALLEL PLANNING:
    plan = sdk.plan_parallel_work(max_agents=3)
    if plan["can_parallelize"]:
        for p in plan["prompts"]:
            Task(prompt=p["prompt"])

    # After parallel work completes
    results = sdk.aggregate_parallel_results([
        "agent-1", "agent-2", "agent-3"
    ])

See also: sdk.help('tracks'), sdk.help('spikes')
"""

    elif topic in ["orchestration", "orchestrate", "subagent", "subagents"]:
        return """SUBAGENT ORCHESTRATION

Spawn explorer and coder subagents for complex work.

EXPLORER (Discovery):
    prompt = sdk.spawn_explorer(
        task="Find all API endpoints",
        scope="src/api/",
        patterns=["*.py"],
        questions=["What framework is used?"]
    )
    # Execute with Task tool
    Task(prompt=prompt["prompt"], description=prompt["description"])

CODER (Implementation):
    prompt = sdk.spawn_coder(
        feature_id="feat-add-auth",
        context=explorer_results,
        files_to_modify=["src/auth.py"],
        test_command="uv run pytest tests/auth/"
    )
    Task(prompt=prompt["prompt"], description=prompt["description"])

FULL ORCHESTRATION:
    prompts = sdk.orchestrate(
        "feat-add-caching",
        exploration_scope="src/cache/",
        test_command="uv run pytest tests/cache/"
    )

    # Phase 1: Explorer
    Task(prompt=prompts["explorer"]["prompt"])

    # Phase 2: Coder (with explorer results)
    Task(prompt=prompts["coder"]["prompt"])

WORKFLOW:
    1. Explorer discovers code patterns and files
    2. Coder implements changes using explorer findings
    3. Both agents auto-track in sessions
    4. Feature gets updated with progress

See also: sdk.help('planning')
"""

    # --- Context optimization & operations -------------------------
    elif topic in ["optimization", "session_start", "active_work"]:
        return """SESSION OPTIMIZATION

Reduce context usage with optimized methods.

SESSION START INFO:
    info = sdk.get_session_start_info(
        include_git_log=True,
        git_log_count=5,
        analytics_top_n=3
    )

    # Single call returns:
    # - status: Project status
    # - active_work: Current work item
    # - features: All features
    # - sessions: Recent sessions
    # - git_log: Recent commits
    # - analytics: Bottlenecks, recommendations, parallel

ACTIVE WORK ITEM:
    active = sdk.get_active_work_item()
    if active:
        print(f"Working on: {active['title']}")
        print(f"Progress: {active['steps_completed']}/{active['steps_total']}")

    # Filter by agent
    active = sdk.get_active_work_item(filter_by_agent=True)

BENEFITS:
    - 6+ tool calls → 1 method call
    - Reduced token usage
    - Faster session initialization
    - All context in one place

See also: sdk.help('sessions')
"""

    elif topic in ["operation", "operations", "server", "hooks", "events"]:
        return """OPERATIONS - Server, Hooks, Events

Infrastructure operations for running HtmlGraph.

SERVER OPERATIONS:
    # Start server for web UI
    result = sdk.start_server(port=8080, watch=True)
    print(f"Server at {result.handle.url}")

    # Stop server
    sdk.stop_server(result.handle)

    # Check status
    status = sdk.get_server_status(result.handle)

HOOK OPERATIONS:
    # Install Git hooks for automatic tracking
    result = sdk.install_hooks()
    print(f"Installed: {result.installed}")

    # List hook status
    result = sdk.list_hooks()
    print(f"Enabled: {result.enabled}")
    print(f"Missing: {result.missing}")

    # Validate configuration
    result = sdk.validate_hook_config()
    if not result.valid:
        print(f"Errors: {result.errors}")

EVENT OPERATIONS:
    # Export HTML sessions to JSONL
    result = sdk.export_sessions()
    print(f"Exported {result.written} sessions")

    # Rebuild SQLite index
    result = sdk.rebuild_event_index()
    print(f"Inserted {result.inserted} events")

    # Query events
    result = sdk.query_events(
        session_id="sess-123",
        tool="Bash",
        limit=10
    )
    for event in result.events:
        print(f"{event['timestamp']}: {event['summary']}")

    # Get statistics
    stats = sdk.get_event_stats()
    print(f"Total events: {stats.total_events}")

ANALYTICS OPERATIONS:
    # Analyze session
    result = sdk.analyze_session("sess-123")
    print(f"Primary work: {result.metrics['primary_work_type']}")

    # Analyze project
    result = sdk.analyze_project()
    print(f"Total sessions: {result.metrics['total_sessions']}")
    print(f"Work distribution: {result.metrics['work_distribution']}")

    # Get recommendations
    result = sdk.get_work_recommendations()
    for rec in result.recommendations:
        print(f"{rec['title']} (score: {rec['score']})")

See also: sdk.help('analytics'), sdk.help('sessions')
"""

    # Unknown topic: list everything the dispatch above understands.
    else:
        return f"""Unknown topic: '{topic}'

Available topics:
- features, bugs, spikes, chores, epics (work collections)
- tracks, sessions, agents (non-work collections)
- analytics, strategic (dependency and work analytics)
- work_queue, routing (smart task routing)
- planning, workflow (planning and track creation)
- orchestration, subagents (explorer/coder spawning)
- optimization, session_start (context optimization)

Try: sdk.help() for full overview
"""
|