htmlgraph 0.20.1__py3-none-any.whl → 0.27.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- htmlgraph/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/.htmlgraph/agents.json +72 -0
- htmlgraph/.htmlgraph/htmlgraph.db +0 -0
- htmlgraph/__init__.py +51 -1
- htmlgraph/__init__.pyi +123 -0
- htmlgraph/agent_detection.py +26 -10
- htmlgraph/agent_registry.py +2 -1
- htmlgraph/analytics/__init__.py +8 -1
- htmlgraph/analytics/cli.py +86 -20
- htmlgraph/analytics/cost_analyzer.py +391 -0
- htmlgraph/analytics/cost_monitor.py +664 -0
- htmlgraph/analytics/cost_reporter.py +675 -0
- htmlgraph/analytics/cross_session.py +617 -0
- htmlgraph/analytics/dependency.py +10 -6
- htmlgraph/analytics/pattern_learning.py +771 -0
- htmlgraph/analytics/session_graph.py +707 -0
- htmlgraph/analytics/strategic/__init__.py +80 -0
- htmlgraph/analytics/strategic/cost_optimizer.py +611 -0
- htmlgraph/analytics/strategic/pattern_detector.py +876 -0
- htmlgraph/analytics/strategic/preference_manager.py +709 -0
- htmlgraph/analytics/strategic/suggestion_engine.py +747 -0
- htmlgraph/analytics/work_type.py +67 -27
- htmlgraph/analytics_index.py +53 -20
- htmlgraph/api/__init__.py +3 -0
- htmlgraph/api/cost_alerts_websocket.py +416 -0
- htmlgraph/api/main.py +2498 -0
- htmlgraph/api/static/htmx.min.js +1 -0
- htmlgraph/api/static/style-redesign.css +1344 -0
- htmlgraph/api/static/style.css +1079 -0
- htmlgraph/api/templates/dashboard-redesign.html +1366 -0
- htmlgraph/api/templates/dashboard.html +794 -0
- htmlgraph/api/templates/partials/activity-feed-hierarchical.html +326 -0
- htmlgraph/api/templates/partials/activity-feed.html +1100 -0
- htmlgraph/api/templates/partials/agents-redesign.html +317 -0
- htmlgraph/api/templates/partials/agents.html +317 -0
- htmlgraph/api/templates/partials/event-traces.html +373 -0
- htmlgraph/api/templates/partials/features-kanban-redesign.html +509 -0
- htmlgraph/api/templates/partials/features.html +578 -0
- htmlgraph/api/templates/partials/metrics-redesign.html +346 -0
- htmlgraph/api/templates/partials/metrics.html +346 -0
- htmlgraph/api/templates/partials/orchestration-redesign.html +443 -0
- htmlgraph/api/templates/partials/orchestration.html +198 -0
- htmlgraph/api/templates/partials/spawners.html +375 -0
- htmlgraph/api/templates/partials/work-items.html +613 -0
- htmlgraph/api/websocket.py +538 -0
- htmlgraph/archive/__init__.py +24 -0
- htmlgraph/archive/bloom.py +234 -0
- htmlgraph/archive/fts.py +297 -0
- htmlgraph/archive/manager.py +583 -0
- htmlgraph/archive/search.py +244 -0
- htmlgraph/atomic_ops.py +560 -0
- htmlgraph/attribute_index.py +2 -1
- htmlgraph/bounded_paths.py +539 -0
- htmlgraph/builders/base.py +57 -2
- htmlgraph/builders/bug.py +19 -3
- htmlgraph/builders/chore.py +19 -3
- htmlgraph/builders/epic.py +19 -3
- htmlgraph/builders/feature.py +27 -3
- htmlgraph/builders/insight.py +2 -1
- htmlgraph/builders/metric.py +2 -1
- htmlgraph/builders/pattern.py +2 -1
- htmlgraph/builders/phase.py +19 -3
- htmlgraph/builders/spike.py +29 -3
- htmlgraph/builders/track.py +42 -1
- htmlgraph/cigs/__init__.py +81 -0
- htmlgraph/cigs/autonomy.py +385 -0
- htmlgraph/cigs/cost.py +475 -0
- htmlgraph/cigs/messages_basic.py +472 -0
- htmlgraph/cigs/messaging.py +365 -0
- htmlgraph/cigs/models.py +771 -0
- htmlgraph/cigs/pattern_storage.py +427 -0
- htmlgraph/cigs/patterns.py +503 -0
- htmlgraph/cigs/posttool_analyzer.py +234 -0
- htmlgraph/cigs/reporter.py +818 -0
- htmlgraph/cigs/tracker.py +317 -0
- htmlgraph/cli/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/cli/.htmlgraph/agents.json +72 -0
- htmlgraph/cli/.htmlgraph/htmlgraph.db +0 -0
- htmlgraph/cli/__init__.py +42 -0
- htmlgraph/cli/__main__.py +6 -0
- htmlgraph/cli/analytics.py +1424 -0
- htmlgraph/cli/base.py +685 -0
- htmlgraph/cli/constants.py +206 -0
- htmlgraph/cli/core.py +954 -0
- htmlgraph/cli/main.py +147 -0
- htmlgraph/cli/models.py +475 -0
- htmlgraph/cli/templates/__init__.py +1 -0
- htmlgraph/cli/templates/cost_dashboard.py +399 -0
- htmlgraph/cli/work/__init__.py +239 -0
- htmlgraph/cli/work/browse.py +115 -0
- htmlgraph/cli/work/features.py +568 -0
- htmlgraph/cli/work/orchestration.py +676 -0
- htmlgraph/cli/work/report.py +728 -0
- htmlgraph/cli/work/sessions.py +466 -0
- htmlgraph/cli/work/snapshot.py +559 -0
- htmlgraph/cli/work/tracks.py +486 -0
- htmlgraph/cli_commands/__init__.py +1 -0
- htmlgraph/cli_commands/feature.py +195 -0
- htmlgraph/cli_framework.py +115 -0
- htmlgraph/collections/__init__.py +2 -0
- htmlgraph/collections/base.py +197 -14
- htmlgraph/collections/bug.py +2 -1
- htmlgraph/collections/chore.py +2 -1
- htmlgraph/collections/epic.py +2 -1
- htmlgraph/collections/feature.py +2 -1
- htmlgraph/collections/insight.py +2 -1
- htmlgraph/collections/metric.py +2 -1
- htmlgraph/collections/pattern.py +2 -1
- htmlgraph/collections/phase.py +2 -1
- htmlgraph/collections/session.py +194 -0
- htmlgraph/collections/spike.py +13 -2
- htmlgraph/collections/task_delegation.py +241 -0
- htmlgraph/collections/todo.py +14 -1
- htmlgraph/collections/traces.py +487 -0
- htmlgraph/config/cost_models.json +56 -0
- htmlgraph/config.py +190 -0
- htmlgraph/context_analytics.py +2 -1
- htmlgraph/converter.py +116 -7
- htmlgraph/cost_analysis/__init__.py +5 -0
- htmlgraph/cost_analysis/analyzer.py +438 -0
- htmlgraph/dashboard.html +2246 -248
- htmlgraph/dashboard.html.backup +6592 -0
- htmlgraph/dashboard.html.bak +7181 -0
- htmlgraph/dashboard.html.bak2 +7231 -0
- htmlgraph/dashboard.html.bak3 +7232 -0
- htmlgraph/db/__init__.py +38 -0
- htmlgraph/db/queries.py +790 -0
- htmlgraph/db/schema.py +1788 -0
- htmlgraph/decorators.py +317 -0
- htmlgraph/dependency_models.py +2 -1
- htmlgraph/deploy.py +26 -27
- htmlgraph/docs/API_REFERENCE.md +841 -0
- htmlgraph/docs/HTTP_API.md +750 -0
- htmlgraph/docs/INTEGRATION_GUIDE.md +752 -0
- htmlgraph/docs/ORCHESTRATION_PATTERNS.md +717 -0
- htmlgraph/docs/README.md +532 -0
- htmlgraph/docs/__init__.py +77 -0
- htmlgraph/docs/docs_version.py +55 -0
- htmlgraph/docs/metadata.py +93 -0
- htmlgraph/docs/migrations.py +232 -0
- htmlgraph/docs/template_engine.py +143 -0
- htmlgraph/docs/templates/_sections/cli_reference.md.j2 +52 -0
- htmlgraph/docs/templates/_sections/core_concepts.md.j2 +29 -0
- htmlgraph/docs/templates/_sections/sdk_basics.md.j2 +69 -0
- htmlgraph/docs/templates/base_agents.md.j2 +78 -0
- htmlgraph/docs/templates/example_user_override.md.j2 +47 -0
- htmlgraph/docs/version_check.py +163 -0
- htmlgraph/edge_index.py +2 -1
- htmlgraph/error_handler.py +544 -0
- htmlgraph/event_log.py +86 -37
- htmlgraph/event_migration.py +2 -1
- htmlgraph/file_watcher.py +12 -8
- htmlgraph/find_api.py +2 -1
- htmlgraph/git_events.py +67 -9
- htmlgraph/hooks/.htmlgraph/.session-warning-state.json +6 -0
- htmlgraph/hooks/.htmlgraph/agents.json +72 -0
- htmlgraph/hooks/.htmlgraph/index.sqlite +0 -0
- htmlgraph/hooks/__init__.py +8 -0
- htmlgraph/hooks/bootstrap.py +169 -0
- htmlgraph/hooks/cigs_pretool_enforcer.py +354 -0
- htmlgraph/hooks/concurrent_sessions.py +208 -0
- htmlgraph/hooks/context.py +350 -0
- htmlgraph/hooks/drift_handler.py +525 -0
- htmlgraph/hooks/event_tracker.py +790 -99
- htmlgraph/hooks/git_commands.py +175 -0
- htmlgraph/hooks/installer.py +5 -1
- htmlgraph/hooks/orchestrator.py +327 -76
- htmlgraph/hooks/orchestrator_reflector.py +31 -4
- htmlgraph/hooks/post_tool_use_failure.py +32 -7
- htmlgraph/hooks/post_tool_use_handler.py +257 -0
- htmlgraph/hooks/posttooluse.py +92 -19
- htmlgraph/hooks/pretooluse.py +527 -7
- htmlgraph/hooks/prompt_analyzer.py +637 -0
- htmlgraph/hooks/session_handler.py +668 -0
- htmlgraph/hooks/session_summary.py +395 -0
- htmlgraph/hooks/state_manager.py +504 -0
- htmlgraph/hooks/subagent_detection.py +202 -0
- htmlgraph/hooks/subagent_stop.py +369 -0
- htmlgraph/hooks/task_enforcer.py +99 -4
- htmlgraph/hooks/validator.py +212 -91
- htmlgraph/ids.py +2 -1
- htmlgraph/learning.py +125 -100
- htmlgraph/mcp_server.py +2 -1
- htmlgraph/models.py +217 -18
- htmlgraph/operations/README.md +62 -0
- htmlgraph/operations/__init__.py +79 -0
- htmlgraph/operations/analytics.py +339 -0
- htmlgraph/operations/bootstrap.py +289 -0
- htmlgraph/operations/events.py +244 -0
- htmlgraph/operations/fastapi_server.py +231 -0
- htmlgraph/operations/hooks.py +350 -0
- htmlgraph/operations/initialization.py +597 -0
- htmlgraph/operations/initialization.py.backup +228 -0
- htmlgraph/operations/server.py +303 -0
- htmlgraph/orchestration/__init__.py +58 -0
- htmlgraph/orchestration/claude_launcher.py +179 -0
- htmlgraph/orchestration/command_builder.py +72 -0
- htmlgraph/orchestration/headless_spawner.py +281 -0
- htmlgraph/orchestration/live_events.py +377 -0
- htmlgraph/orchestration/model_selection.py +327 -0
- htmlgraph/orchestration/plugin_manager.py +140 -0
- htmlgraph/orchestration/prompts.py +137 -0
- htmlgraph/orchestration/spawner_event_tracker.py +383 -0
- htmlgraph/orchestration/spawners/__init__.py +16 -0
- htmlgraph/orchestration/spawners/base.py +194 -0
- htmlgraph/orchestration/spawners/claude.py +173 -0
- htmlgraph/orchestration/spawners/codex.py +435 -0
- htmlgraph/orchestration/spawners/copilot.py +294 -0
- htmlgraph/orchestration/spawners/gemini.py +471 -0
- htmlgraph/orchestration/subprocess_runner.py +36 -0
- htmlgraph/{orchestration.py → orchestration/task_coordination.py} +16 -8
- htmlgraph/orchestration.md +563 -0
- htmlgraph/orchestrator-system-prompt-optimized.txt +863 -0
- htmlgraph/orchestrator.py +2 -1
- htmlgraph/orchestrator_config.py +357 -0
- htmlgraph/orchestrator_mode.py +115 -4
- htmlgraph/parallel.py +2 -1
- htmlgraph/parser.py +86 -6
- htmlgraph/path_query.py +608 -0
- htmlgraph/pattern_matcher.py +636 -0
- htmlgraph/pydantic_models.py +476 -0
- htmlgraph/quality_gates.py +350 -0
- htmlgraph/query_builder.py +2 -1
- htmlgraph/query_composer.py +509 -0
- htmlgraph/reflection.py +443 -0
- htmlgraph/refs.py +344 -0
- htmlgraph/repo_hash.py +512 -0
- htmlgraph/repositories/__init__.py +292 -0
- htmlgraph/repositories/analytics_repository.py +455 -0
- htmlgraph/repositories/analytics_repository_standard.py +628 -0
- htmlgraph/repositories/feature_repository.py +581 -0
- htmlgraph/repositories/feature_repository_htmlfile.py +668 -0
- htmlgraph/repositories/feature_repository_memory.py +607 -0
- htmlgraph/repositories/feature_repository_sqlite.py +858 -0
- htmlgraph/repositories/filter_service.py +620 -0
- htmlgraph/repositories/filter_service_standard.py +445 -0
- htmlgraph/repositories/shared_cache.py +621 -0
- htmlgraph/repositories/shared_cache_memory.py +395 -0
- htmlgraph/repositories/track_repository.py +552 -0
- htmlgraph/repositories/track_repository_htmlfile.py +619 -0
- htmlgraph/repositories/track_repository_memory.py +508 -0
- htmlgraph/repositories/track_repository_sqlite.py +711 -0
- htmlgraph/sdk/__init__.py +398 -0
- htmlgraph/sdk/__init__.pyi +14 -0
- htmlgraph/sdk/analytics/__init__.py +19 -0
- htmlgraph/sdk/analytics/engine.py +155 -0
- htmlgraph/sdk/analytics/helpers.py +178 -0
- htmlgraph/sdk/analytics/registry.py +109 -0
- htmlgraph/sdk/base.py +484 -0
- htmlgraph/sdk/constants.py +216 -0
- htmlgraph/sdk/core.pyi +308 -0
- htmlgraph/sdk/discovery.py +120 -0
- htmlgraph/sdk/help/__init__.py +12 -0
- htmlgraph/sdk/help/mixin.py +699 -0
- htmlgraph/sdk/mixins/__init__.py +15 -0
- htmlgraph/sdk/mixins/attribution.py +113 -0
- htmlgraph/sdk/mixins/mixin.py +410 -0
- htmlgraph/sdk/operations/__init__.py +12 -0
- htmlgraph/sdk/operations/mixin.py +427 -0
- htmlgraph/sdk/orchestration/__init__.py +17 -0
- htmlgraph/sdk/orchestration/coordinator.py +203 -0
- htmlgraph/sdk/orchestration/spawner.py +204 -0
- htmlgraph/sdk/planning/__init__.py +19 -0
- htmlgraph/sdk/planning/bottlenecks.py +93 -0
- htmlgraph/sdk/planning/mixin.py +211 -0
- htmlgraph/sdk/planning/parallel.py +186 -0
- htmlgraph/sdk/planning/queue.py +210 -0
- htmlgraph/sdk/planning/recommendations.py +87 -0
- htmlgraph/sdk/planning/smart_planning.py +319 -0
- htmlgraph/sdk/session/__init__.py +19 -0
- htmlgraph/sdk/session/continuity.py +57 -0
- htmlgraph/sdk/session/handoff.py +110 -0
- htmlgraph/sdk/session/info.py +309 -0
- htmlgraph/sdk/session/manager.py +103 -0
- htmlgraph/sdk/strategic/__init__.py +26 -0
- htmlgraph/sdk/strategic/mixin.py +563 -0
- htmlgraph/server.py +295 -107
- htmlgraph/session_hooks.py +300 -0
- htmlgraph/session_manager.py +285 -3
- htmlgraph/session_registry.py +587 -0
- htmlgraph/session_state.py +436 -0
- htmlgraph/session_warning.py +2 -1
- htmlgraph/sessions/__init__.py +23 -0
- htmlgraph/sessions/handoff.py +756 -0
- htmlgraph/system_prompts.py +450 -0
- htmlgraph/templates/orchestration-view.html +350 -0
- htmlgraph/track_builder.py +33 -1
- htmlgraph/track_manager.py +38 -0
- htmlgraph/transcript.py +18 -5
- htmlgraph/validation.py +115 -0
- htmlgraph/watch.py +2 -1
- htmlgraph/work_type_utils.py +2 -1
- {htmlgraph-0.20.1.data → htmlgraph-0.27.5.data}/data/htmlgraph/dashboard.html +2246 -248
- {htmlgraph-0.20.1.dist-info → htmlgraph-0.27.5.dist-info}/METADATA +95 -64
- htmlgraph-0.27.5.dist-info/RECORD +337 -0
- {htmlgraph-0.20.1.dist-info → htmlgraph-0.27.5.dist-info}/entry_points.txt +1 -1
- htmlgraph/cli.py +0 -4839
- htmlgraph/sdk.py +0 -2359
- htmlgraph-0.20.1.dist-info/RECORD +0 -118
- {htmlgraph-0.20.1.data → htmlgraph-0.27.5.data}/data/htmlgraph/styles.css +0 -0
- {htmlgraph-0.20.1.data → htmlgraph-0.27.5.data}/data/htmlgraph/templates/AGENTS.md.template +0 -0
- {htmlgraph-0.20.1.data → htmlgraph-0.27.5.data}/data/htmlgraph/templates/CLAUDE.md.template +0 -0
- {htmlgraph-0.20.1.data → htmlgraph-0.27.5.data}/data/htmlgraph/templates/GEMINI.md.template +0 -0
- {htmlgraph-0.20.1.dist-info → htmlgraph-0.27.5.dist-info}/WHEEL +0 -0
htmlgraph/sdk.py
DELETED
|
@@ -1,2359 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
HtmlGraph SDK - AI-Friendly Interface
|
|
3
|
-
|
|
4
|
-
Provides a fluent, ergonomic API for AI agents with:
|
|
5
|
-
- Auto-discovery of .htmlgraph directory
|
|
6
|
-
- Method chaining for all operations
|
|
7
|
-
- Context managers for auto-save
|
|
8
|
-
- Batch operations
|
|
9
|
-
- Minimal boilerplate
|
|
10
|
-
|
|
11
|
-
Example:
|
|
12
|
-
from htmlgraph import SDK
|
|
13
|
-
|
|
14
|
-
# Auto-discovers .htmlgraph directory
|
|
15
|
-
sdk = SDK(agent="claude")
|
|
16
|
-
|
|
17
|
-
# Fluent feature creation
|
|
18
|
-
feature = sdk.features.create(
|
|
19
|
-
title="User Authentication",
|
|
20
|
-
track="auth"
|
|
21
|
-
).add_steps([
|
|
22
|
-
"Create login endpoint",
|
|
23
|
-
"Add JWT middleware",
|
|
24
|
-
"Write tests"
|
|
25
|
-
]).set_priority("high").save()
|
|
26
|
-
|
|
27
|
-
# Work on a feature
|
|
28
|
-
with sdk.features.get("feature-001") as feature:
|
|
29
|
-
feature.start()
|
|
30
|
-
feature.complete_step(0)
|
|
31
|
-
# Auto-saves on exit
|
|
32
|
-
|
|
33
|
-
# Query
|
|
34
|
-
todos = sdk.features.where(status="todo", priority="high")
|
|
35
|
-
|
|
36
|
-
# Batch operations
|
|
37
|
-
sdk.features.mark_done(["feat-001", "feat-002", "feat-003"])
|
|
38
|
-
"""
|
|
39
|
-
|
|
40
|
-
from __future__ import annotations
|
|
41
|
-
|
|
42
|
-
from pathlib import Path
|
|
43
|
-
from typing import Any
|
|
44
|
-
|
|
45
|
-
from htmlgraph.agent_detection import detect_agent_name
|
|
46
|
-
from htmlgraph.agents import AgentInterface
|
|
47
|
-
from htmlgraph.analytics import Analytics, DependencyAnalytics
|
|
48
|
-
from htmlgraph.collections import (
|
|
49
|
-
BaseCollection,
|
|
50
|
-
BugCollection,
|
|
51
|
-
ChoreCollection,
|
|
52
|
-
EpicCollection,
|
|
53
|
-
FeatureCollection,
|
|
54
|
-
PhaseCollection,
|
|
55
|
-
SpikeCollection,
|
|
56
|
-
TodoCollection,
|
|
57
|
-
)
|
|
58
|
-
from htmlgraph.collections.insight import InsightCollection
|
|
59
|
-
from htmlgraph.collections.metric import MetricCollection
|
|
60
|
-
from htmlgraph.collections.pattern import PatternCollection
|
|
61
|
-
from htmlgraph.context_analytics import ContextAnalytics
|
|
62
|
-
from htmlgraph.graph import HtmlGraph
|
|
63
|
-
from htmlgraph.models import Node, Step
|
|
64
|
-
from htmlgraph.session_manager import SessionManager
|
|
65
|
-
from htmlgraph.session_warning import check_and_show_warning
|
|
66
|
-
from htmlgraph.track_builder import TrackCollection
|
|
67
|
-
from htmlgraph.types import (
|
|
68
|
-
ActiveWorkItem,
|
|
69
|
-
BottleneckDict,
|
|
70
|
-
SessionStartInfo,
|
|
71
|
-
)
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
class SDK:
|
|
75
|
-
"""
|
|
76
|
-
Main SDK interface for AI agents.
|
|
77
|
-
|
|
78
|
-
Auto-discovers .htmlgraph directory and provides fluent API for all collections.
|
|
79
|
-
|
|
80
|
-
Available Collections:
|
|
81
|
-
- features: Feature work items with builder support
|
|
82
|
-
- bugs: Bug reports
|
|
83
|
-
- chores: Maintenance and chore tasks
|
|
84
|
-
- spikes: Investigation and research spikes
|
|
85
|
-
- epics: Large bodies of work
|
|
86
|
-
- phases: Project phases
|
|
87
|
-
- sessions: Agent sessions
|
|
88
|
-
- tracks: Work tracks
|
|
89
|
-
- agents: Agent information
|
|
90
|
-
- todos: Persistent task tracking (mirrors TodoWrite API)
|
|
91
|
-
|
|
92
|
-
Error Handling Patterns
|
|
93
|
-
=======================
|
|
94
|
-
|
|
95
|
-
SDK methods follow consistent error handling patterns by operation type:
|
|
96
|
-
|
|
97
|
-
LOOKUP OPERATIONS (Return None):
|
|
98
|
-
Single-item lookups return None when not found.
|
|
99
|
-
Always check the result before using.
|
|
100
|
-
|
|
101
|
-
>>> feature = sdk.features.get("nonexistent")
|
|
102
|
-
>>> if feature:
|
|
103
|
-
... print(feature.title)
|
|
104
|
-
... else:
|
|
105
|
-
... print("Not found")
|
|
106
|
-
|
|
107
|
-
QUERY OPERATIONS (Return Empty List):
|
|
108
|
-
Queries return empty list when no matches or on error.
|
|
109
|
-
Safe to iterate without checking.
|
|
110
|
-
|
|
111
|
-
>>> results = sdk.features.where(status="impossible")
|
|
112
|
-
>>> for r in results: # Empty iteration is safe
|
|
113
|
-
... print(r.title)
|
|
114
|
-
|
|
115
|
-
EDIT OPERATIONS (Raise Exception):
|
|
116
|
-
Edit operations raise NodeNotFoundError when target missing.
|
|
117
|
-
Use try/except to handle gracefully.
|
|
118
|
-
|
|
119
|
-
>>> from htmlgraph.exceptions import NodeNotFoundError
|
|
120
|
-
>>> try:
|
|
121
|
-
... with sdk.features.edit("nonexistent") as f:
|
|
122
|
-
... f.status = "done"
|
|
123
|
-
... except NodeNotFoundError:
|
|
124
|
-
... print("Feature not found")
|
|
125
|
-
|
|
126
|
-
CREATE OPERATIONS (Raise on Validation):
|
|
127
|
-
Create operations raise ValidationError on invalid input.
|
|
128
|
-
|
|
129
|
-
>>> try:
|
|
130
|
-
... sdk.features.create("") # Empty title
|
|
131
|
-
... except ValidationError:
|
|
132
|
-
... print("Title required")
|
|
133
|
-
|
|
134
|
-
BATCH OPERATIONS (Return Count):
|
|
135
|
-
Batch operations return count of successful items.
|
|
136
|
-
Silently skip items that fail.
|
|
137
|
-
|
|
138
|
-
>>> count = sdk.features.mark_done(["feat-1", "missing", "feat-2"])
|
|
139
|
-
>>> print(f"Completed {count} of 3") # Outputs: Completed 2 of 3
|
|
140
|
-
|
|
141
|
-
Pattern Summary:
|
|
142
|
-
| Operation Type | Error Behavior | Example Method |
|
|
143
|
-
|----------------|--------------------|-----------------------|
|
|
144
|
-
| Lookup | Return None | .get(id) |
|
|
145
|
-
| Query | Return [] | .where(), .all() |
|
|
146
|
-
| Edit | Raise Exception | .edit(id) |
|
|
147
|
-
| Create | Raise on Invalid | .create(title) |
|
|
148
|
-
| Batch | Return Count | .mark_done([ids]) |
|
|
149
|
-
| Delete | Return Bool | .delete(id) |
|
|
150
|
-
|
|
151
|
-
Available Exceptions:
|
|
152
|
-
- NodeNotFoundError: Node with ID not found
|
|
153
|
-
- ValidationError: Invalid input parameters
|
|
154
|
-
- ClaimConflictError: Node already claimed by another agent
|
|
155
|
-
|
|
156
|
-
Example:
|
|
157
|
-
sdk = SDK(agent="claude")
|
|
158
|
-
|
|
159
|
-
# Work with features (has builder support)
|
|
160
|
-
feature = sdk.features.create("User Auth")
|
|
161
|
-
.set_priority("high")
|
|
162
|
-
.add_steps(["Login", "Logout"])
|
|
163
|
-
.save()
|
|
164
|
-
|
|
165
|
-
# Work with bugs
|
|
166
|
-
high_bugs = sdk.bugs.where(status="todo", priority="high")
|
|
167
|
-
with sdk.bugs.edit("bug-001") as bug:
|
|
168
|
-
bug.status = "in-progress"
|
|
169
|
-
|
|
170
|
-
# Work with any collection
|
|
171
|
-
all_spikes = sdk.spikes.all()
|
|
172
|
-
sdk.chores.mark_done(["chore-001", "chore-002"])
|
|
173
|
-
sdk.epics.assign(["epic-001"], agent="claude")
|
|
174
|
-
"""
|
|
175
|
-
|
|
176
|
-
def __init__(self, directory: Path | str | None = None, agent: str | None = None):
|
|
177
|
-
"""
|
|
178
|
-
Initialize SDK.
|
|
179
|
-
|
|
180
|
-
Args:
|
|
181
|
-
directory: Path to .htmlgraph directory (auto-discovered if not provided)
|
|
182
|
-
agent: Agent identifier for operations
|
|
183
|
-
"""
|
|
184
|
-
if directory is None:
|
|
185
|
-
directory = self._discover_htmlgraph()
|
|
186
|
-
|
|
187
|
-
if agent is None:
|
|
188
|
-
agent = detect_agent_name()
|
|
189
|
-
|
|
190
|
-
self._directory = Path(directory)
|
|
191
|
-
self._agent_id = agent
|
|
192
|
-
|
|
193
|
-
# Initialize underlying HtmlGraphs first (for backward compatibility and sharing)
|
|
194
|
-
# These are shared with SessionManager to avoid double-loading features
|
|
195
|
-
self._graph = HtmlGraph(self._directory / "features")
|
|
196
|
-
self._bugs_graph = HtmlGraph(self._directory / "bugs")
|
|
197
|
-
|
|
198
|
-
# Initialize SessionManager with shared graph instances to avoid double-loading
|
|
199
|
-
self.session_manager = SessionManager(
|
|
200
|
-
self._directory,
|
|
201
|
-
features_graph=self._graph,
|
|
202
|
-
bugs_graph=self._bugs_graph,
|
|
203
|
-
)
|
|
204
|
-
|
|
205
|
-
# Agent interface (for backward compatibility)
|
|
206
|
-
self._agent_interface = AgentInterface(
|
|
207
|
-
self._directory / "features", agent_id=agent
|
|
208
|
-
)
|
|
209
|
-
|
|
210
|
-
# Collection interfaces - all work item types (all with builder support)
|
|
211
|
-
self.features = FeatureCollection(self)
|
|
212
|
-
self.bugs = BugCollection(self)
|
|
213
|
-
self.chores = ChoreCollection(self)
|
|
214
|
-
self.spikes = SpikeCollection(self)
|
|
215
|
-
self.epics = EpicCollection(self)
|
|
216
|
-
self.phases = PhaseCollection(self)
|
|
217
|
-
|
|
218
|
-
# Non-work collections
|
|
219
|
-
self.sessions: BaseCollection = BaseCollection(self, "sessions", "session")
|
|
220
|
-
self.tracks: TrackCollection = TrackCollection(
|
|
221
|
-
self
|
|
222
|
-
) # Use specialized collection with builder support
|
|
223
|
-
self.agents: BaseCollection = BaseCollection(self, "agents", "agent")
|
|
224
|
-
|
|
225
|
-
# Learning collections (Active Learning Persistence)
|
|
226
|
-
self.patterns = PatternCollection(self)
|
|
227
|
-
self.insights = InsightCollection(self)
|
|
228
|
-
self.metrics = MetricCollection(self)
|
|
229
|
-
|
|
230
|
-
# Todo collection (persistent task tracking)
|
|
231
|
-
self.todos = TodoCollection(self)
|
|
232
|
-
|
|
233
|
-
# Create learning directories if needed
|
|
234
|
-
(self._directory / "patterns").mkdir(exist_ok=True)
|
|
235
|
-
(self._directory / "insights").mkdir(exist_ok=True)
|
|
236
|
-
(self._directory / "metrics").mkdir(exist_ok=True)
|
|
237
|
-
(self._directory / "todos").mkdir(exist_ok=True)
|
|
238
|
-
|
|
239
|
-
# Analytics interface (Phase 2: Work Type Analytics)
|
|
240
|
-
self.analytics = Analytics(self)
|
|
241
|
-
|
|
242
|
-
# Dependency analytics interface (Advanced graph analytics)
|
|
243
|
-
self.dep_analytics = DependencyAnalytics(self._graph)
|
|
244
|
-
|
|
245
|
-
# Context analytics interface (Context usage tracking)
|
|
246
|
-
self.context = ContextAnalytics(self)
|
|
247
|
-
|
|
248
|
-
# Lazy-loaded orchestrator for subagent management
|
|
249
|
-
self._orchestrator = None
|
|
250
|
-
|
|
251
|
-
# Session warning system (workaround for Claude Code hook bug #10373)
|
|
252
|
-
# Shows orchestrator instructions on first SDK usage per session
|
|
253
|
-
self._session_warning = check_and_show_warning(
|
|
254
|
-
self._directory,
|
|
255
|
-
agent=self._agent_id,
|
|
256
|
-
session_id=None, # Will be set by session manager if available
|
|
257
|
-
)
|
|
258
|
-
|
|
259
|
-
@staticmethod
|
|
260
|
-
def _discover_htmlgraph() -> Path:
|
|
261
|
-
"""
|
|
262
|
-
Auto-discover .htmlgraph directory.
|
|
263
|
-
|
|
264
|
-
Searches current directory and parents.
|
|
265
|
-
"""
|
|
266
|
-
current = Path.cwd()
|
|
267
|
-
|
|
268
|
-
# Check current directory
|
|
269
|
-
if (current / ".htmlgraph").exists():
|
|
270
|
-
return current / ".htmlgraph"
|
|
271
|
-
|
|
272
|
-
# Check parent directories
|
|
273
|
-
for parent in current.parents:
|
|
274
|
-
if (parent / ".htmlgraph").exists():
|
|
275
|
-
return parent / ".htmlgraph"
|
|
276
|
-
|
|
277
|
-
# Default to current directory
|
|
278
|
-
return current / ".htmlgraph"
|
|
279
|
-
|
|
280
|
-
@property
|
|
281
|
-
def agent(self) -> str | None:
|
|
282
|
-
"""Get current agent ID."""
|
|
283
|
-
return self._agent_id
|
|
284
|
-
|
|
285
|
-
def dismiss_session_warning(self) -> bool:
|
|
286
|
-
"""
|
|
287
|
-
Dismiss the session warning after reading it.
|
|
288
|
-
|
|
289
|
-
IMPORTANT: Call this as your FIRST action after seeing the orchestrator
|
|
290
|
-
warning. This confirms you've read the instructions.
|
|
291
|
-
|
|
292
|
-
Returns:
|
|
293
|
-
True if warning was dismissed, False if already dismissed
|
|
294
|
-
|
|
295
|
-
Example:
|
|
296
|
-
sdk = SDK(agent="claude")
|
|
297
|
-
# Warning shown automatically...
|
|
298
|
-
|
|
299
|
-
# First action: dismiss to confirm you read it
|
|
300
|
-
sdk.dismiss_session_warning()
|
|
301
|
-
|
|
302
|
-
# Now proceed with orchestration
|
|
303
|
-
sdk.spawn_coder(feature_id="feat-123", ...)
|
|
304
|
-
"""
|
|
305
|
-
if self._session_warning:
|
|
306
|
-
return self._session_warning.dismiss(
|
|
307
|
-
agent=self._agent_id,
|
|
308
|
-
session_id=None,
|
|
309
|
-
)
|
|
310
|
-
return False
|
|
311
|
-
|
|
312
|
-
def get_warning_status(self) -> dict[str, Any]:
|
|
313
|
-
"""
|
|
314
|
-
Get current session warning status.
|
|
315
|
-
|
|
316
|
-
Returns:
|
|
317
|
-
Dict with dismissed status, timestamp, and show count
|
|
318
|
-
"""
|
|
319
|
-
if self._session_warning:
|
|
320
|
-
return self._session_warning.get_status()
|
|
321
|
-
return {"dismissed": True, "show_count": 0}
|
|
322
|
-
|
|
323
|
-
def reload(self) -> None:
|
|
324
|
-
"""Reload all data from disk."""
|
|
325
|
-
self._graph.reload()
|
|
326
|
-
self._agent_interface.reload()
|
|
327
|
-
# SessionManager reloads implicitly on access via its converters/graphs
|
|
328
|
-
|
|
329
|
-
def summary(self, max_items: int = 10) -> str:
|
|
330
|
-
"""
|
|
331
|
-
Get project summary.
|
|
332
|
-
|
|
333
|
-
Returns:
|
|
334
|
-
Compact overview for AI agent orientation
|
|
335
|
-
"""
|
|
336
|
-
return self._agent_interface.get_summary(max_items)
|
|
337
|
-
|
|
338
|
-
def my_work(self) -> dict[str, Any]:
|
|
339
|
-
"""
|
|
340
|
-
Get current agent's workload.
|
|
341
|
-
|
|
342
|
-
Returns:
|
|
343
|
-
Dict with in_progress, completed counts
|
|
344
|
-
"""
|
|
345
|
-
if not self._agent_id:
|
|
346
|
-
raise ValueError("No agent ID set")
|
|
347
|
-
return self._agent_interface.get_workload(self._agent_id)
|
|
348
|
-
|
|
349
|
-
def next_task(
|
|
350
|
-
self, priority: str | None = None, auto_claim: bool = True
|
|
351
|
-
) -> Node | None:
|
|
352
|
-
"""
|
|
353
|
-
Get next available task for this agent.
|
|
354
|
-
|
|
355
|
-
Args:
|
|
356
|
-
priority: Optional priority filter
|
|
357
|
-
auto_claim: Automatically claim the task
|
|
358
|
-
|
|
359
|
-
Returns:
|
|
360
|
-
Next available Node or None
|
|
361
|
-
"""
|
|
362
|
-
return self._agent_interface.get_next_task(
|
|
363
|
-
agent_id=self._agent_id,
|
|
364
|
-
priority=priority,
|
|
365
|
-
node_type="feature",
|
|
366
|
-
auto_claim=auto_claim,
|
|
367
|
-
)
|
|
368
|
-
|
|
369
|
-
def set_session_handoff(
|
|
370
|
-
self,
|
|
371
|
-
handoff_notes: str | None = None,
|
|
372
|
-
recommended_next: str | None = None,
|
|
373
|
-
blockers: list[str] | None = None,
|
|
374
|
-
session_id: str | None = None,
|
|
375
|
-
) -> Any:
|
|
376
|
-
"""
|
|
377
|
-
Set handoff context on a session.
|
|
378
|
-
|
|
379
|
-
Args:
|
|
380
|
-
handoff_notes: Notes for next session/agent
|
|
381
|
-
recommended_next: Suggested next steps
|
|
382
|
-
blockers: List of blockers
|
|
383
|
-
session_id: Specific session ID (defaults to active session)
|
|
384
|
-
|
|
385
|
-
Returns:
|
|
386
|
-
Updated Session or None if not found
|
|
387
|
-
"""
|
|
388
|
-
if not session_id:
|
|
389
|
-
if self._agent_id:
|
|
390
|
-
active = self.session_manager.get_active_session_for_agent(
|
|
391
|
-
self._agent_id
|
|
392
|
-
)
|
|
393
|
-
else:
|
|
394
|
-
active = self.session_manager.get_active_session()
|
|
395
|
-
if not active:
|
|
396
|
-
return None
|
|
397
|
-
session_id = active.id
|
|
398
|
-
|
|
399
|
-
return self.session_manager.set_session_handoff(
|
|
400
|
-
session_id=session_id,
|
|
401
|
-
handoff_notes=handoff_notes,
|
|
402
|
-
recommended_next=recommended_next,
|
|
403
|
-
blockers=blockers,
|
|
404
|
-
)
|
|
405
|
-
|
|
406
|
-
def start_session(
|
|
407
|
-
self,
|
|
408
|
-
session_id: str | None = None,
|
|
409
|
-
title: str | None = None,
|
|
410
|
-
agent: str | None = None,
|
|
411
|
-
) -> Any:
|
|
412
|
-
"""
|
|
413
|
-
Start a new session.
|
|
414
|
-
|
|
415
|
-
Args:
|
|
416
|
-
session_id: Optional session ID
|
|
417
|
-
title: Optional session title
|
|
418
|
-
agent: Optional agent override (defaults to SDK agent)
|
|
419
|
-
|
|
420
|
-
Returns:
|
|
421
|
-
New Session instance
|
|
422
|
-
"""
|
|
423
|
-
return self.session_manager.start_session(
|
|
424
|
-
session_id=session_id, agent=agent or self._agent_id or "cli", title=title
|
|
425
|
-
)
|
|
426
|
-
|
|
427
|
-
def end_session(
|
|
428
|
-
self,
|
|
429
|
-
session_id: str,
|
|
430
|
-
handoff_notes: str | None = None,
|
|
431
|
-
recommended_next: str | None = None,
|
|
432
|
-
blockers: list[str] | None = None,
|
|
433
|
-
) -> Any:
|
|
434
|
-
"""
|
|
435
|
-
End a session.
|
|
436
|
-
|
|
437
|
-
Args:
|
|
438
|
-
session_id: Session ID to end
|
|
439
|
-
handoff_notes: Optional handoff notes
|
|
440
|
-
recommended_next: Optional recommendations
|
|
441
|
-
blockers: Optional blockers
|
|
442
|
-
|
|
443
|
-
Returns:
|
|
444
|
-
Ended Session instance
|
|
445
|
-
"""
|
|
446
|
-
return self.session_manager.end_session(
|
|
447
|
-
session_id=session_id,
|
|
448
|
-
handoff_notes=handoff_notes,
|
|
449
|
-
recommended_next=recommended_next,
|
|
450
|
-
blockers=blockers,
|
|
451
|
-
)
|
|
452
|
-
|
|
453
|
-
def get_status(self) -> dict[str, Any]:
|
|
454
|
-
"""
|
|
455
|
-
Get project status.
|
|
456
|
-
|
|
457
|
-
Returns:
|
|
458
|
-
Dict with status metrics (WIP, counts, etc.)
|
|
459
|
-
"""
|
|
460
|
-
return self.session_manager.get_status()
|
|
461
|
-
|
|
462
|
-
def dedupe_sessions(
|
|
463
|
-
self,
|
|
464
|
-
max_events: int = 1,
|
|
465
|
-
move_dir_name: str = "_orphans",
|
|
466
|
-
dry_run: bool = False,
|
|
467
|
-
stale_extra_active: bool = True,
|
|
468
|
-
) -> dict[str, int]:
|
|
469
|
-
"""
|
|
470
|
-
Move low-signal sessions (e.g. SessionStart-only) out of the main sessions dir.
|
|
471
|
-
|
|
472
|
-
Args:
|
|
473
|
-
max_events: Maximum events threshold (sessions with <= this many events are moved)
|
|
474
|
-
move_dir_name: Directory name to move orphaned sessions to
|
|
475
|
-
dry_run: If True, only report what would be done without actually moving files
|
|
476
|
-
stale_extra_active: If True, also mark extra active sessions as stale
|
|
477
|
-
|
|
478
|
-
Returns:
|
|
479
|
-
Dict with counts: {"scanned": int, "moved": int, "missing": int, "staled_active": int, "kept_active": int}
|
|
480
|
-
|
|
481
|
-
Example:
|
|
482
|
-
>>> sdk = SDK(agent="claude")
|
|
483
|
-
>>> result = sdk.dedupe_sessions(max_events=1, dry_run=False)
|
|
484
|
-
>>> print(f"Scanned: {result['scanned']}, Moved: {result['moved']}")
|
|
485
|
-
"""
|
|
486
|
-
return self.session_manager.dedupe_orphan_sessions(
|
|
487
|
-
max_events=max_events,
|
|
488
|
-
move_dir_name=move_dir_name,
|
|
489
|
-
dry_run=dry_run,
|
|
490
|
-
stale_extra_active=stale_extra_active,
|
|
491
|
-
)
|
|
492
|
-
|
|
493
|
-
def track_activity(
|
|
494
|
-
self,
|
|
495
|
-
tool: str,
|
|
496
|
-
summary: str,
|
|
497
|
-
file_paths: list[str] | None = None,
|
|
498
|
-
success: bool = True,
|
|
499
|
-
feature_id: str | None = None,
|
|
500
|
-
session_id: str | None = None,
|
|
501
|
-
parent_activity_id: str | None = None,
|
|
502
|
-
payload: dict[str, Any] | None = None,
|
|
503
|
-
) -> Any:
|
|
504
|
-
"""
|
|
505
|
-
Track an activity in the current or specified session.
|
|
506
|
-
|
|
507
|
-
Args:
|
|
508
|
-
tool: Tool name (Edit, Bash, Read, etc.)
|
|
509
|
-
summary: Human-readable summary of the activity
|
|
510
|
-
file_paths: Files involved in this activity
|
|
511
|
-
success: Whether the tool call succeeded
|
|
512
|
-
feature_id: Explicit feature ID (skips attribution if provided)
|
|
513
|
-
session_id: Session ID (defaults to active session for current agent)
|
|
514
|
-
parent_activity_id: ID of parent activity (e.g., Skill/Task invocation)
|
|
515
|
-
payload: Optional rich payload data
|
|
516
|
-
|
|
517
|
-
Returns:
|
|
518
|
-
Created ActivityEntry with attribution
|
|
519
|
-
|
|
520
|
-
Example:
|
|
521
|
-
>>> sdk = SDK(agent="claude")
|
|
522
|
-
>>> entry = sdk.track_activity(
|
|
523
|
-
... tool="CustomTool",
|
|
524
|
-
... summary="Performed custom analysis",
|
|
525
|
-
... file_paths=["src/main.py"],
|
|
526
|
-
... success=True
|
|
527
|
-
... )
|
|
528
|
-
>>> print(f"Tracked: [{entry.tool}] {entry.summary}")
|
|
529
|
-
"""
|
|
530
|
-
# Find active session if not specified
|
|
531
|
-
if not session_id:
|
|
532
|
-
active = self.session_manager.get_active_session(agent=self._agent_id)
|
|
533
|
-
if not active:
|
|
534
|
-
raise ValueError(
|
|
535
|
-
"No active session. Start one with sdk.start_session()"
|
|
536
|
-
)
|
|
537
|
-
session_id = active.id
|
|
538
|
-
|
|
539
|
-
return self.session_manager.track_activity(
|
|
540
|
-
session_id=session_id,
|
|
541
|
-
tool=tool,
|
|
542
|
-
summary=summary,
|
|
543
|
-
file_paths=file_paths,
|
|
544
|
-
success=success,
|
|
545
|
-
feature_id=feature_id,
|
|
546
|
-
parent_activity_id=parent_activity_id,
|
|
547
|
-
payload=payload,
|
|
548
|
-
)
|
|
549
|
-
|
|
550
|
-
# =========================================================================
|
|
551
|
-
# Strategic Planning & Analytics (Agent-Friendly Interface)
|
|
552
|
-
# =========================================================================
|
|
553
|
-
|
|
554
|
-
def find_bottlenecks(self, top_n: int = 5) -> list[BottleneckDict]:
|
|
555
|
-
"""
|
|
556
|
-
Identify tasks blocking the most downstream work.
|
|
557
|
-
|
|
558
|
-
Note: Prefer using sdk.dep_analytics.find_bottlenecks() directly.
|
|
559
|
-
This method exists for backward compatibility.
|
|
560
|
-
|
|
561
|
-
Args:
|
|
562
|
-
top_n: Maximum number of bottlenecks to return
|
|
563
|
-
|
|
564
|
-
Returns:
|
|
565
|
-
List of bottleneck tasks with impact metrics
|
|
566
|
-
|
|
567
|
-
Example:
|
|
568
|
-
>>> sdk = SDK(agent="claude")
|
|
569
|
-
>>> # Preferred approach
|
|
570
|
-
>>> bottlenecks = sdk.dep_analytics.find_bottlenecks(top_n=3)
|
|
571
|
-
>>> # Or via SDK (backward compatibility)
|
|
572
|
-
>>> bottlenecks = sdk.find_bottlenecks(top_n=3)
|
|
573
|
-
>>> for bn in bottlenecks:
|
|
574
|
-
... print(f"{bn['title']} blocks {bn['blocks_count']} tasks")
|
|
575
|
-
"""
|
|
576
|
-
bottlenecks = self.dep_analytics.find_bottlenecks(top_n=top_n)
|
|
577
|
-
|
|
578
|
-
# Convert to agent-friendly dict format for backward compatibility
|
|
579
|
-
return [
|
|
580
|
-
{
|
|
581
|
-
"id": bn.id,
|
|
582
|
-
"title": bn.title,
|
|
583
|
-
"status": bn.status,
|
|
584
|
-
"priority": bn.priority,
|
|
585
|
-
"blocks_count": bn.transitive_blocking,
|
|
586
|
-
"impact_score": bn.weighted_impact,
|
|
587
|
-
"blocked_tasks": bn.blocked_nodes[:5],
|
|
588
|
-
}
|
|
589
|
-
for bn in bottlenecks
|
|
590
|
-
]
|
|
591
|
-
|
|
592
|
-
def get_parallel_work(self, max_agents: int = 5) -> dict[str, Any]:
|
|
593
|
-
"""
|
|
594
|
-
Find tasks that can be worked on simultaneously.
|
|
595
|
-
|
|
596
|
-
Note: Prefer using sdk.dep_analytics.find_parallelizable_work() directly.
|
|
597
|
-
This method exists for backward compatibility.
|
|
598
|
-
|
|
599
|
-
Args:
|
|
600
|
-
max_agents: Maximum number of parallel agents to plan for
|
|
601
|
-
|
|
602
|
-
Returns:
|
|
603
|
-
Dict with parallelization opportunities
|
|
604
|
-
|
|
605
|
-
Example:
|
|
606
|
-
>>> sdk = SDK(agent="claude")
|
|
607
|
-
>>> # Preferred approach
|
|
608
|
-
>>> report = sdk.dep_analytics.find_parallelizable_work(status="todo")
|
|
609
|
-
>>> # Or via SDK (backward compatibility)
|
|
610
|
-
>>> parallel = sdk.get_parallel_work(max_agents=3)
|
|
611
|
-
>>> print(f"Can work on {parallel['max_parallelism']} tasks at once")
|
|
612
|
-
>>> print(f"Ready now: {parallel['ready_now']}")
|
|
613
|
-
"""
|
|
614
|
-
report = self.dep_analytics.find_parallelizable_work(status="todo")
|
|
615
|
-
|
|
616
|
-
ready_now = (
|
|
617
|
-
report.dependency_levels[0].nodes if report.dependency_levels else []
|
|
618
|
-
)
|
|
619
|
-
|
|
620
|
-
return {
|
|
621
|
-
"max_parallelism": report.max_parallelism,
|
|
622
|
-
"ready_now": ready_now[:max_agents],
|
|
623
|
-
"total_ready": len(ready_now),
|
|
624
|
-
"level_count": len(report.dependency_levels),
|
|
625
|
-
"next_level": report.dependency_levels[1].nodes
|
|
626
|
-
if len(report.dependency_levels) > 1
|
|
627
|
-
else [],
|
|
628
|
-
}
|
|
629
|
-
|
|
630
|
-
def recommend_next_work(self, agent_count: int = 1) -> list[dict[str, Any]]:
|
|
631
|
-
"""
|
|
632
|
-
Get smart recommendations for what to work on next.
|
|
633
|
-
|
|
634
|
-
Note: Prefer using sdk.dep_analytics.recommend_next_tasks() directly.
|
|
635
|
-
This method exists for backward compatibility.
|
|
636
|
-
|
|
637
|
-
Considers priority, dependencies, and transitive impact.
|
|
638
|
-
|
|
639
|
-
Args:
|
|
640
|
-
agent_count: Number of agents/tasks to recommend
|
|
641
|
-
|
|
642
|
-
Returns:
|
|
643
|
-
List of recommended tasks with reasoning
|
|
644
|
-
|
|
645
|
-
Example:
|
|
646
|
-
>>> sdk = SDK(agent="claude")
|
|
647
|
-
>>> # Preferred approach
|
|
648
|
-
>>> recs = sdk.dep_analytics.recommend_next_tasks(agent_count=3)
|
|
649
|
-
>>> # Or via SDK (backward compatibility)
|
|
650
|
-
>>> recs = sdk.recommend_next_work(agent_count=3)
|
|
651
|
-
>>> for rec in recs:
|
|
652
|
-
... print(f"{rec['title']} (score: {rec['score']})")
|
|
653
|
-
... print(f" Reasons: {rec['reasons']}")
|
|
654
|
-
"""
|
|
655
|
-
recommendations = self.dep_analytics.recommend_next_tasks(
|
|
656
|
-
agent_count=agent_count, lookahead=5
|
|
657
|
-
)
|
|
658
|
-
|
|
659
|
-
return [
|
|
660
|
-
{
|
|
661
|
-
"id": rec.id,
|
|
662
|
-
"title": rec.title,
|
|
663
|
-
"priority": rec.priority,
|
|
664
|
-
"score": rec.score,
|
|
665
|
-
"reasons": rec.reasons,
|
|
666
|
-
"estimated_hours": rec.estimated_effort,
|
|
667
|
-
"unlocks_count": len(rec.unlocks),
|
|
668
|
-
"unlocks": rec.unlocks[:3],
|
|
669
|
-
}
|
|
670
|
-
for rec in recommendations.recommendations
|
|
671
|
-
]
|
|
672
|
-
|
|
673
|
-
def assess_risks(self) -> dict[str, Any]:
|
|
674
|
-
"""
|
|
675
|
-
Assess dependency-related risks in the project.
|
|
676
|
-
|
|
677
|
-
Note: Prefer using sdk.dep_analytics.assess_dependency_risk() directly.
|
|
678
|
-
This method exists for backward compatibility.
|
|
679
|
-
|
|
680
|
-
Identifies single points of failure, circular dependencies,
|
|
681
|
-
and orphaned tasks.
|
|
682
|
-
|
|
683
|
-
Returns:
|
|
684
|
-
Dict with risk assessment results
|
|
685
|
-
|
|
686
|
-
Example:
|
|
687
|
-
>>> sdk = SDK(agent="claude")
|
|
688
|
-
>>> # Preferred approach
|
|
689
|
-
>>> risk = sdk.dep_analytics.assess_dependency_risk()
|
|
690
|
-
>>> # Or via SDK (backward compatibility)
|
|
691
|
-
>>> risks = sdk.assess_risks()
|
|
692
|
-
>>> if risks['high_risk_count'] > 0:
|
|
693
|
-
... print(f"Warning: {risks['high_risk_count']} high-risk tasks")
|
|
694
|
-
"""
|
|
695
|
-
risk = self.dep_analytics.assess_dependency_risk()
|
|
696
|
-
|
|
697
|
-
return {
|
|
698
|
-
"high_risk_count": len(risk.high_risk),
|
|
699
|
-
"high_risk_tasks": [
|
|
700
|
-
{
|
|
701
|
-
"id": node.id,
|
|
702
|
-
"title": node.title,
|
|
703
|
-
"risk_score": node.risk_score,
|
|
704
|
-
"risk_factors": [f.description for f in node.risk_factors],
|
|
705
|
-
}
|
|
706
|
-
for node in risk.high_risk
|
|
707
|
-
],
|
|
708
|
-
"circular_dependencies": risk.circular_dependencies,
|
|
709
|
-
"orphaned_count": len(risk.orphaned_nodes),
|
|
710
|
-
"orphaned_tasks": risk.orphaned_nodes[:5],
|
|
711
|
-
"recommendations": risk.recommendations,
|
|
712
|
-
}
|
|
713
|
-
|
|
714
|
-
def analyze_impact(self, node_id: str) -> dict[str, Any]:
|
|
715
|
-
"""
|
|
716
|
-
Analyze the impact of completing a specific task.
|
|
717
|
-
|
|
718
|
-
Note: Prefer using sdk.dep_analytics.impact_analysis() directly.
|
|
719
|
-
This method exists for backward compatibility.
|
|
720
|
-
|
|
721
|
-
Args:
|
|
722
|
-
node_id: Task to analyze
|
|
723
|
-
|
|
724
|
-
Returns:
|
|
725
|
-
Dict with impact analysis
|
|
726
|
-
|
|
727
|
-
Example:
|
|
728
|
-
>>> sdk = SDK(agent="claude")
|
|
729
|
-
>>> # Preferred approach
|
|
730
|
-
>>> impact = sdk.dep_analytics.impact_analysis("feature-001")
|
|
731
|
-
>>> # Or via SDK (backward compatibility)
|
|
732
|
-
>>> impact = sdk.analyze_impact("feature-001")
|
|
733
|
-
>>> print(f"Completing this unlocks {impact['unlocks_count']} tasks")
|
|
734
|
-
"""
|
|
735
|
-
impact = self.dep_analytics.impact_analysis(node_id)
|
|
736
|
-
|
|
737
|
-
return {
|
|
738
|
-
"node_id": node_id,
|
|
739
|
-
"direct_dependents": impact.direct_dependents,
|
|
740
|
-
"total_impact": impact.transitive_dependents,
|
|
741
|
-
"completion_impact": impact.completion_impact,
|
|
742
|
-
"unlocks_count": len(impact.affected_nodes),
|
|
743
|
-
"affected_tasks": impact.affected_nodes[:10],
|
|
744
|
-
}
|
|
745
|
-
|
|
746
|
-
def get_work_queue(
|
|
747
|
-
self, agent_id: str | None = None, limit: int = 10, min_score: float = 0.0
|
|
748
|
-
) -> list[dict[str, Any]]:
|
|
749
|
-
"""
|
|
750
|
-
Get prioritized work queue showing recommended work, active work, and dependencies.
|
|
751
|
-
|
|
752
|
-
This method provides a comprehensive view of:
|
|
753
|
-
1. Recommended next work (using smart analytics)
|
|
754
|
-
2. Active work by all agents
|
|
755
|
-
3. Blocked items and what's blocking them
|
|
756
|
-
4. Priority-based scoring
|
|
757
|
-
|
|
758
|
-
Args:
|
|
759
|
-
agent_id: Agent to get queue for (defaults to SDK agent)
|
|
760
|
-
limit: Maximum number of items to return (default: 10)
|
|
761
|
-
min_score: Minimum score threshold (default: 0.0)
|
|
762
|
-
|
|
763
|
-
Returns:
|
|
764
|
-
List of work queue items with scoring and metadata:
|
|
765
|
-
- task_id: Work item ID
|
|
766
|
-
- title: Work item title
|
|
767
|
-
- status: Current status
|
|
768
|
-
- priority: Priority level
|
|
769
|
-
- score: Routing score
|
|
770
|
-
- complexity: Complexity level (if set)
|
|
771
|
-
- effort: Estimated effort (if set)
|
|
772
|
-
- blocks_count: Number of tasks this blocks (if any)
|
|
773
|
-
- blocked_by: List of blocking task IDs (if blocked)
|
|
774
|
-
- agent_assigned: Current assignee (if any)
|
|
775
|
-
- type: Work item type (feature, bug, spike, etc.)
|
|
776
|
-
|
|
777
|
-
Example:
|
|
778
|
-
>>> sdk = SDK(agent="claude")
|
|
779
|
-
>>> queue = sdk.get_work_queue(limit=5)
|
|
780
|
-
>>> for item in queue:
|
|
781
|
-
... print(f"{item['score']:.1f} - {item['title']}")
|
|
782
|
-
... if item.get('blocked_by'):
|
|
783
|
-
... print(f" ⚠️ Blocked by: {', '.join(item['blocked_by'])}")
|
|
784
|
-
"""
|
|
785
|
-
from htmlgraph.routing import AgentCapabilityRegistry, CapabilityMatcher
|
|
786
|
-
|
|
787
|
-
agent = agent_id or self._agent_id or "cli"
|
|
788
|
-
|
|
789
|
-
# Get all work item types
|
|
790
|
-
all_work = []
|
|
791
|
-
for collection_name in ["features", "bugs", "spikes", "chores", "epics"]:
|
|
792
|
-
collection = getattr(self, collection_name, None)
|
|
793
|
-
if collection:
|
|
794
|
-
# Get todo and blocked items
|
|
795
|
-
for item in collection.where(status="todo"):
|
|
796
|
-
all_work.append(item)
|
|
797
|
-
for item in collection.where(status="blocked"):
|
|
798
|
-
all_work.append(item)
|
|
799
|
-
|
|
800
|
-
if not all_work:
|
|
801
|
-
return []
|
|
802
|
-
|
|
803
|
-
# Get recommendations from analytics (uses strategic scoring)
|
|
804
|
-
recommendations = self.recommend_next_work(agent_count=limit * 2)
|
|
805
|
-
rec_scores = {rec["id"]: rec["score"] for rec in recommendations}
|
|
806
|
-
|
|
807
|
-
# Build routing registry
|
|
808
|
-
registry = AgentCapabilityRegistry()
|
|
809
|
-
|
|
810
|
-
# Register current agent
|
|
811
|
-
registry.register_agent(agent, capabilities=[], wip_limit=5)
|
|
812
|
-
|
|
813
|
-
# Get current WIP count for agent
|
|
814
|
-
wip_count = len(self.features.where(status="in-progress", agent_assigned=agent))
|
|
815
|
-
registry.set_wip(agent, wip_count)
|
|
816
|
-
|
|
817
|
-
# Score each work item
|
|
818
|
-
queue_items = []
|
|
819
|
-
for item in all_work:
|
|
820
|
-
# Use strategic score if available, otherwise use routing score
|
|
821
|
-
if item.id in rec_scores:
|
|
822
|
-
score = rec_scores[item.id]
|
|
823
|
-
else:
|
|
824
|
-
# Fallback to routing score
|
|
825
|
-
agent_profile = registry.get_agent(agent)
|
|
826
|
-
if agent_profile:
|
|
827
|
-
score = CapabilityMatcher.score_agent_task_fit(agent_profile, item)
|
|
828
|
-
else:
|
|
829
|
-
score = 0.0
|
|
830
|
-
|
|
831
|
-
# Apply minimum score filter
|
|
832
|
-
if score < min_score:
|
|
833
|
-
continue
|
|
834
|
-
|
|
835
|
-
# Build queue item
|
|
836
|
-
queue_item = {
|
|
837
|
-
"task_id": item.id,
|
|
838
|
-
"title": item.title,
|
|
839
|
-
"status": item.status,
|
|
840
|
-
"priority": item.priority,
|
|
841
|
-
"score": score,
|
|
842
|
-
"type": item.type,
|
|
843
|
-
"complexity": getattr(item, "complexity", None),
|
|
844
|
-
"effort": getattr(item, "estimated_effort", None),
|
|
845
|
-
"agent_assigned": getattr(item, "agent_assigned", None),
|
|
846
|
-
"blocks_count": 0,
|
|
847
|
-
"blocked_by": [],
|
|
848
|
-
}
|
|
849
|
-
|
|
850
|
-
# Add dependency information
|
|
851
|
-
if hasattr(item, "edges"):
|
|
852
|
-
# Check if this item blocks others
|
|
853
|
-
blocks = item.edges.get("blocks", [])
|
|
854
|
-
queue_item["blocks_count"] = len(blocks)
|
|
855
|
-
|
|
856
|
-
# Check if this item is blocked
|
|
857
|
-
blocked_by = item.edges.get("blocked_by", [])
|
|
858
|
-
queue_item["blocked_by"] = blocked_by
|
|
859
|
-
|
|
860
|
-
queue_items.append(queue_item)
|
|
861
|
-
|
|
862
|
-
# Sort by score (descending)
|
|
863
|
-
queue_items.sort(key=lambda x: x["score"], reverse=True)
|
|
864
|
-
|
|
865
|
-
# Limit results
|
|
866
|
-
return queue_items[:limit]
|
|
867
|
-
|
|
868
|
-
def work_next(
|
|
869
|
-
self,
|
|
870
|
-
agent_id: str | None = None,
|
|
871
|
-
auto_claim: bool = False,
|
|
872
|
-
min_score: float = 0.0,
|
|
873
|
-
) -> Node | None:
|
|
874
|
-
"""
|
|
875
|
-
Get the next best task for an agent using smart routing.
|
|
876
|
-
|
|
877
|
-
Uses both strategic analytics and capability-based routing to find
|
|
878
|
-
the optimal next task.
|
|
879
|
-
|
|
880
|
-
Args:
|
|
881
|
-
agent_id: Agent to get task for (defaults to SDK agent)
|
|
882
|
-
auto_claim: Automatically claim the task (default: False)
|
|
883
|
-
min_score: Minimum score threshold (default: 0.0)
|
|
884
|
-
|
|
885
|
-
Returns:
|
|
886
|
-
Next best Node or None if no suitable task found
|
|
887
|
-
|
|
888
|
-
Example:
|
|
889
|
-
>>> sdk = SDK(agent="claude")
|
|
890
|
-
>>> task = sdk.work_next(auto_claim=True)
|
|
891
|
-
>>> if task:
|
|
892
|
-
... print(f"Working on: {task.title}")
|
|
893
|
-
... # Task is automatically claimed and assigned
|
|
894
|
-
"""
|
|
895
|
-
agent = agent_id or self._agent_id or "cli"
|
|
896
|
-
|
|
897
|
-
# Get work queue - get more items since we filter for actionable (todo) only
|
|
898
|
-
queue = self.get_work_queue(agent_id=agent, limit=20, min_score=min_score)
|
|
899
|
-
|
|
900
|
-
if not queue:
|
|
901
|
-
return None
|
|
902
|
-
|
|
903
|
-
# Find the first actionable (todo) task - blocked tasks are not actionable
|
|
904
|
-
top_item = None
|
|
905
|
-
for item in queue:
|
|
906
|
-
if item["status"] == "todo":
|
|
907
|
-
top_item = item
|
|
908
|
-
break
|
|
909
|
-
|
|
910
|
-
if top_item is None:
|
|
911
|
-
return None
|
|
912
|
-
|
|
913
|
-
# Fetch the actual node
|
|
914
|
-
task = None
|
|
915
|
-
for collection_name in ["features", "bugs", "spikes", "chores", "epics"]:
|
|
916
|
-
collection = getattr(self, collection_name, None)
|
|
917
|
-
if collection:
|
|
918
|
-
try:
|
|
919
|
-
task = collection.get(top_item["task_id"])
|
|
920
|
-
if task:
|
|
921
|
-
break
|
|
922
|
-
except (ValueError, FileNotFoundError):
|
|
923
|
-
continue
|
|
924
|
-
|
|
925
|
-
if not task:
|
|
926
|
-
return None
|
|
927
|
-
|
|
928
|
-
# Auto-claim if requested
|
|
929
|
-
if auto_claim and task.status == "todo" and collection is not None:
|
|
930
|
-
# Claim the task
|
|
931
|
-
# collection.edit returns context manager or None
|
|
932
|
-
task_editor: Any = collection.edit(task.id)
|
|
933
|
-
if task_editor is not None:
|
|
934
|
-
# collection.edit returns context manager
|
|
935
|
-
with task_editor as t:
|
|
936
|
-
t.status = "in-progress"
|
|
937
|
-
t.agent_assigned = agent
|
|
938
|
-
|
|
939
|
-
result: Node | None = task
|
|
940
|
-
return result
|
|
941
|
-
|
|
942
|
-
# =========================================================================
|
|
943
|
-
# Planning Workflow Integration
|
|
944
|
-
# =========================================================================
|
|
945
|
-
|
|
946
|
-
def start_planning_spike(
|
|
947
|
-
self,
|
|
948
|
-
title: str,
|
|
949
|
-
context: str = "",
|
|
950
|
-
timebox_hours: float = 4.0,
|
|
951
|
-
auto_start: bool = True,
|
|
952
|
-
) -> Node:
|
|
953
|
-
"""
|
|
954
|
-
Create a planning spike to research and design before implementation.
|
|
955
|
-
|
|
956
|
-
This is for timeboxed investigation before creating a full track.
|
|
957
|
-
|
|
958
|
-
Args:
|
|
959
|
-
title: Spike title (e.g., "Plan User Authentication System")
|
|
960
|
-
context: Background information
|
|
961
|
-
timebox_hours: Time limit for spike (default: 4 hours)
|
|
962
|
-
auto_start: Automatically start the spike (default: True)
|
|
963
|
-
|
|
964
|
-
Returns:
|
|
965
|
-
Created spike Node
|
|
966
|
-
|
|
967
|
-
Example:
|
|
968
|
-
>>> sdk = SDK(agent="claude")
|
|
969
|
-
>>> spike = sdk.start_planning_spike(
|
|
970
|
-
... "Plan Real-time Notifications",
|
|
971
|
-
... context="Users need live updates. Research options.",
|
|
972
|
-
... timebox_hours=3.0
|
|
973
|
-
... )
|
|
974
|
-
"""
|
|
975
|
-
from htmlgraph.ids import generate_id
|
|
976
|
-
from htmlgraph.models import Spike, SpikeType
|
|
977
|
-
|
|
978
|
-
# Create spike directly (SpikeBuilder doesn't exist yet)
|
|
979
|
-
spike_id = generate_id(node_type="spike", title=title)
|
|
980
|
-
spike = Spike(
|
|
981
|
-
id=spike_id,
|
|
982
|
-
title=title,
|
|
983
|
-
type="spike",
|
|
984
|
-
status="in-progress" if auto_start and self._agent_id else "todo",
|
|
985
|
-
spike_type=SpikeType.ARCHITECTURAL,
|
|
986
|
-
timebox_hours=int(timebox_hours),
|
|
987
|
-
agent_assigned=self._agent_id if auto_start and self._agent_id else None,
|
|
988
|
-
steps=[
|
|
989
|
-
Step(description="Research existing solutions and patterns"),
|
|
990
|
-
Step(description="Define requirements and constraints"),
|
|
991
|
-
Step(description="Design high-level architecture"),
|
|
992
|
-
Step(description="Identify dependencies and risks"),
|
|
993
|
-
Step(description="Create implementation plan"),
|
|
994
|
-
],
|
|
995
|
-
content=f"<p>{context}</p>" if context else "",
|
|
996
|
-
edges={},
|
|
997
|
-
properties={},
|
|
998
|
-
)
|
|
999
|
-
|
|
1000
|
-
self._graph.add(spike)
|
|
1001
|
-
return spike
|
|
1002
|
-
|
|
1003
|
-
def create_track_from_plan(
|
|
1004
|
-
self,
|
|
1005
|
-
title: str,
|
|
1006
|
-
description: str,
|
|
1007
|
-
spike_id: str | None = None,
|
|
1008
|
-
priority: str = "high",
|
|
1009
|
-
requirements: list[str | tuple[str, str]] | None = None,
|
|
1010
|
-
phases: list[tuple[str, list[str]]] | None = None,
|
|
1011
|
-
) -> dict[str, Any]:
|
|
1012
|
-
"""
|
|
1013
|
-
Create a track with spec and plan from planning results.
|
|
1014
|
-
|
|
1015
|
-
Args:
|
|
1016
|
-
title: Track title
|
|
1017
|
-
description: Track description
|
|
1018
|
-
spike_id: Optional spike ID that led to this track
|
|
1019
|
-
priority: Track priority (default: "high")
|
|
1020
|
-
requirements: List of requirements (strings or (req, priority) tuples)
|
|
1021
|
-
phases: List of (phase_name, tasks) tuples for the plan
|
|
1022
|
-
|
|
1023
|
-
Returns:
|
|
1024
|
-
Dict with track, spec, and plan details
|
|
1025
|
-
|
|
1026
|
-
Example:
|
|
1027
|
-
>>> sdk = SDK(agent="claude")
|
|
1028
|
-
>>> track_info = sdk.create_track_from_plan(
|
|
1029
|
-
... title="User Authentication System",
|
|
1030
|
-
... description="OAuth 2.0 with JWT tokens",
|
|
1031
|
-
... requirements=[
|
|
1032
|
-
... ("OAuth 2.0 integration", "must-have"),
|
|
1033
|
-
... ("JWT token management", "must-have"),
|
|
1034
|
-
... "Password reset flow"
|
|
1035
|
-
... ],
|
|
1036
|
-
... phases=[
|
|
1037
|
-
... ("Phase 1: OAuth", ["Setup providers (2h)", "Callback (2h)"]),
|
|
1038
|
-
... ("Phase 2: JWT", ["Token signing (2h)", "Refresh (1.5h)"])
|
|
1039
|
-
... ]
|
|
1040
|
-
... )
|
|
1041
|
-
"""
|
|
1042
|
-
|
|
1043
|
-
builder = (
|
|
1044
|
-
self.tracks.builder()
|
|
1045
|
-
.title(title)
|
|
1046
|
-
.description(description)
|
|
1047
|
-
.priority(priority)
|
|
1048
|
-
)
|
|
1049
|
-
|
|
1050
|
-
# Add reference to planning spike if provided
|
|
1051
|
-
if spike_id:
|
|
1052
|
-
# Access internal data for track builder
|
|
1053
|
-
data: dict[str, Any] = builder._data # type: ignore[attr-defined]
|
|
1054
|
-
data["properties"]["planning_spike"] = spike_id
|
|
1055
|
-
|
|
1056
|
-
# Add spec if requirements provided
|
|
1057
|
-
if requirements:
|
|
1058
|
-
# Convert simple strings to (requirement, "must-have") tuples
|
|
1059
|
-
req_list = []
|
|
1060
|
-
for req in requirements:
|
|
1061
|
-
if isinstance(req, str):
|
|
1062
|
-
req_list.append((req, "must-have"))
|
|
1063
|
-
else:
|
|
1064
|
-
req_list.append(req)
|
|
1065
|
-
|
|
1066
|
-
builder.with_spec(
|
|
1067
|
-
overview=description,
|
|
1068
|
-
context=f"Track created from planning spike: {spike_id}"
|
|
1069
|
-
if spike_id
|
|
1070
|
-
else "",
|
|
1071
|
-
requirements=req_list,
|
|
1072
|
-
acceptance_criteria=[],
|
|
1073
|
-
)
|
|
1074
|
-
|
|
1075
|
-
# Add plan if phases provided
|
|
1076
|
-
if phases:
|
|
1077
|
-
builder.with_plan_phases(phases)
|
|
1078
|
-
|
|
1079
|
-
track = builder.create()
|
|
1080
|
-
|
|
1081
|
-
return {
|
|
1082
|
-
"track_id": track.id,
|
|
1083
|
-
"title": track.title,
|
|
1084
|
-
"has_spec": bool(requirements),
|
|
1085
|
-
"has_plan": bool(phases),
|
|
1086
|
-
"spike_id": spike_id,
|
|
1087
|
-
"priority": priority,
|
|
1088
|
-
}
|
|
1089
|
-
|
|
1090
|
-
    def smart_plan(
        self,
        description: str,
        create_spike: bool = True,
        timebox_hours: float = 4.0,
        research_completed: bool = False,
        research_findings: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
        """
        Smart planning workflow: analyzes project context and creates spike or track.

        This is the main entry point for planning new work. It:
        1. Checks current project state
        2. Provides context from strategic analytics
        3. Creates a planning spike or track as appropriate

        **IMPORTANT: Research Phase Required**
        For complex features, you should complete research BEFORE planning:
        1. Use /htmlgraph:research or WebSearch to gather best practices
        2. Document findings (libraries, patterns, anti-patterns)
        3. Pass research_completed=True and research_findings to this method
        4. This ensures planning is informed by industry best practices

        Research-first workflow:
        1. /htmlgraph:research "{topic}" → Gather external knowledge
        2. sdk.smart_plan(..., research_completed=True) → Plan with context
        3. Complete spike steps → Design solution
        4. Create track from plan → Structure implementation

        Args:
            description: What you want to plan (e.g., "User authentication system")
            create_spike: Create a spike for research (default: True)
            timebox_hours: If creating spike, time limit (default: 4 hours)
            research_completed: Whether research was performed (default: False)
            research_findings: Structured research findings (optional)

        Returns:
            Dict with planning context and created spike/track info

        Example:
            >>> sdk = SDK(agent="claude")
            >>> # WITH research (recommended for complex work)
            >>> research = {
            ...     "topic": "OAuth 2.0 best practices",
            ...     "sources_count": 5,
            ...     "recommended_library": "authlib",
            ...     "key_insights": ["Use PKCE", "Implement token rotation"]
            ... }
            >>> plan = sdk.smart_plan(
            ...     "User authentication system",
            ...     create_spike=True,
            ...     research_completed=True,
            ...     research_findings=research
            ... )
            >>> print(f"Created: {plan['spike_id']}")
            >>> print(f"Research informed: {plan['research_informed']}")
        """
        # Get project context from strategic analytics
        bottlenecks = self.find_bottlenecks(top_n=3)
        risks = self.assess_risks()
        parallel = self.get_parallel_work(max_agents=5)

        context = {
            "bottlenecks_count": len(bottlenecks),
            "high_risk_count": risks["high_risk_count"],
            "parallel_capacity": parallel["max_parallelism"],
            "description": description,
        }

        # Build context string with research info
        context_str = f"Project context:\n- {len(bottlenecks)} bottlenecks\n- {risks['high_risk_count']} high-risk items\n- {parallel['max_parallelism']} parallel capacity"

        if research_completed and research_findings:
            context_str += f"\n\nResearch completed:\n- Topic: {research_findings.get('topic', description)}"
            if "sources_count" in research_findings:
                context_str += f"\n- Sources: {research_findings['sources_count']}"
            if "recommended_library" in research_findings:
                context_str += (
                    f"\n- Recommended: {research_findings['recommended_library']}"
                )

        # Validation: warn if complex work planned without research
        is_complex = any(
            [
                "auth" in description.lower(),
                "security" in description.lower(),
                "real-time" in description.lower(),
                "websocket" in description.lower(),
                "oauth" in description.lower(),
                "performance" in description.lower(),
                "integration" in description.lower(),
            ]
        )

        warnings = []
        if is_complex and not research_completed:
            warnings.append(
                "⚠️ Complex feature detected without research. "
                "Consider using /htmlgraph:research first to gather best practices."
            )

        if create_spike:
            spike = self.start_planning_spike(
                title=f"Plan: {description}",
                context=context_str,
                timebox_hours=timebox_hours,
            )

            # Store research metadata in spike properties if provided
            if research_completed and research_findings:
                spike.properties["research_completed"] = True
                spike.properties["research_findings"] = research_findings
                self._graph.update(spike)

            result = {
                "type": "spike",
                "spike_id": spike.id,
                "title": spike.title,
                "status": spike.status,
                "timebox_hours": timebox_hours,
                "project_context": context,
                "research_informed": research_completed,
                "next_steps": [
                    "Research and design the solution"
                    if not research_completed
                    else "Design solution using research findings",
                    "Complete spike steps",
                    "Use SDK.create_track_from_plan() to create track",
                ],
            }

            if warnings:
                result["warnings"] = warnings

            return result
        else:
            # Direct track creation (for when you already know what to do)
            track_info = self.create_track_from_plan(
                title=description, description=f"Planned with context: {context}"
            )

            result = {
                "type": "track",
                **track_info,
                "project_context": context,
                "research_informed": research_completed,
                "next_steps": [
                    "Create features from track plan",
                    "Link features to track",
                    "Start implementation",
                ],
            }

            if warnings:
                result["warnings"] = warnings

            return result

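    # Illustrative sketch of the research-first flow for smart_plan (above).
    # The findings dict and description are hypothetical, not from the source.
    #
    #     findings = {"topic": "websocket scaling", "sources_count": 4}
    #     plan = sdk.smart_plan(
    #         "Real-time notifications",
    #         research_completed=True,
    #         research_findings=findings,
    #     )
    #     for warning in plan.get("warnings", []):
    #         print(warning)  # empty here, because research was supplied
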
    def plan_parallel_work(
        self,
        max_agents: int = 5,
        shared_files: list[str] | None = None,
    ) -> dict[str, Any]:
        """
        Plan and prepare parallel work execution.

        This integrates with smart_plan to enable parallel agent dispatch.
        Uses the 6-phase ParallelWorkflow:
        1. Pre-flight analysis (dependencies, risks)
        2. Context preparation (shared file caching)
        3. Prompt generation (for Task tool)

        Args:
            max_agents: Maximum parallel agents (default: 5)
            shared_files: Files to pre-cache for all agents

        Returns:
            Dict with parallel execution plan:
            - can_parallelize: Whether parallelization is recommended
            - analysis: Pre-flight analysis results
            - prompts: Ready-to-use Task tool prompts
            - recommendations: Optimization suggestions

        Example:
            >>> sdk = SDK(agent="orchestrator")
            >>> plan = sdk.plan_parallel_work(max_agents=3)
            >>> if plan["can_parallelize"]:
            ...     # Use prompts with Task tool
            ...     for p in plan["prompts"]:
            ...         Task(prompt=p["prompt"], description=p["description"])
        """
        from htmlgraph.parallel import ParallelWorkflow

        workflow = ParallelWorkflow(self)

        # Phase 1: Pre-flight analysis
        analysis = workflow.analyze(max_agents=max_agents)

        result = {
            "can_parallelize": analysis.can_parallelize,
            "max_parallelism": analysis.max_parallelism,
            "ready_tasks": analysis.ready_tasks,
            "blocked_tasks": analysis.blocked_tasks,
            "speedup_factor": analysis.speedup_factor,
            "recommendation": analysis.recommendation,
            "warnings": analysis.warnings,
            "prompts": [],
        }

        if not analysis.can_parallelize:
            result["reason"] = analysis.recommendation
            return result

        # Phase 2 & 3: Prepare tasks and generate prompts
        tasks = workflow.prepare_tasks(
            analysis.ready_tasks[:max_agents],
            shared_files=shared_files,
        )
        prompts = workflow.generate_prompts(tasks)

        result["prompts"] = prompts
        result["task_count"] = len(prompts)

        # Add efficiency guidelines
        result["guidelines"] = {
            "dispatch": "Send ALL Task calls in a SINGLE message for true parallelism",
            "patterns": [
                "Grep → Read (search before reading)",
                "Read → Edit → Bash (read, modify, test)",
                "Glob → Read (find files first)",
            ],
            "avoid": [
                "Sequential Task calls (loses parallelism)",
                "Read → Read → Read (cache instead)",
                "Edit → Edit → Edit (batch edits)",
            ],
        }

        return result

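    # Illustrative dispatch sketch for plan_parallel_work (above). Assumes the
    # Task tool available in the agent runtime; the shared file is hypothetical.
    #
    #     plan = sdk.plan_parallel_work(max_agents=3, shared_files=["README.md"])
    #     if plan["can_parallelize"]:
    #         for p in plan["prompts"]:  # send all Task calls in one message
    #             Task(prompt=p["prompt"], description=p["description"])
    #     else:
    #         print(plan["reason"])
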
    def aggregate_parallel_results(
        self,
        agent_ids: list[str],
    ) -> dict[str, Any]:
        """
        Aggregate results from parallel agent execution.

        Call this after parallel agents complete to:
        - Collect health metrics
        - Detect anti-patterns
        - Identify conflicts
        - Generate recommendations

        Args:
            agent_ids: List of agent/transcript IDs to analyze

        Returns:
            Dict with aggregated results and validation

        Example:
            >>> # After parallel work completes
            >>> results = sdk.aggregate_parallel_results([
            ...     "agent-abc123",
            ...     "agent-def456",
            ...     "agent-ghi789",
            ... ])
            >>> print(f"Health: {results['avg_health_score']:.0%}")
            >>> print(f"Conflicts: {results['conflicts']}")
        """
        from htmlgraph.parallel import ParallelWorkflow

        workflow = ParallelWorkflow(self)

        # Phase 5: Aggregate
        aggregate = workflow.aggregate(agent_ids)

        # Phase 6: Validate
        validation = workflow.validate(aggregate)

        return {
            "total_agents": aggregate.total_agents,
            "successful": aggregate.successful,
            "failed": aggregate.failed,
            "total_duration_seconds": aggregate.total_duration_seconds,
            "parallel_speedup": aggregate.parallel_speedup,
            "avg_health_score": aggregate.avg_health_score,
            "total_anti_patterns": aggregate.total_anti_patterns,
            "files_modified": aggregate.files_modified,
            "conflicts": aggregate.conflicts,
            "recommendations": aggregate.recommendations,
            "validation": validation,
            "all_passed": all(validation.values()),
        }

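    # Illustrative sketch for aggregate_parallel_results (above); the agent IDs
    # are hypothetical placeholders.
    #
    #     results = sdk.aggregate_parallel_results(["agent-1", "agent-2"])
    #     if not results["all_passed"]:
    #         for rec in results["recommendations"]:
    #             print(rec)
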
    # =========================================================================
    # Subagent Orchestration
    # =========================================================================

    @property
    def orchestrator(self) -> Any:
        """
        Get the subagent orchestrator for spawning explorer/coder agents.

        Lazy-loaded on first access.

        Returns:
            SubagentOrchestrator instance

        Example:
            >>> sdk = SDK(agent="claude")
            >>> explorer = sdk.orchestrator.spawn_explorer(
            ...     task="Find all API endpoints",
            ...     scope="src/"
            ... )
        """
        if self._orchestrator is None:
            from htmlgraph.orchestrator import SubagentOrchestrator

            self._orchestrator = SubagentOrchestrator(self)  # type: ignore[assignment]
        return self._orchestrator

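    # Note on the lazy property above: one SubagentOrchestrator is cached per
    # SDK instance, so repeated access reuses the same object (illustrative):
    #
    #     assert sdk.orchestrator is sdk.orchestrator
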
    def spawn_explorer(
        self,
        task: str,
        scope: str | None = None,
        patterns: list[str] | None = None,
        questions: list[str] | None = None,
    ) -> dict[str, Any]:
        """
        Spawn an explorer subagent for codebase discovery.

        Explorer agents are optimized for finding files, searching patterns,
        and mapping code without modifying anything.

        Args:
            task: What to explore/discover
            scope: Directory scope (e.g., "src/")
            patterns: Glob patterns to focus on
            questions: Specific questions to answer

        Returns:
            Dict with prompt ready for Task tool

        Note:
            Returns dict with 'prompt', 'description', 'subagent_type' keys.
            Returns empty dict if spawning fails.

        Example:
            >>> prompt = sdk.spawn_explorer(
            ...     task="Find all database models",
            ...     scope="src/models/",
            ...     questions=["What ORM is used?"]
            ... )
            >>> # Execute with Task tool
            >>> Task(prompt=prompt["prompt"], description=prompt["description"])

        See also:
            spawn_coder: Spawn implementation agent with feature context
            orchestrate: Full exploration + implementation workflow
        """
        subagent_prompt = self.orchestrator.spawn_explorer(
            task=task,
            scope=scope,
            patterns=patterns,
            questions=questions,
        )
        result: dict[str, Any] = subagent_prompt.to_task_kwargs()
        return result

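    # Illustrative sketch for spawn_explorer (above); the scope and question
    # are hypothetical.
    #
    #     kwargs = sdk.spawn_explorer(
    #         task="Map the HTTP routing layer",
    #         scope="src/api/",
    #         questions=["Which framework registers the routes?"],
    #     )
    #     if kwargs:  # an empty dict means spawning failed
    #         Task(prompt=kwargs["prompt"], description=kwargs["description"])
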
    def spawn_coder(
        self,
        feature_id: str,
        context: str | None = None,
        files_to_modify: list[str] | None = None,
        test_command: str | None = None,
    ) -> dict[str, Any]:
        """
        Spawn a coder subagent for implementing changes.

        Coder agents are optimized for reading, modifying, and testing code.

        Args:
            feature_id: Feature being implemented
            context: Results from explorer (string summary)
            files_to_modify: Specific files to change
            test_command: Command to verify changes

        Returns:
            Dict with prompt ready for Task tool

        Note:
            Returns dict with 'prompt', 'description', 'subagent_type' keys.
            Requires valid feature_id. Returns empty dict if feature not found.

        Example:
            >>> prompt = sdk.spawn_coder(
            ...     feature_id="feat-add-auth",
            ...     context=explorer_results,
            ...     test_command="uv run pytest tests/auth/"
            ... )
            >>> Task(prompt=prompt["prompt"], description=prompt["description"])

        See also:
            spawn_explorer: Explore codebase before implementation
            orchestrate: Full exploration + implementation workflow
        """
        subagent_prompt = self.orchestrator.spawn_coder(
            feature_id=feature_id,
            context=context,
            files_to_modify=files_to_modify,
            test_command=test_command,
        )
        result: dict[str, Any] = subagent_prompt.to_task_kwargs()
        return result

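    # Illustrative sketch for spawn_coder (above); the feature ID, context
    # string, and test command are hypothetical.
    #
    #     kwargs = sdk.spawn_coder(
    #         feature_id="feat-rate-limiting",
    #         context="Explorer found middleware in src/middleware.py",
    #         test_command="uv run pytest tests/middleware/",
    #     )
    #     if kwargs:
    #         Task(prompt=kwargs["prompt"], description=kwargs["description"])
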
    def orchestrate(
        self,
        feature_id: str,
        exploration_scope: str | None = None,
        test_command: str | None = None,
    ) -> dict[str, Any]:
        """
        Orchestrate full feature implementation with explorer and coder.

        Generates prompts for a two-phase workflow:
        1. Explorer discovers relevant code and patterns
        2. Coder implements the feature based on explorer findings

        Args:
            feature_id: Feature to implement
            exploration_scope: Directory to explore
            test_command: Test command for verification

        Returns:
            Dict with explorer and coder prompts

        Example:
            >>> prompts = sdk.orchestrate(
            ...     "feat-add-caching",
            ...     exploration_scope="src/cache/",
            ...     test_command="uv run pytest tests/cache/"
            ... )
            >>> # Phase 1: Run explorer
            >>> Task(prompt=prompts["explorer"]["prompt"], ...)
            >>> # Phase 2: Run coder with explorer results
            >>> Task(prompt=prompts["coder"]["prompt"], ...)

        See also:
            spawn_explorer: Just the exploration phase
            spawn_coder: Just the implementation phase
        """
        prompts = self.orchestrator.orchestrate_feature(
            feature_id=feature_id,
            exploration_scope=exploration_scope,
            test_command=test_command,
        )
        return {
            "explorer": prompts["explorer"].to_task_kwargs(),
            "coder": prompts["coder"].to_task_kwargs(),
            "workflow": [
                "1. Execute explorer Task and collect results",
                "2. Parse explorer results for files and patterns",
                "3. Execute coder Task with explorer context",
                "4. Verify coder results and update feature status",
            ],
        }

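    # Illustrative two-phase sketch for orchestrate (above); the feature ID and
    # scope are hypothetical, and explorer-output handling depends on the runtime.
    #
    #     prompts = sdk.orchestrate("feat-search", exploration_scope="src/search/")
    #     Task(prompt=prompts["explorer"]["prompt"])  # phase 1
    #     Task(prompt=prompts["coder"]["prompt"])     # phase 2, after phase 1 results
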
    # =========================================================================
    # Session Management Optimization
    # =========================================================================

    def get_session_start_info(
        self,
        include_git_log: bool = True,
        git_log_count: int = 5,
        analytics_top_n: int = 3,
        analytics_max_agents: int = 3,
    ) -> SessionStartInfo:
        """
        Get comprehensive session start information in a single call.

        Consolidates all information needed for session start into one method,
        reducing context usage from 6+ tool calls to 1.

        Args:
            include_git_log: Include recent git commits (default: True)
            git_log_count: Number of recent commits to include (default: 5)
            analytics_top_n: Number of bottlenecks/recommendations (default: 3)
            analytics_max_agents: Max agents for parallel work analysis (default: 3)

        Returns:
            Dict with comprehensive session start context:
            - status: Project status (nodes, collections, WIP)
            - active_work: Current active work item (if any)
            - features: List of features with status
            - sessions: Recent sessions
            - git_log: Recent commits (if include_git_log=True)
            - analytics: Strategic insights (bottlenecks, recommendations, parallel)

        Note:
            Returns empty dict {} if session context unavailable.
            Always check for expected keys before accessing.

        Example:
            >>> sdk = SDK(agent="claude")
            >>> info = sdk.get_session_start_info()
            >>> print(f"Project: {info['status']['total_nodes']} nodes")
            >>> print(f"WIP: {info['status']['in_progress_count']}")
            >>> if info.get('active_work'):
            ...     print(f"Active: {info['active_work']['title']}")
            >>> for bn in info['analytics']['bottlenecks']:
            ...     print(f"Bottleneck: {bn['title']}")
        """
        import subprocess

        result = {}

        # 1. Project status
        result["status"] = self.get_status()

        # 2. Active work item (validation status) - always include, even if None
        result["active_work"] = self.get_active_work_item()  # type: ignore[assignment]

        # 3. Features list (simplified)
        features_list: list[dict[str, object]] = []
        for feature in self.features.all():
            features_list.append(
                {
                    "id": feature.id,
                    "title": feature.title,
                    "status": feature.status,
                    "priority": feature.priority,
                    "steps_total": len(feature.steps),
                    "steps_completed": sum(1 for s in feature.steps if s.completed),
                }
            )
        result["features"] = features_list  # type: ignore[assignment]

        # 4. Sessions list (recent 20)
        sessions_list: list[dict[str, Any]] = []
        for session in self.sessions.all()[:20]:
            sessions_list.append(
                {
                    "id": session.id,
                    "status": session.status,
                    "agent": session.properties.get("agent", "unknown"),
                    "event_count": session.properties.get("event_count", 0),
                    "started": session.created.isoformat()
                    if hasattr(session, "created")
                    else None,
                }
            )
        result["sessions"] = sessions_list  # type: ignore[assignment]

        # 5. Git log (if requested)
        if include_git_log:
            try:
                git_result = subprocess.run(
                    ["git", "log", "--oneline", f"-{git_log_count}"],
                    capture_output=True,
                    text=True,
                    check=True,
                    cwd=self._directory.parent,
                )
                git_lines: list[str] = git_result.stdout.strip().split("\n")
                result["git_log"] = git_lines  # type: ignore[assignment]
            except (subprocess.CalledProcessError, FileNotFoundError):
                empty_list: list[str] = []
                result["git_log"] = empty_list  # type: ignore[assignment]

        # 6. Strategic analytics
        result["analytics"] = {
            "bottlenecks": self.find_bottlenecks(top_n=analytics_top_n),
            "recommendations": self.recommend_next_work(agent_count=analytics_top_n),
            "parallel": self.get_parallel_work(max_agents=analytics_max_agents),
        }

        return result  # type: ignore[return-value]

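    # Illustrative sketch of consuming the consolidated payload from
    # get_session_start_info (above). Keys may be absent, so .get is used as
    # the docstring advises.
    #
    #     info = sdk.get_session_start_info(git_log_count=3)
    #     print(info["status"].get("in_progress_count"))
    #     for line in info.get("git_log", []):
    #         print(line)
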
    def get_active_work_item(
        self,
        agent: str | None = None,
        filter_by_agent: bool = False,
        work_types: list[str] | None = None,
    ) -> ActiveWorkItem | None:
        """
        Get the currently active work item (in-progress status).

        This is used by the PreToolUse validation hook to check if code changes
        have an active work item for attribution.

        Args:
            agent: Agent ID for filtering (optional)
            filter_by_agent: If True, filter by agent. If False (default), return any active work item
            work_types: Work item types to check (defaults to all: features, bugs, spikes, chores, epics)

        Returns:
            Dict with work item details or None if no active work item found:
            - id: Work item ID
            - title: Work item title
            - type: Work item type (feature, bug, spike, chore, epic)
            - status: Should be "in-progress"
            - agent: Assigned agent
            - steps_total: Total steps
            - steps_completed: Completed steps
            - auto_generated: (spikes only) True if auto-generated spike
            - spike_subtype: (spikes only) "session-init" or "transition"

        Example:
            >>> sdk = SDK(agent="claude")
            >>> # Get any active work item
            >>> active = sdk.get_active_work_item()
            >>> if active:
            ...     print(f"Working on: {active['title']}")
            ...
            >>> # Get only this agent's active work item
            >>> active = sdk.get_active_work_item(filter_by_agent=True)
        """
        # Default to all work item types
        if work_types is None:
            work_types = ["features", "bugs", "spikes", "chores", "epics"]

        # Search across all work item types
        # Separate real work items from auto-generated spikes
        real_work_items = []
        auto_spikes = []

        for work_type in work_types:
            collection = getattr(self, work_type, None)
            if collection is None:
                continue

            # Query for in-progress items
            in_progress = collection.where(status="in-progress")

            for item in in_progress:
                # Filter by agent if requested
                if filter_by_agent:
                    agent_id = agent or self._agent_id
                    if agent_id and hasattr(item, "agent_assigned"):
                        if item.agent_assigned != agent_id:
                            continue

                item_dict = {
                    "id": item.id,
                    "title": item.title,
                    "type": item.type,
                    "status": item.status,
                    "agent": getattr(item, "agent_assigned", None),
                    "steps_total": len(item.steps) if hasattr(item, "steps") else 0,
                    "steps_completed": sum(1 for s in item.steps if s.completed)
                    if hasattr(item, "steps")
                    else 0,
                }

                # Add spike-specific fields for auto-spike detection
                if item.type == "spike":
                    item_dict["auto_generated"] = getattr(item, "auto_generated", False)
                    item_dict["spike_subtype"] = getattr(item, "spike_subtype", None)

                    # Separate auto-spikes from real work
                    # Auto-spikes are temporary tracking items (session-init, transition, conversation-init)
                    is_auto_spike = item_dict["auto_generated"] and item_dict[
                        "spike_subtype"
                    ] in ("session-init", "transition", "conversation-init")

                    if is_auto_spike:
                        auto_spikes.append(item_dict)
                    else:
                        # Real user-created spike
                        real_work_items.append(item_dict)
                else:
                    # Features, bugs, chores, epics are always real work
                    real_work_items.append(item_dict)

        # Prioritize real work items over auto-spikes
        # Auto-spikes should only show if there's NO other active work item
        if real_work_items:
            return real_work_items[0]  # type: ignore[return-value]

        if auto_spikes:
            return auto_spikes[0]  # type: ignore[return-value]

        return None

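    # Illustrative sketch of the hook-side check described in the docstring of
    # get_active_work_item (above); the printed messages are hypothetical.
    #
    #     active = sdk.get_active_work_item(filter_by_agent=True)
    #     if active is None:
    #         print("No in-progress work item; create one before editing code.")
    #     elif active.get("auto_generated"):
    #         print("Only an auto-generated spike is active.")
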
    # =========================================================================
    # Help & Documentation
    # =========================================================================

    def help(self, topic: str | None = None) -> str:
        """
        Get help on SDK usage.

        Args:
            topic: Optional topic (e.g., 'features', 'sessions', 'analytics', 'orchestration')

        Returns:
            Formatted help text

        Example:
            >>> sdk = SDK(agent="claude")
            >>> print(sdk.help())  # List all topics
            >>> print(sdk.help('features'))  # Feature collection help
            >>> print(sdk.help('analytics'))  # Analytics help

        See also:
            Python's built-in help(sdk) for full API documentation
            sdk.features, sdk.bugs, sdk.spikes for work item managers
        """
        if topic is None:
            return self._help_index()
        return self._help_topic(topic)

    def _help_index(self) -> str:
        """Return overview of all available methods/collections."""
        return """HtmlGraph SDK - Quick Reference

COLLECTIONS (Work Items):
  sdk.features - Feature work items with builder support
  sdk.bugs - Bug reports
  sdk.spikes - Investigation and research spikes
  sdk.chores - Maintenance and chore tasks
  sdk.epics - Large bodies of work
  sdk.phases - Project phases

COLLECTIONS (Non-Work):
  sdk.sessions - Agent sessions
  sdk.tracks - Work tracks with builder support
  sdk.agents - Agent information

LEARNING (Active Learning):
  sdk.patterns - Workflow patterns (optimal/anti-pattern)
  sdk.insights - Session health insights
  sdk.metrics - Aggregated time-series metrics

CORE METHODS:
  sdk.summary() - Get project summary
  sdk.my_work() - Get current agent's workload
  sdk.next_task() - Get next available task
  sdk.reload() - Reload all data from disk

SESSION MANAGEMENT:
  sdk.start_session() - Start a new session
  sdk.end_session() - End a session
  sdk.track_activity() - Track activity in session
  sdk.dedupe_sessions() - Clean up low-signal sessions
  sdk.get_status() - Get project status

STRATEGIC ANALYTICS:
  sdk.find_bottlenecks() - Identify blocking tasks
  sdk.recommend_next_work() - Get smart recommendations
  sdk.get_parallel_work() - Find parallelizable work
  sdk.assess_risks() - Assess dependency risks
  sdk.analyze_impact() - Analyze task impact

WORK QUEUE:
  sdk.get_work_queue() - Get prioritized work queue
  sdk.work_next() - Get next best task (smart routing)

PLANNING WORKFLOW:
  sdk.smart_plan() - Smart planning with research
  sdk.start_planning_spike() - Create planning spike
  sdk.create_track_from_plan() - Create track from plan
  sdk.plan_parallel_work() - Plan parallel execution
  sdk.aggregate_parallel_results() - Aggregate parallel results

ORCHESTRATION:
  sdk.spawn_explorer() - Spawn explorer subagent
  sdk.spawn_coder() - Spawn coder subagent
  sdk.orchestrate() - Orchestrate feature implementation

SESSION OPTIMIZATION:
  sdk.get_session_start_info() - Get comprehensive session start info
  sdk.get_active_work_item() - Get currently active work item

ANALYTICS INTERFACES:
  sdk.analytics - Work type analytics
  sdk.dep_analytics - Dependency analytics
  sdk.context - Context analytics

ERROR HANDLING:
  Lookup (.get) - Returns None if not found
  Query (.where) - Returns empty list on no matches
  Edit (.edit) - Raises NodeNotFoundError if missing
  Batch (.mark_done) - Returns count of successful operations

For detailed help on a topic:
  sdk.help('features') - Feature collection methods
  sdk.help('analytics') - Analytics methods
  sdk.help('sessions') - Session management
  sdk.help('orchestration') - Subagent orchestration
  sdk.help('planning') - Planning workflow
"""

    def __dir__(self) -> list[str]:
        """Return attributes with most useful ones first for discoverability."""
        priority = [
            # Work item managers
            "features",
            "bugs",
            "spikes",
            "chores",
            "epics",
            "phases",
            # Non-work collections
            "tracks",
            "sessions",
            "agents",
            # Learning collections
            "patterns",
            "insights",
            "metrics",
            # Orchestration
            "spawn_explorer",
            "spawn_coder",
            "orchestrate",
            # Session management
            "get_session_start_info",
            "start_session",
            "end_session",
            # Strategic analytics
            "find_bottlenecks",
            "recommend_next_work",
            "get_parallel_work",
            # Work queue
            "get_work_queue",
            "work_next",
            # Help
            "help",
        ]
        # Get all attributes
        all_attrs = object.__dir__(self)
        # Separate into priority, regular, and dunder attributes
        regular = [a for a in all_attrs if not a.startswith("_") and a not in priority]
        dunder = [a for a in all_attrs if a.startswith("_")]
        # Return priority items first, then regular, then dunder
        return priority + regular + dunder

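    # Note on __dir__ above: calling sdk.__dir__() directly preserves the
    # priority ordering, while the built-in dir() re-sorts alphabetically.
    # Illustrative:
    #
    #     print(sdk.__dir__()[:6])
    #     # ['features', 'bugs', 'spikes', 'chores', 'epics', 'phases']
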
    def _help_topic(self, topic: str) -> str:
        """Return specific help for topic."""
        topic = topic.lower()

        if topic in ["feature", "features"]:
            return """FEATURES COLLECTION

Create and manage feature work items with builder support.

COMMON METHODS:
  sdk.features.create(title) - Create new feature (returns builder)
  sdk.features.get(id) - Get feature by ID
  sdk.features.all() - Get all features
  sdk.features.where(**filters) - Query features
  sdk.features.edit(id) - Edit feature (context manager)
  sdk.features.mark_done(ids) - Mark features as done
  sdk.features.assign(ids, agent) - Assign features to agent

BUILDER PATTERN:
  feature = (sdk.features.create("User Auth")
      .set_priority("high")
      .add_steps(["Login", "Logout", "Reset password"])
      .add_edge("blocked_by", "feat-database")
      .save())

QUERIES:
  high_priority = sdk.features.where(status="todo", priority="high")
  my_features = sdk.features.where(agent_assigned="claude")
  blocked = sdk.features.where(status="blocked")

CONTEXT MANAGER:
  with sdk.features.edit("feat-001") as f:
      f.status = "in-progress"
      f.complete_step(0)
      # Auto-saves on exit

See also: sdk.help('bugs'), sdk.help('spikes'), sdk.help('chores')
"""

        elif topic in ["bug", "bugs"]:
            return """BUGS COLLECTION

Create and manage bug reports.

COMMON METHODS:
  sdk.bugs.create(title) - Create new bug (returns builder)
  sdk.bugs.get(id) - Get bug by ID
  sdk.bugs.all() - Get all bugs
  sdk.bugs.where(**filters) - Query bugs
  sdk.bugs.edit(id) - Edit bug (context manager)

BUILDER PATTERN:
  bug = (sdk.bugs.create("Login fails on Safari")
      .set_priority("critical")
      .add_steps(["Reproduce", "Fix", "Test"])
      .save())

QUERIES:
  critical = sdk.bugs.where(priority="critical", status="todo")
  my_bugs = sdk.bugs.where(agent_assigned="claude")

See also: sdk.help('features'), sdk.help('spikes')
"""

        elif topic in ["spike", "spikes"]:
            return """SPIKES COLLECTION

Create and manage investigation/research spikes.

COMMON METHODS:
  sdk.spikes.create(title) - Create new spike (returns builder)
  sdk.spikes.get(id) - Get spike by ID
  sdk.spikes.all() - Get all spikes
  sdk.spikes.where(**filters) - Query spikes

BUILDER PATTERN:
  spike = (sdk.spikes.create("Research OAuth providers")
      .set_priority("high")
      .add_steps(["Research", "Document findings"])
      .save())

PLANNING SPIKES:
  spike = sdk.start_planning_spike(
      "Plan User Auth",
      context="Users need login",
      timebox_hours=4.0
  )

See also: sdk.help('planning'), sdk.help('features')
"""

        elif topic in ["chore", "chores"]:
            return """CHORES COLLECTION

Create and manage maintenance and chore tasks.

COMMON METHODS:
  sdk.chores.create(title) - Create new chore (returns builder)
  sdk.chores.get(id) - Get chore by ID
  sdk.chores.all() - Get all chores
  sdk.chores.where(**filters) - Query chores

BUILDER PATTERN:
  chore = (sdk.chores.create("Update dependencies")
      .set_priority("medium")
      .add_steps(["Run uv update", "Test", "Commit"])
      .save())

See also: sdk.help('features'), sdk.help('bugs')
"""

        elif topic in ["epic", "epics"]:
            return """EPICS COLLECTION

Create and manage large bodies of work.

COMMON METHODS:
  sdk.epics.create(title) - Create new epic (returns builder)
  sdk.epics.get(id) - Get epic by ID
  sdk.epics.all() - Get all epics
  sdk.epics.where(**filters) - Query epics

BUILDER PATTERN:
  epic = (sdk.epics.create("Authentication System")
      .set_priority("critical")
      .add_steps(["Design", "Implement", "Test", "Deploy"])
      .save())

See also: sdk.help('features'), sdk.help('tracks')
"""

        elif topic in ["track", "tracks"]:
            return """TRACKS COLLECTION

Create and manage work tracks with builder support.

COMMON METHODS:
  sdk.tracks.create(title) - Create new track (returns builder)
  sdk.tracks.builder() - Get track builder
  sdk.tracks.get(id) - Get track by ID
  sdk.tracks.all() - Get all tracks
  sdk.tracks.where(**filters) - Query tracks

BUILDER PATTERN:
  track = (sdk.tracks.builder()
      .title("User Authentication")
      .description("OAuth 2.0 system")
      .priority("high")
      .with_spec(
          overview="OAuth integration",
          requirements=[("OAuth 2.0", "must-have")],
          acceptance_criteria=["Login works"]
      )
      .with_plan_phases([
          ("Phase 1", ["Setup (2h)", "Config (1h)"]),
          ("Phase 2", ["Testing (2h)"])
      ])
      .create())

FROM PLANNING:
  track_info = sdk.create_track_from_plan(
      title="User Auth",
      description="OAuth system",
      requirements=[("OAuth", "must-have")],
      phases=[("Phase 1", ["Setup", "Config"])]
  )

See also: sdk.help('planning'), sdk.help('features')
"""

        elif topic in ["session", "sessions"]:
            return """SESSION MANAGEMENT

Create and manage agent sessions.

SESSION METHODS:
  sdk.start_session(title=...) - Start new session
  sdk.end_session(id) - End session
  sdk.track_activity(...) - Track activity in session
  sdk.dedupe_sessions(...) - Clean up low-signal sessions
  sdk.get_status() - Get project status

SESSION COLLECTION:
  sdk.sessions.get(id) - Get session by ID
  sdk.sessions.all() - Get all sessions
  sdk.sessions.where(**filters) - Query sessions

TYPICAL WORKFLOW:
  # Session start hook handles this automatically
  session = sdk.start_session(title="Fix login bug")

  # Track activities (handled by hooks)
  sdk.track_activity(
      tool="Edit",
      summary="Fixed auth logic",
      file_paths=["src/auth.py"],
      success=True
  )

  # End session
  sdk.end_session(
      session.id,
      handoff_notes="Login bug fixed, needs testing"
  )

CLEANUP:
  # Remove orphaned sessions (<=1 event)
  result = sdk.dedupe_sessions(max_events=1, dry_run=False)

See also: sdk.help('analytics')
"""

        elif topic in ["analytic", "analytics", "strategic"]:
            return """STRATEGIC ANALYTICS

Find bottlenecks, recommend work, and assess risks.

DEPENDENCY ANALYTICS:
  bottlenecks = sdk.find_bottlenecks(top_n=5)
  # Returns tasks blocking the most work

  parallel = sdk.get_parallel_work(max_agents=5)
  # Returns tasks that can run simultaneously

  recs = sdk.recommend_next_work(agent_count=3)
  # Returns smart recommendations with scoring

  risks = sdk.assess_risks()
  # Returns high-risk tasks and circular deps

  impact = sdk.analyze_impact("feat-001")
  # Returns what unlocks if you complete this task

DIRECT ACCESS (preferred):
  sdk.dep_analytics.find_bottlenecks(top_n=5)
  sdk.dep_analytics.recommend_next_tasks(agent_count=3)
  sdk.dep_analytics.find_parallelizable_work(status="todo")
  sdk.dep_analytics.assess_dependency_risk()
  sdk.dep_analytics.impact_analysis("feat-001")

WORK TYPE ANALYTICS:
  sdk.analytics.get_wip_by_type()
  sdk.analytics.get_completion_rates()
  sdk.analytics.get_agent_workload()

CONTEXT ANALYTICS:
  sdk.context.track_usage(...)
  sdk.context.get_usage_report()

See also: sdk.help('planning'), sdk.help('work_queue')
"""

        elif topic in ["queue", "work_queue", "routing"]:
            return """WORK QUEUE & ROUTING

Get prioritized work using smart routing.

WORK QUEUE:
  queue = sdk.get_work_queue(limit=10, min_score=0.0)
  # Returns prioritized list with scores

  for item in queue:
      print(f"{item['score']:.1f} - {item['title']}")
      if item.get('blocked_by'):
          print(f" Blocked by: {item['blocked_by']}")

SMART ROUTING:
  task = sdk.work_next(auto_claim=True, min_score=0.5)
  # Returns next best task using analytics + capabilities

  if task:
      print(f"Working on: {task.title}")
      # Task is auto-claimed and assigned

SIMPLE NEXT TASK:
  task = sdk.next_task(priority="high", auto_claim=True)
  # Simpler version without smart routing

See also: sdk.help('analytics')
"""

        elif topic in ["plan", "planning", "workflow"]:
            return """PLANNING WORKFLOW

Research, plan, and create tracks for new work.

SMART PLANNING:
  plan = sdk.smart_plan(
      "User authentication system",
      create_spike=True,
      timebox_hours=4.0,
      research_completed=True, # IMPORTANT: Do research first!
      research_findings={
          "topic": "OAuth 2.0 best practices",
          "recommended_library": "authlib",
          "key_insights": ["Use PKCE", "Token rotation"]
      }
  )

PLANNING SPIKE:
  spike = sdk.start_planning_spike(
      "Plan Real-time Notifications",
      context="Users need live updates",
      timebox_hours=3.0
  )

CREATE TRACK FROM PLAN:
  track_info = sdk.create_track_from_plan(
      title="User Authentication",
      description="OAuth 2.0 with JWT",
      requirements=[
          ("OAuth 2.0 integration", "must-have"),
          ("JWT token management", "must-have")
      ],
      phases=[
          ("Phase 1: OAuth", ["Setup (2h)", "Callback (2h)"]),
          ("Phase 2: JWT", ["Token signing (2h)"])
      ]
  )

PARALLEL PLANNING:
  plan = sdk.plan_parallel_work(max_agents=3)
  if plan["can_parallelize"]:
      for p in plan["prompts"]:
          Task(prompt=p["prompt"])

  # After parallel work completes
  results = sdk.aggregate_parallel_results([
      "agent-1", "agent-2", "agent-3"
  ])

See also: sdk.help('tracks'), sdk.help('spikes')
"""

        elif topic in ["orchestration", "orchestrate", "subagent", "subagents"]:
            return """SUBAGENT ORCHESTRATION

Spawn explorer and coder subagents for complex work.

EXPLORER (Discovery):
  prompt = sdk.spawn_explorer(
      task="Find all API endpoints",
      scope="src/api/",
      patterns=["*.py"],
      questions=["What framework is used?"]
  )
  # Execute with Task tool
  Task(prompt=prompt["prompt"], description=prompt["description"])

CODER (Implementation):
  prompt = sdk.spawn_coder(
      feature_id="feat-add-auth",
      context=explorer_results,
      files_to_modify=["src/auth.py"],
      test_command="uv run pytest tests/auth/"
  )
  Task(prompt=prompt["prompt"], description=prompt["description"])

FULL ORCHESTRATION:
  prompts = sdk.orchestrate(
      "feat-add-caching",
      exploration_scope="src/cache/",
      test_command="uv run pytest tests/cache/"
  )

  # Phase 1: Explorer
  Task(prompt=prompts["explorer"]["prompt"])

  # Phase 2: Coder (with explorer results)
  Task(prompt=prompts["coder"]["prompt"])

WORKFLOW:
  1. Explorer discovers code patterns and files
  2. Coder implements changes using explorer findings
  3. Both agents auto-track in sessions
  4. Feature gets updated with progress

See also: sdk.help('planning')
"""

        elif topic in ["optimization", "session_start", "active_work"]:
            return """SESSION OPTIMIZATION

Reduce context usage with optimized methods.

SESSION START INFO:
  info = sdk.get_session_start_info(
      include_git_log=True,
      git_log_count=5,
      analytics_top_n=3
  )

  # Single call returns:
  # - status: Project status
  # - active_work: Current work item
  # - features: All features
  # - sessions: Recent sessions
  # - git_log: Recent commits
  # - analytics: Bottlenecks, recommendations, parallel

ACTIVE WORK ITEM:
  active = sdk.get_active_work_item()
  if active:
      print(f"Working on: {active['title']}")
      print(f"Progress: {active['steps_completed']}/{active['steps_total']}")

  # Filter by agent
  active = sdk.get_active_work_item(filter_by_agent=True)

BENEFITS:
  - 6+ tool calls → 1 method call
  - Reduced token usage
  - Faster session initialization
  - All context in one place

See also: sdk.help('sessions')
"""

        else:
            return f"""Unknown topic: '{topic}'

Available topics:
  - features, bugs, spikes, chores, epics (work collections)
  - tracks, sessions, agents (non-work collections)
  - analytics, strategic (dependency and work analytics)
  - work_queue, routing (smart task routing)
  - planning, workflow (planning and track creation)
  - orchestration, subagents (explorer/coder spawning)
  - optimization, session_start (context optimization)

Try: sdk.help() for full overview
"""