htmlgraph 0.27.6__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- htmlgraph/__init__.py +9 -1
- htmlgraph/api/broadcast.py +316 -0
- htmlgraph/api/broadcast_routes.py +357 -0
- htmlgraph/api/broadcast_websocket.py +115 -0
- htmlgraph/api/cost_alerts_websocket.py +7 -16
- htmlgraph/api/main.py +110 -1
- htmlgraph/api/offline.py +776 -0
- htmlgraph/api/presence.py +446 -0
- htmlgraph/api/reactive.py +455 -0
- htmlgraph/api/reactive_routes.py +195 -0
- htmlgraph/api/static/broadcast-demo.html +393 -0
- htmlgraph/api/sync_routes.py +184 -0
- htmlgraph/api/websocket.py +112 -37
- htmlgraph/broadcast_integration.py +227 -0
- htmlgraph/cli_commands/sync.py +207 -0
- htmlgraph/db/schema.py +214 -0
- htmlgraph/hooks/event_tracker.py +53 -2
- htmlgraph/reactive_integration.py +148 -0
- htmlgraph/session_context.py +1669 -0
- htmlgraph/session_manager.py +70 -0
- htmlgraph/sync/__init__.py +21 -0
- htmlgraph/sync/git_sync.py +458 -0
- {htmlgraph-0.27.6.dist-info → htmlgraph-0.28.0.dist-info}/METADATA +1 -1
- {htmlgraph-0.27.6.dist-info → htmlgraph-0.28.0.dist-info}/RECORD +31 -16
- {htmlgraph-0.27.6.data → htmlgraph-0.28.0.data}/data/htmlgraph/dashboard.html +0 -0
- {htmlgraph-0.27.6.data → htmlgraph-0.28.0.data}/data/htmlgraph/styles.css +0 -0
- {htmlgraph-0.27.6.data → htmlgraph-0.28.0.data}/data/htmlgraph/templates/AGENTS.md.template +0 -0
- {htmlgraph-0.27.6.data → htmlgraph-0.28.0.data}/data/htmlgraph/templates/CLAUDE.md.template +0 -0
- {htmlgraph-0.27.6.data → htmlgraph-0.28.0.data}/data/htmlgraph/templates/GEMINI.md.template +0 -0
- {htmlgraph-0.27.6.dist-info → htmlgraph-0.28.0.dist-info}/WHEEL +0 -0
- {htmlgraph-0.27.6.dist-info → htmlgraph-0.28.0.dist-info}/entry_points.txt +0 -0
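The bulk of this release is the new `htmlgraph/session_context.py` module (+1,669 lines), shown in full below. After upgrading, the quickest sanity check mirrors the probe the module's new `VersionChecker` class runs internally (a minimal sketch; assumes htmlgraph is importable in the active environment):

```python
import htmlgraph

# session_context.VersionChecker shells out to run exactly this check
print(htmlgraph.__version__)  # expect "0.28.0" after upgrading
```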
@@ -0,0 +1,1669 @@
"""
SessionContextBuilder - Builds session start context for AI agents.

Extracts all context-building business logic from the session-start hook
into testable SDK methods. The hook becomes a thin wrapper that calls
these methods.

Architecture:
- SessionContextBuilder: Assembles complete session start context
- VersionChecker: Checks installed vs PyPI version
- GitHooksInstaller: Installs pre-commit hooks
- All methods are independently testable

Usage:
    from htmlgraph.session_context import SessionContextBuilder

    builder = SessionContextBuilder(graph_dir, project_dir)
    context = builder.build(session_id="sess-001")
    # Returns formatted Markdown string with all session context
"""

from __future__ import annotations

import asyncio
import json
import logging
import shutil
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import TYPE_CHECKING, Any

logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    pass


# ---------------------------------------------------------------------------
# Static context templates (moved from session-start.py)
# ---------------------------------------------------------------------------

HTMLGRAPH_VERSION_WARNING = """## HTMLGRAPH UPDATE AVAILABLE

**Installed:** {installed} -> **Latest:** {latest}

Update now to get the latest features and fixes:
```bash
uv pip install --upgrade htmlgraph
```

---

"""

HTMLGRAPH_PROCESS_NOTICE = """## HTMLGRAPH DEVELOPMENT PROCESS ACTIVE

**HtmlGraph is tracking this session. All activity is logged to HTML files.**

### Feature Creation Decision Framework

**Use this framework for EVERY user request:**

Create a **FEATURE** if ANY apply:
- >30 minutes work
- 3+ files
- New tests needed
- Multi-component impact
- Hard to revert
- Needs docs

Implement **DIRECTLY** if ALL apply:
- Single file
- <30 minutes
- Trivial change
- Easy to revert
- No tests needed

**When in doubt, CREATE A FEATURE.** Over-tracking is better than losing attribution.

---

### Quick Reference

**IMPORTANT:** Always use `uv run` when running htmlgraph commands.

**Check Status:**
```bash
uv run htmlgraph status
uv run htmlgraph feature list
uv run htmlgraph session list
```

**Feature Commands:**
- `uv run htmlgraph feature start <id>` - Start working on a feature
- `uv run htmlgraph feature complete <id>` - Mark feature as done
- `uv run htmlgraph feature primary <id>` - Set primary feature for attribution

**Track Creation (for multi-feature work):**
```python
from htmlgraph import SDK
sdk = SDK(agent="claude")

# Create track with spec and plan in one command
track = sdk.tracks.builder() \\
    .title("Feature Name") \\
    .priority("high") \\
    .with_spec(overview="...", requirements=[...]) \\
    .with_plan_phases([("Phase 1", ["Task 1 (2h)", ...])]) \\
    .create()

# Link features to track
feature = sdk.features.create("Feature") \\
    .set_track(track.id) \\
    .add_steps([...]) \\
    .save()
```

**See:** `docs/TRACK_BUILDER_QUICK_START.md` for complete track creation guide

**Session Management:**
- Sessions auto-start when you begin working
- Activities are attributed to in-progress features
- Session history preserved in `.htmlgraph/sessions/`

**Dashboard:**
```bash
uv run htmlgraph serve
# Open http://localhost:8080
```

**Key Files:**
- `.htmlgraph/features/` - Feature HTML files
- `.htmlgraph/sessions/` - Session HTML files with activity logs
- `index.html` - Dashboard (open in browser)
"""

TRACKER_WORKFLOW = """## HTMLGRAPH TRACKING WORKFLOW

**CRITICAL: Follow this checklist for EVERY session.**

### Session Start (DO THESE FIRST)
1. Check active features: `uv run htmlgraph status`
2. Review session context and decide what to work on
3. **DECIDE:** Create feature or implement directly?
   - Create FEATURE if ANY apply: >30min, 3+ files, needs tests, multi-component, hard to revert
   - Implement DIRECTLY if ALL apply: single file, <30min, trivial, easy to revert

### During Work (DO CONTINUOUSLY)
1. **Feature MUST be in-progress before writing code**
   - Start feature: `sdk.features.start("feature-id")` or `uv run htmlgraph feature start <id>`
2. **CRITICAL:** Mark each step complete IMMEDIATELY after finishing it:
   ```python
   from htmlgraph import SDK
   sdk = SDK(agent="claude")
   with sdk.features.edit("feature-id") as f:
       f.steps[0].completed = True # First step done
       f.steps[1].completed = True # Second step done
   ```
3. Document decisions as you make them
4. Test incrementally - don't wait until the end

### Session End (MUST DO BEFORE COMPLETING FEATURE)
1. **RUN TESTS:** All tests MUST pass
2. **VERIFY STEPS:** ALL feature steps marked complete
3. **CLEAN CODE:** Remove debug code, console.logs, TODOs
4. **COMMIT WORK:** Git commit IMMEDIATELY (include feature ID in message)
5. **COMPLETE FEATURE:** `sdk.features.complete("feature-id")` or `uv run htmlgraph feature complete <id>`

### SDK Usage (ALWAYS USE SDK, NEVER DIRECT FILE EDITS)
**FORBIDDEN:** `Write('/path/.htmlgraph/features/...', ...)` `Edit('/path/.htmlgraph/...')`
**REQUIRED:** Use SDK for ALL operations on `.htmlgraph/` files

```python
from htmlgraph import SDK
sdk = SDK(agent="claude")

# Create and work on features
feature = sdk.features.create("Title").set_priority("high").add_steps(["Step 1", "Step 2"]).save()
with sdk.features.edit("feature-id") as f:
    f.status = "done"

# Query and batch operations
high_priority = sdk.features.where(status="todo", priority="high")
sdk.features.batch_update(["feat-1", "feat-2"], {"status": "done"})
```

**For complete SDK documentation -> see `docs/AGENTS.md`**
"""

RESEARCH_FIRST_DEBUGGING = """## RESEARCH-FIRST DEBUGGING (IMPERATIVE)

**CRITICAL: NEVER implement solutions based on assumptions. ALWAYS research documentation first.**

This principle emerged from dogfooding HtmlGraph development. Violating it results in:
- Multiple trial-and-error attempts before researching
- Implementing "fixes" based on guesses instead of documentation
- Not using available research tools and agents
- Wasted time and context on wrong approaches

### The Research-First Workflow (ALWAYS FOLLOW)

1. **Research First** - Use `sdk.help()` to understand the API
   ```python
   from htmlgraph import SDK
   sdk = SDK(agent="claude")

   # ALWAYS START HERE
   print(sdk.help()) # Overview of all SDK methods
   print(sdk.help('tracks')) # Tracks-specific help
   print(sdk.help('planning')) # Planning workflow help
   print(sdk.help('features')) # Feature collection help
   print(sdk.help('analytics')) # Analytics methods
   ```

2. **Understand** - Read the help output carefully
   - Look for correct method signatures
   - Note parameter types and names
   - Understand return types
   - Find examples in the help text

3. **Implement** - Apply fix based on actual understanding
   - Use the correct API signature from help
   - Copy example patterns from help text
   - Test incrementally

4. **Validate** - Test to confirm the approach works
   - Run tests before and after
   - Verify behavior matches expectations

5. **Document** - Capture learning in HtmlGraph spike
   - Record what you learned
   - Note what the correct approach was
   - Help future debugging

### When You Get an Error

**WRONG APPROACH (what NOT to do):**
```python
# Error: "object has no attribute 'set_priority'"
# Response: Try track.with_priority() -> error
# Try track._priority = "high" -> error
# Try track.priority("high") -> error
```

**CORRECT APPROACH (what TO do):**
```python
# Error: "object has no attribute 'set_priority'"
# IMMEDIATE RESPONSE:
# 1. Stop and use sdk.help('tracks')
# 2. Read the help output
# 3. Look for correct method: create_track_from_plan() with requirements parameter
# 4. Implement based on actual API
# 5. Test and verify
```

### Remember

**"Fixing errors immediately by researching is faster than letting them accumulate through trial-and-error."**

Your context is precious. Use `sdk.help()` first, implement second, test third.
"""

ORCHESTRATOR_DIRECTIVES = """## ORCHESTRATOR DIRECTIVES (IMPERATIVE)

**YOU ARE THE ORCHESTRATOR.** Follow these directives:

### 1. ALWAYS DELEGATE - Even "Simple" Operations

**CRITICAL INSIGHT:** What looks like "one tool call" often becomes 2, 3, 4+ calls.

**ALWAYS delegate, even if you think it's simple:**
- "Just read one file" -> Delegate to Explorer
- "Just edit one file" -> Delegate to Coder
- "Just run tests" -> Delegate to Tester
- "Just search for X" -> Delegate to Explorer

**Why ALWAYS delegate:**
- Tool outputs are unknown until execution
- "One operation" often expands into many
- Each subagent has self-contained context
- Orchestrator only pays for: Task() call + Task output (not intermediate tool calls)
- Your context stays strategic, not filled with implementation details

**ONLY execute directly:**
- Task() - Delegation itself
- AskUserQuestion() - Clarifying with user
- TodoWrite() - Tracking work
- SDK operations - Creating features/work items

**Everything else -> DELEGATE.**

### 2. YOUR ONLY JOB: Provide Clear Task Descriptions

**You don't execute, you describe what needs executing.**

**Good delegation:**
```python
Task(
    prompt="Find all files in src/auth/ that handle JWT validation.
    List the files and explain what each one does.",
    subagent_type="Explore"
)

Task(
    prompt="Fix the bug in src/auth/jwt.py where tokens expire immediately.
    The issue is in the validate_token() function.
    Run tests after fixing to verify the fix works.",
    subagent_type="general-purpose"
)
```

**Your job:**
- Describe the task clearly
- Provide context the subagent needs
- Specify what success looks like
- Give enough detail for self-contained execution

**Not your job:**
- Execute the task yourself
- Guess how many tool calls it will take
- Read files to "check if it's simple"

### 3. CREATE Work Items FIRST
**Before ANY implementation, create features:**
```python
from htmlgraph import SDK
sdk = SDK(agent="claude-code")
feature = sdk.features.create("Feature Title").save()
```

**Why:** Work items enable learning, pattern detection, and progress tracking.

### 4. PARALLELIZE Independent Tasks
**Spawn multiple `Task()` calls in a single message when tasks don't depend on each other.**

### 5. CONTEXT COST MODEL

**Understand what uses YOUR context:**
- Task() call (tiny - just the prompt)
- Task output (small - summary from subagent)
- Subagent's tool calls (NOT in your context!)
- Subagent's file reads (NOT in your context!)
- Subagent's intermediate results (NOT in your context!)

**Your context is precious. Delegate everything.**

### 6. HTMLGRAPH DELEGATION PATTERN (CRITICAL)

**PROBLEM:** TaskOutput tool is unreliable - subagent results often can't be retrieved.

**SOLUTION:** Use HtmlGraph for subagent communication.

**Step 1 - Orchestrator delegates with reporting instructions:**

Include this in every Task prompt:
CRITICAL - Report Results to HtmlGraph:
from htmlgraph import SDK
sdk = SDK(agent='explorer')
sdk.spikes.create('Task Results').set_findings('...').save()

**Step 2 - Wait for Task completion.**

**Step 3 - Retrieve results from HtmlGraph:**

Use this command:
uv run python -c "from htmlgraph import SDK; findings = SDK().spikes.get_latest(agent='explorer'); print(findings[0].findings if findings else 'No results')"

---

**YOU ARE THE ARCHITECT. SUBAGENTS ARE BUILDERS. DELEGATE EVERYTHING.**
"""


# ---------------------------------------------------------------------------
# VersionChecker - Checks installed vs latest version
# ---------------------------------------------------------------------------


class VersionChecker:
    """Check installed htmlgraph version against PyPI."""

    @staticmethod
    def get_installed_version() -> str | None:
        """Get the installed htmlgraph version."""
        try:
            result = subprocess.run(
                [
                    "uv",
                    "run",
                    "python",
                    "-c",
                    "import htmlgraph; print(htmlgraph.__version__)",
                ],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode == 0:
                return result.stdout.strip()
        except Exception:
            pass

        # Fallback to pip show
        try:
            result = subprocess.run(
                ["pip", "show", "htmlgraph"],
                capture_output=True,
                text=True,
                timeout=10,
            )
            if result.returncode == 0:
                for line in result.stdout.splitlines():
                    if line.startswith("Version:"):
                        return line.split(":", 1)[1].strip()
        except Exception:
            pass

        return None

    @staticmethod
    def get_latest_version() -> str | None:
        """Get the latest version from PyPI."""
        try:
            import urllib.request

            req = urllib.request.Request(
                "https://pypi.org/pypi/htmlgraph/json",
                headers={
                    "Accept": "application/json",
                    "User-Agent": "htmlgraph-version-check",
                },
            )
            with urllib.request.urlopen(req, timeout=5) as response:
                data = json.loads(response.read().decode())
                version: str | None = data.get("info", {}).get("version")
                return version
        except Exception:
            return None

    @staticmethod
    def compare_versions(installed: str, latest: str) -> bool:
        """
        Check if installed version is outdated.

        Returns True if installed < latest.
        """
        try:
            installed_parts = [int(x) for x in installed.split(".")]
            latest_parts = [int(x) for x in latest.split(".")]
            return installed_parts < latest_parts
        except ValueError:
            return installed != latest

    @classmethod
    def get_version_status(cls) -> dict[str, Any]:
        """
        Get version status information.

        Returns:
            Dict with keys: installed_version, latest_version, is_outdated
        """
        installed = cls.get_installed_version()
        latest = cls.get_latest_version()

        is_outdated = False
        if installed and latest and installed != latest:
            is_outdated = cls.compare_versions(installed, latest)

        return {
            "installed_version": installed,
            "latest_version": latest,
            "is_outdated": is_outdated,
        }


# ---------------------------------------------------------------------------
# GitHooksInstaller - Installs pre-commit hooks
# ---------------------------------------------------------------------------


class GitHooksInstaller:
    """Install pre-commit hooks from project scripts."""

    @staticmethod
    def install(project_dir: str | Path) -> bool:
        """
        Install pre-commit hooks if not already installed.

        Args:
            project_dir: Path to the project root

        Returns:
            True if hooks were installed or already exist
        """
        project_dir = Path(project_dir)
        hooks_source = project_dir / "scripts" / "hooks" / "pre-commit"
        hooks_target = project_dir / ".git" / "hooks" / "pre-commit"

        # Skip if not a git repo or hooks source doesn't exist
        if not (project_dir / ".git").exists():
            return False
        if not hooks_source.exists():
            return False

        # Skip if hook already installed and up to date
        if hooks_target.exists():
            try:
                if hooks_source.read_text() == hooks_target.read_text():
                    return True # Already installed and current
            except Exception:
                pass

        # Install the hook
        try:
            shutil.copy2(hooks_source, hooks_target)
            hooks_target.chmod(0o755)
            return True
        except Exception:
            return False


# ---------------------------------------------------------------------------
# SessionContextBuilder - Assembles session start context
# ---------------------------------------------------------------------------


class SessionContextBuilder:
    """
    Builds complete session start context for AI agents.

    Extracts and encapsulates all context-building logic from the
    session-start hook into a testable, reusable class.

    Usage:
        builder = SessionContextBuilder(graph_dir, project_dir)
        context = builder.build(session_id="sess-001")
    """

    def __init__(
        self,
        graph_dir: str | Path,
        project_dir: str | Path,
    ) -> None:
        self.graph_dir = Path(graph_dir)
        self.project_dir = Path(project_dir)

        # Lazy-loaded components
        self._features: list[dict[str, Any]] | None = None
        self._stats: dict[str, Any] | None = None
        self._sessions: list[dict[str, Any]] | None = None

    # -------------------------------------------------------------------
    # Data loading
    # -------------------------------------------------------------------

    def _load_features(self) -> list[dict[str, Any]]:
        """Load features from the graph directory."""
        if self._features is not None:
            return self._features

        features_dir = self.graph_dir / "features"
        if not features_dir.exists():
            self._features = []
            return self._features

        try:
            from htmlgraph.converter import node_to_dict
            from htmlgraph.graph import HtmlGraph

            graph = HtmlGraph(features_dir, auto_load=True)
            self._features = [node_to_dict(node) for node in graph.nodes.values()]
        except Exception as e:
            logger.warning(f"Could not load features: {e}")
            self._features = []

        return self._features

    def _load_sessions(self) -> list[dict[str, Any]]:
        """Load sessions from the graph directory."""
        if self._sessions is not None:
            return self._sessions

        sessions_dir = self.graph_dir / "sessions"
        if not sessions_dir.exists():
            self._sessions = []
            return self._sessions

        try:
            from htmlgraph.converter import SessionConverter, session_to_dict

            converter = SessionConverter(sessions_dir)
            sessions = converter.load_all()
            self._sessions = [session_to_dict(s) for s in sessions]
        except Exception as e:
            logger.warning(f"Could not load sessions: {e}")
            self._sessions = []

        return self._sessions

    def get_feature_summary(self) -> tuple[list[dict[str, Any]], dict[str, Any]]:
        """
        Get features and calculate statistics.

        Returns:
            Tuple of (features_list, stats_dict)
        """
        features = self._load_features()

        stats: dict[str, Any] = {
            "total": len(features),
            "done": sum(1 for f in features if f.get("status") == "done"),
            "in_progress": sum(1 for f in features if f.get("status") == "in-progress"),
            "blocked": sum(1 for f in features if f.get("status") == "blocked"),
            "todo": sum(1 for f in features if f.get("status") == "todo"),
        }
        stats["percentage"] = (
            int(stats["done"] * 100 / stats["total"]) if stats["total"] > 0 else 0
        )

        self._stats = stats
        return features, stats

    def get_session_summary(self) -> dict[str, Any] | None:
        """Get the most recent ended session as a summary dict."""
        sessions = self._load_sessions()

        def parse_ts(value: str | None) -> datetime:
            if not value:
                return datetime.min.replace(tzinfo=timezone.utc)
            try:
                dt = datetime.fromisoformat(value.replace("Z", "+00:00"))
                if dt.tzinfo is None:
                    dt = dt.replace(tzinfo=timezone.utc)
                return dt
            except Exception:
                return datetime.min.replace(tzinfo=timezone.utc)

        ended = [s for s in sessions if s.get("status") == "ended"]
        if ended:
            ended.sort(
                key=lambda s: parse_ts(s.get("ended_at") or s.get("last_activity")),
                reverse=True,
            )
            return ended[0]
        return None

    def get_strategic_recommendations(self, agent_count: int = 1) -> dict[str, Any]:
        """
        Get strategic recommendations using SDK analytics.

        Args:
            agent_count: Number of agents for parallel work calculation

        Returns:
            Dict with recommendations, bottlenecks, parallel_capacity
        """
        try:
            from htmlgraph.sdk import SDK

            sdk = SDK(directory=self.graph_dir, agent="claude-code")

            recs = sdk.recommend_next_work(agent_count=agent_count)
            bottlenecks = sdk.find_bottlenecks(top_n=3)
            parallel = sdk.get_parallel_work(max_agents=5)

            return {
                "recommendations": recs[:3] if recs else [],
                "bottlenecks": bottlenecks,
                "parallel_capacity": parallel,
            }
        except Exception as e:
            logger.warning(f"Could not get strategic recommendations: {e}")
            return {
                "recommendations": [],
                "bottlenecks": [],
                "parallel_capacity": {
                    "max_parallelism": 0,
                    "ready_now": 0,
                    "total_ready": 0,
                },
            }

    def get_active_agents(self) -> list[dict[str, Any]]:
        """Get information about other active agents."""
        try:
            sessions_dir = self.graph_dir / "sessions"
            if not sessions_dir.exists():
                return []

            from htmlgraph.converter import SessionConverter

            converter = SessionConverter(sessions_dir)
            all_sessions = converter.load_all()

            active_agents = []
            for session in all_sessions:
                if session.status == "active":
                    active_agents.append(
                        {
                            "agent": session.agent,
                            "session_id": session.id,
                            "started_at": (
                                session.started_at.isoformat()
                                if session.started_at
                                else None
                            ),
                            "event_count": session.event_count,
                            "worked_on": (
                                list(session.worked_on)
                                if hasattr(session, "worked_on")
                                else []
                            ),
                        }
                    )

            return active_agents
        except Exception as e:
            logger.warning(f"Could not get active agents: {e}")
            return []

    def detect_feature_conflicts(
        self,
        features: list[dict[str, Any]] | None = None,
        active_agents: list[dict[str, Any]] | None = None,
    ) -> list[dict[str, Any]]:
        """
        Detect features being worked on by multiple agents simultaneously.

        Args:
            features: Features list (loaded if not provided)
            active_agents: Active agents list (loaded if not provided)

        Returns:
            List of conflict dicts with feature_id, title, agents
        """
        if features is None:
            features = self._load_features()
        if active_agents is None:
            active_agents = self.get_active_agents()

        conflicts: list[dict[str, Any]] = []

        try:
            # Build map of feature -> agents
            feature_agents: dict[str, list[str]] = {}

            for agent_info in active_agents:
                for feature_id in agent_info.get("worked_on", []):
                    if feature_id not in feature_agents:
                        feature_agents[feature_id] = []
                    feature_agents[feature_id].append(agent_info["agent"])

            # Find features with multiple agents
            for feature_id, agents in feature_agents.items():
                if len(agents) > 1:
                    feature = next(
                        (f for f in features if f.get("id") == feature_id), None
                    )
                    if feature:
                        conflicts.append(
                            {
                                "feature_id": feature_id,
                                "title": feature.get("title", "Unknown"),
                                "agents": agents,
                            }
                        )
        except Exception as e:
            logger.warning(f"Could not detect conflicts: {e}")

        return conflicts

    # -------------------------------------------------------------------
    # Git helpers
    # -------------------------------------------------------------------

    def get_head_commit(self) -> str | None:
        """Get current HEAD commit hash (short form)."""
        try:
            result = subprocess.run(
                ["git", "rev-parse", "--short", "HEAD"],
                capture_output=True,
                text=True,
                cwd=str(self.project_dir),
                timeout=5,
            )
            if result.returncode == 0:
                return result.stdout.strip()
        except Exception:
            pass
        return None

    def get_recent_commits(self, count: int = 5) -> list[str]:
        """Get recent git commits."""
        try:
            result = subprocess.run(
                ["git", "log", "--oneline", f"-{count}"],
                capture_output=True,
                text=True,
                cwd=str(self.project_dir),
                timeout=5,
            )
            if result.returncode == 0:
                return result.stdout.strip().split("\n")
        except Exception:
            pass
        return []

    # -------------------------------------------------------------------
    # Orchestrator mode
    # -------------------------------------------------------------------

    def activate_orchestrator_mode(self, session_id: str) -> tuple[bool, str]:
        """
        Activate orchestrator mode unconditionally.

        Plugin installed = Orchestrator mode enabled.
        This is the default operating mode for all htmlgraph projects.

        Args:
            session_id: Current session ID

        Returns:
            (is_active, enforcement_level)
        """
        try:
            from htmlgraph.orchestrator_mode import OrchestratorModeManager

            manager = OrchestratorModeManager(self.graph_dir)
            mode = manager.load()

            if mode.disabled_by_user:
                return False, "disabled"

            if not mode.enabled:
                manager.enable(session_id=session_id, level="strict", auto=True)
                return True, "strict"

            return True, mode.enforcement_level

        except Exception as e:
            logger.warning(f"Could not manage orchestrator mode: {e}")
            return False, "error"

    def _build_orchestrator_status(self, active: bool, level: str) -> str:
        """
        Build orchestrator status section for context.

        Args:
            active: Whether orchestrator mode is active
            level: Enforcement level

        Returns:
            Formatted status message
        """
        if not active or level == "disabled":
            return (
                "## ORCHESTRATOR MODE: INACTIVE\n\n"
                "Orchestrator mode has been manually disabled. "
                "This is unusual - the default mode is ORCHESTRATOR ENABLED.\n\n"
                "**Note:** Without orchestrator mode, you will fill context with "
                "implementation details instead of delegating to subagents.\n\n"
                "To re-enable: `uv run htmlgraph orchestrator enable`\n"
            )

        if level == "error":
            return (
                "## ORCHESTRATOR MODE: ERROR\n\n"
                "Warning: Could not determine orchestrator mode status. "
                "Proceeding without enforcement.\n"
            )

        enforcement_desc = (
            "blocks direct implementation"
            if level == "strict"
            else "provides guidance only"
        )

        return (
            f"## ORCHESTRATOR MODE: ACTIVE ({level} enforcement)\n\n"
            f"**Default operating mode** - Plugin installed = Orchestrator enabled.\n\n"
            f"**Enforcement:** This mode {enforcement_desc}. "
            f"Follow the delegation workflow in ORCHESTRATOR DIRECTIVES below.\n\n"
            f"**Why:** Orchestrator mode saves 80%+ context by delegating "
            f"implementation to subagents instead of executing directly.\n\n"
            f"To disable: `uv run htmlgraph orchestrator disable`\n"
            f"To change level: `uv run htmlgraph orchestrator set-level guidance`\n"
        )

    # -------------------------------------------------------------------
    # CIGS context
    # -------------------------------------------------------------------

    def get_cigs_context(self, session_id: str) -> str:
        """
        Generate CIGS (Computational Imperative Guidance System) context.

        Args:
            session_id: Current session ID

        Returns:
            Formatted CIGS context string
        """
        try:
            from htmlgraph.cigs import (
                AutonomyRecommender,
                PatternDetector,
                ViolationTracker,
            )

            tracker = ViolationTracker(self.graph_dir)
            tracker.set_session_id(session_id)

            recent_violations = tracker.get_recent_violations(sessions=5)
            session_summary = tracker.get_session_violations()

            # Convert violations to tool history format
            history = [
                {
                    "tool": v.tool,
                    "command": v.tool_params.get("command", ""),
                    "file_path": v.tool_params.get("file_path", ""),
                    "prompt": "",
                    "timestamp": v.timestamp,
                }
                for v in recent_violations
            ]

            detector = PatternDetector()
            patterns = detector.detect_all_patterns(history)

            # Calculate compliance history
            compliance_history = [
                max(
                    0.0,
                    1.0
                    - (
                        len([v for v in recent_violations if v.session_id == sid]) / 5.0
                    ),
                )
                for sid in set(v.session_id for v in recent_violations[-5:])
            ]

            recommender = AutonomyRecommender()
            autonomy = recommender.recommend(
                violations=session_summary,
                patterns=patterns,
                compliance_history=compliance_history if compliance_history else None,
            )

            # Build CIGS context
            context_parts = [
                "## CIGS Status (Computational Imperative Guidance System)",
                "",
                f"**Autonomy Level:** {autonomy.level.upper()}",
                f"**Messaging Intensity:** {autonomy.messaging_intensity}",
                f"**Enforcement Mode:** {autonomy.enforcement_mode}",
                "",
                f"**Reason:** {autonomy.reason}",
            ]

            if session_summary.total_violations > 0:
                context_parts.extend(
                    [
                        "",
                        "### Session Violations",
                        f"- Total violations: {session_summary.total_violations}",
                        f"- Compliance rate: {session_summary.compliance_rate:.0%}",
                        f"- Wasted tokens: {session_summary.total_waste_tokens}",
                    ]
                )

                if session_summary.circuit_breaker_triggered:
                    context_parts.append("- **Circuit breaker active** (3+ violations)")

            if patterns:
                context_parts.extend(
                    [
                        "",
                        "### Detected Anti-Patterns",
                    ]
                )
                for pattern in patterns:
                    context_parts.append(f"- **{pattern.name}**: {pattern.description}")
                    if pattern.delegation_suggestion:
                        context_parts.append(
                            f" - Fix: {pattern.delegation_suggestion}"
                        )

            context_parts.extend(
                [
                    "",
                    "### Delegation Reminders",
                ]
            )

            if autonomy.level == "operator":
                context_parts.extend(
                    [
                        "STRICT MODE ACTIVE - You MUST delegate ALL operations except:",
                        "- Task() - Delegation itself",
                        "- AskUserQuestion() - User clarification",
                        "- TodoWrite() - Work tracking",
                        "- SDK operations - Feature/session management",
                        "",
                        "**ALL other operations MUST be delegated to subagents.**",
                    ]
                )
            elif autonomy.level == "collaborator":
                context_parts.extend(
                    [
                        "ACTIVE GUIDANCE - Focus on delegation:",
                        "- Exploration: Use spawn_gemini() (FREE)",
                        "- Code changes: Use spawn_codex() or Task()",
                        "- Git operations: Use spawn_copilot()",
                        "",
                        "Direct tool use should be rare and well-justified.",
                    ]
                )
            elif autonomy.level == "consultant":
                context_parts.extend(
                    [
                        "MODERATE GUIDANCE - Remember delegation patterns:",
                        "- Multi-file exploration -> spawn_gemini()",
                        "- Code changes with tests -> Task() or spawn_codex()",
                        "- Git operations -> spawn_copilot()",
                    ]
                )
            else: # observer
                context_parts.extend(
                    [
                        "MINIMAL GUIDANCE - You're doing well!",
                        "Continue delegating as appropriate. Guidance will escalate if patterns change.",
                    ]
                )

            return "\n".join(context_parts)

        except Exception as e:
            logger.warning(f"Could not generate CIGS context: {e}")
            return ""

    # -------------------------------------------------------------------
    # System prompt
    # -------------------------------------------------------------------

    def load_system_prompt(self) -> str | None:
        """
        Load system prompt from plugin default or project override.

        Returns:
            System prompt content, or None if not available
        """
        try:
            from htmlgraph.system_prompts import SystemPromptManager

            manager = SystemPromptManager(self.graph_dir)
            prompt: str | None = manager.get_active()

            if prompt:
                logger.info(f"Loaded system prompt ({len(prompt)} chars)")
                return prompt
            else:
                logger.warning("System prompt not found")
                return None

        except ImportError:
            logger.warning("SDK not available, falling back to legacy loading")
            prompt_file = self.project_dir / ".claude" / "system-prompt.md"
            if not prompt_file.exists():
                return None
            try:
                content = prompt_file.read_text(encoding="utf-8")
                logger.info(f"Loaded system prompt ({len(content)} chars)")
                return content
            except Exception as e:
                logger.error(f"Failed to load system prompt: {e}")
                return None

        except Exception as e:
            logger.error(f"Failed to load system prompt via SDK: {e}")
            prompt_file = self.project_dir / ".claude" / "system-prompt.md"
            if prompt_file.exists():
                try:
                    content = prompt_file.read_text(encoding="utf-8")
                    logger.info(
                        f"Loaded system prompt via fallback ({len(content)} chars)"
                    )
                    return content
                except Exception:
                    pass
            return None

    def validate_token_count(
        self, prompt: str, max_tokens: int = 500
    ) -> tuple[bool, int]:
        """
        Validate prompt token count using SDK validator.

        Args:
            prompt: Text to count tokens for
            max_tokens: Maximum allowed tokens

        Returns:
            (is_valid, token_count) tuple
        """
        try:
            from htmlgraph.system_prompts import SystemPromptValidator

            result = SystemPromptValidator.validate(prompt, max_tokens=max_tokens)
            tokens = result["tokens"]
            is_valid = result["is_valid"]

            if not is_valid:
                logger.warning(f"Prompt exceeds budget: {tokens} > {max_tokens}")
            else:
                logger.info(f"Prompt tokens: {tokens}/{max_tokens}")

            return is_valid, tokens

        except ImportError:
            logger.debug("SDK validator not available, using fallback estimation")
            try:
                import tiktoken

                encoding = tiktoken.encoding_for_model("gpt-4")
                tokens = len(encoding.encode(prompt))
            except Exception:
                tokens = max(1, len(prompt) // 4)

            is_valid = tokens <= max_tokens
            return is_valid, tokens

        except Exception as e:
            logger.error(f"Token validation failed: {e}")
            tokens = max(1, len(prompt) // 4)
            is_valid = tokens <= max_tokens
            return is_valid, tokens

    # -------------------------------------------------------------------
    # Reflection context
    # -------------------------------------------------------------------

    def get_reflection_context(
        self, current_feature_id: str | None = None
    ) -> str | None:
        """
        Get computational reflections (pre-computed context from history).

        Args:
            current_feature_id: ID of the currently active feature

        Returns:
            Formatted reflection context string, or None
        """
        try:
            from htmlgraph.reflection import get_reflection_context
            from htmlgraph.sdk import SDK

            sdk = SDK(directory=self.graph_dir, agent="claude-code")
            return get_reflection_context(
                sdk,
                feature_id=current_feature_id,
                track=None,
            )
        except Exception as e:
            logger.warning(f"Could not compute reflections: {e}")
            return None

    # -------------------------------------------------------------------
    # Async parallelization helpers
    # -------------------------------------------------------------------

    async def _load_system_prompt_async(self) -> str | None:
        """Asynchronously load system prompt."""
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, self.load_system_prompt)

    async def _load_analytics_async(self) -> dict[str, Any]:
        """Asynchronously compute analytics and strategic recommendations."""

        def _compute() -> dict[str, Any]:
            try:
                return self.get_strategic_recommendations(agent_count=1)
            except Exception as e:
                logger.warning(f"Analytics computation failed: {e}")
                return {}

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, _compute)

    async def _parallelize_initialization(self) -> dict[str, Any]:
        """Parallelize system prompt loading and analytics computation."""
        try:
            system_prompt, analytics = await asyncio.gather(
                self._load_system_prompt_async(),
                self._load_analytics_async(),
                return_exceptions=False,
            )

            return {
                "system_prompt": system_prompt,
                "analytics": analytics or {},
                "parallelized": True,
            }
        except Exception as e:
            logger.warning(f"Parallel initialization failed: {e}")
            return {
                "system_prompt": None,
                "analytics": {},
                "parallelized": False,
            }

    def run_parallel_init(self) -> dict[str, Any]:
        """
        Run parallelized initialization using asyncio.

        Runs system prompt loading and analytics computation in parallel
        to reduce latency.

        Returns:
            Dict with system_prompt, analytics, and parallelized flag
        """
        try:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            result = loop.run_until_complete(self._parallelize_initialization())
            loop.close()
            return result
        except Exception as e:
            logger.warning(f"Could not run parallelized init: {e}")
            return {
                "system_prompt": None,
                "analytics": {},
                "parallelized": False,
            }

    # -------------------------------------------------------------------
    # Context assembly - the main build method
    # -------------------------------------------------------------------

    def build_version_section(self) -> str:
        """Build version warning section if outdated."""
        try:
            status = VersionChecker.get_version_status()
            if (
                status["is_outdated"]
                and status["installed_version"]
                and status["latest_version"]
            ):
                return HTMLGRAPH_VERSION_WARNING.format(
                    installed=status["installed_version"],
                    latest=status["latest_version"],
                ).strip()
        except Exception:
            pass
        return ""

    def build_features_section(
        self, features: list[dict[str, Any]], stats: dict[str, Any]
    ) -> str:
        """
        Build the features context section.

        Args:
            features: Feature dicts
            stats: Feature statistics

        Returns:
            Formatted features context
        """
        context_parts: list[str] = []

        active_features = [f for f in features if f.get("status") == "in-progress"]
        pending_features = [f for f in features if f.get("status") == "todo"]

        # Project status
        context_parts.append(
            f"## Project Status\n\n"
            f"**Progress:** {stats['done']}/{stats['total']} features complete "
            f"({stats['percentage']}%)\n"
            f"**Active:** {stats['in_progress']} | "
            f"**Blocked:** {stats['blocked']} | "
            f"**Todo:** {stats['todo']}"
        )

        # Active features
        if active_features:
            active_list = "\n".join(
                [f"- **{f['id']}**: {f['title']}" for f in active_features[:3]]
            )
            context_parts.append(
                f"## Active Features\n\n{active_list}\n\n"
                f"*Activity will be attributed to these features based on "
                f"file patterns and keywords.*"
            )
        else:
            context_parts.append(
                "## No Active Features\n\n"
                "Start working on a feature:\n"
                "```bash\n"
                "htmlgraph feature start <feature-id>\n"
                "```"
            )

        # Pending features
        if pending_features:
            pending_list = "\n".join(
                [f"- {f['id']}: {f['title'][:50]}" for f in pending_features[:5]]
            )
            context_parts.append(f"## Pending Features\n\n{pending_list}")

        return "\n\n".join(context_parts)

    def build_previous_session_section(self) -> str:
        """Build previous session summary section."""
        prev_session = self.get_session_summary()
        if not prev_session:
            return ""

        handoff_lines: list[str] = []
        if prev_session.get("handoff_notes"):
            handoff_lines.append(f"**Notes:** {prev_session.get('handoff_notes')}")
        if prev_session.get("recommended_next"):
            handoff_lines.append(
                f"**Recommended Next:** {prev_session.get('recommended_next')}"
            )
        blockers = prev_session.get("blockers") or []
        if blockers:
            handoff_lines.append(f"**Blockers:** {', '.join(blockers)}")

        handoff_text = ""
        if handoff_lines:
            handoff_text = "\n\n" + "\n".join(handoff_lines)

        worked_on = prev_session.get("worked_on", [])
        worked_on_text = ", ".join(worked_on[:3]) if worked_on else "N/A"
        if len(worked_on) > 3:
            worked_on_text += f" (+{len(worked_on) - 3} more)"

        return (
            f"## Previous Session\n\n"
            f"**Session:** {prev_session.get('id', 'unknown')[:12]}...\n"
            f"**Events:** {prev_session.get('event_count', 0)}\n"
            f"**Worked On:** {worked_on_text}"
            f"{handoff_text}"
        )

    def build_commits_section(self) -> str:
        """Build recent commits section."""
        recent_commits = self.get_recent_commits(count=5)
        if not recent_commits:
            return ""

        commits_text = "\n".join([f" {commit}" for commit in recent_commits])
        return f"## Recent Commits\n\n{commits_text}"

    def build_strategic_insights_section(
        self, analytics: dict[str, Any] | None = None
    ) -> str:
        """
        Build strategic insights section.

        Args:
            analytics: Pre-computed analytics dict (loaded if not provided)

        Returns:
            Formatted insights section
        """
        if analytics is None:
            analytics = self.get_strategic_recommendations(agent_count=1)

        recommendations = analytics.get("recommendations", [])
        bottlenecks = analytics.get("bottlenecks", [])
        parallel = analytics.get("parallel_capacity", {})

        if (
            not recommendations
            and not bottlenecks
            and not parallel.get("max_parallelism", 0)
        ):
            return ""

        insights_parts: list[str] = []

        if bottlenecks:
            bottleneck_count = len(bottlenecks)
            bottleneck_list = "\n".join(
                [
                    f" - **{bn['title']}** (blocks {bn['blocks_count']} tasks, "
                    f"impact: {bn['impact_score']:.1f})"
                    for bn in bottlenecks[:3]
                ]
            )
            insights_parts.append(
                f"#### Bottlenecks ({bottleneck_count})\n{bottleneck_list}"
            )

        if recommendations:
            rec_list = "\n".join(
                [
                    f" {i + 1}. **{rec['title']}** (score: {rec['score']:.1f})\n"
                    f" - Why: {', '.join(rec['reasons'][:2])}"
                    for i, rec in enumerate(recommendations[:3])
                ]
            )
            insights_parts.append(f"#### Top Recommendations\n{rec_list}")

        if parallel.get("max_parallelism", 0) > 0:
            ready_now = parallel.get("ready_now", 0)
            total_ready = parallel.get("total_ready", 0)
            insights_parts.append(
                f"#### Parallel Work\n"
                f"**Can work on {parallel['max_parallelism']} tasks simultaneously**\n"
                f"- {ready_now} tasks ready now\n"
                f"- {total_ready} total tasks ready"
            )

        if insights_parts:
            return "## Strategic Insights\n\n" + "\n\n".join(insights_parts)
        return ""

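For orientation, `build_strategic_insights_section` only reads the keys shown in the accessors above. A minimal sketch of that payload, with invented values and `builder` standing for a `SessionContextBuilder` constructed elsewhere:

```python
# Illustrative analytics payload -- keys mirror the accessors above,
# values are invented; not a documented schema.
analytics = {
    "bottlenecks": [
        {"title": "Schema migration", "blocks_count": 4, "impact_score": 7.5},
    ],
    "recommendations": [
        {
            "title": "Add offline queue",
            "score": 8.2,
            "reasons": ["unblocks 3 tasks", "small scope"],
        },
    ],
    "parallel_capacity": {"max_parallelism": 3, "ready_now": 2, "total_ready": 5},
}
print(builder.build_strategic_insights_section(analytics))
```
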
    def build_agents_section(self, active_agents: list[dict[str, Any]]) -> str:
        """Build active agents section."""
        other_agents = [a for a in active_agents if a["agent"] != "claude-code"]
        if not other_agents:
            return ""

        agents_list = "\n".join(
            [
                f" - **{agent['agent']}**: {agent['event_count']} events, "
                f"working on {', '.join(agent.get('worked_on', [])[:2]) or 'unknown'}"
                for agent in other_agents[:5]
            ]
        )
        return (
            f"## Other Active Agents\n\n{agents_list}\n\n"
            f"**Note:** Coordinate with other agents to avoid conflicts."
        )

    def build_conflicts_section(self, conflicts: list[dict[str, Any]]) -> str:
        """Build conflict warnings section."""
        if not conflicts:
            return ""

        conflict_list = "\n".join(
            [
                f" - **{conf['title']}** ({conf['feature_id']}): "
                f"{', '.join(conf['agents'])}"
                for conf in conflicts
            ]
        )
        return (
            f"## CONFLICT DETECTED\n\n"
            f"**Multiple agents working on the same features:**\n\n"
            f"{conflict_list}\n\n"
            f"**Action required:** Coordinate with other agents or choose a different feature."
        )

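`build_agents_section` and `build_conflicts_section` likewise touch only a handful of keys on each dict. A minimal sketch of those inputs (invented values, `builder` assumed to exist):

```python
# Minimal input shapes, inferred from the keys the two methods above access.
active_agents = [
    {"agent": "gemini-cli", "event_count": 12, "worked_on": ["feat-042", "feat-017"]},
]
conflicts = [
    {"title": "Broadcast API", "feature_id": "feat-042", "agents": ["claude-code", "gemini-cli"]},
]
print(builder.build_agents_section(active_agents))
print(builder.build_conflicts_section(conflicts))
```
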
    @staticmethod
    def build_continuity_section() -> str:
        """Build session continuity instructions."""
        return (
            "## Session Continuity\n\n"
            "Greet the user with a brief status update:\n"
            "- Previous session summary (if any)\n"
            "- Current feature progress\n"
            "- What remains to be done\n"
            "- Ask what they'd like to work on next\n\n"
            "**Note:** Orchestrator directives are loaded via system prompt. "
            "Skills activate on-demand when needed."
        )

    def build(
        self,
        session_id: str,
        compute_async: bool = True,
    ) -> str:
        """
        Build complete session start context.

        This is the main entry point that assembles all context sections
        into a single Markdown string suitable for injection via
        additionalContext.

        Args:
            session_id: Current session ID
            compute_async: Use parallel async operations for performance

        Returns:
            Complete formatted Markdown context string
        """
        # Run parallelized initialization if requested
        if compute_async:
            init_results = self.run_parallel_init()
            system_prompt = init_results.get("system_prompt")
            analytics = init_results.get("analytics", {})
        else:
            system_prompt = self.load_system_prompt()
            analytics = {}

        # Load features
        features, stats = self.get_feature_summary()

        # Activate orchestrator mode
        orchestrator_active, orchestrator_level = self.activate_orchestrator_mode(
            session_id
        )

        # No features case - return minimal context
        if not features:
            context = f"""{HTMLGRAPH_PROCESS_NOTICE}

---

{ORCHESTRATOR_DIRECTIVES}

---

{TRACKER_WORKFLOW}

---

## No Features Found

Initialize HtmlGraph in this project:
```bash
uv pip install htmlgraph
htmlgraph init
```

Or create features manually in `.htmlgraph/features/`
"""
            return self._wrap_with_system_prompt(context, system_prompt, session_id)

        # Load analytics if not already computed
        if not analytics:
            analytics = self.get_strategic_recommendations(agent_count=1)

        # Get active agents and detect conflicts
        active_agents = self.get_active_agents()
        conflicts = self.detect_feature_conflicts(features, active_agents)

        # Get CIGS context
        cigs_context = self.get_cigs_context(session_id)

        # Build all context sections
        context_parts: list[str] = []

        # Version warning
        version_warning = self.build_version_section()
        if version_warning:
            context_parts.append(version_warning)

        # Static sections
        context_parts.append(HTMLGRAPH_PROCESS_NOTICE)
        if cigs_context:
            context_parts.append(cigs_context)
        context_parts.append(
            self._build_orchestrator_status(orchestrator_active, orchestrator_level)
        )
        context_parts.append(ORCHESTRATOR_DIRECTIVES)
        context_parts.append(TRACKER_WORKFLOW)
        context_parts.append(RESEARCH_FIRST_DEBUGGING)

        # Previous session
        prev_session_section = self.build_previous_session_section()
        if prev_session_section:
            context_parts.append(prev_session_section)

        # Features and status
        context_parts.append(self.build_features_section(features, stats))

        # Commits
        commits_section = self.build_commits_section()
        if commits_section:
            context_parts.append(commits_section)

        # Strategic insights
        insights_section = self.build_strategic_insights_section(analytics)
        if insights_section:
            context_parts.append(insights_section)

        # Active agents
        agents_section = self.build_agents_section(active_agents)
        if agents_section:
            context_parts.append(agents_section)

        # Conflicts
        conflicts_section = self.build_conflicts_section(conflicts)
        if conflicts_section:
            context_parts.append(conflicts_section)

        # Reflections
        active_features = [f for f in features if f.get("status") == "in-progress"]
        current_feature_id = active_features[0]["id"] if active_features else None
        reflection_context = self.get_reflection_context(current_feature_id)
        if reflection_context:
            context_parts.append(reflection_context)

        # Session continuity
        context_parts.append(self.build_continuity_section())

        context = "\n\n---\n\n".join(context_parts)

        return self._wrap_with_system_prompt(context, system_prompt, session_id)

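As a rough sketch of the thin session-start hook that `build()` is written for: construct the builder, build the context, and emit it for injection via `additionalContext`. The constructor arguments and the JSON envelope below are assumptions for illustration; only `build()` itself is defined in this file.

```python
# Hypothetical hook wrapper (sketch only): the directory arguments and the
# output envelope keys are assumed, not taken from this module.
import json
from pathlib import Path

from htmlgraph.session_context import SessionContextBuilder

builder = SessionContextBuilder(Path(".htmlgraph"), Path.cwd())
context = builder.build(session_id="sess-abc123", compute_async=True)
print(json.dumps({"hookSpecificOutput": {"additionalContext": context}}))
```
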
    def _wrap_with_system_prompt(
        self,
        context: str,
        system_prompt: str | None,
        session_id: str,
    ) -> str:
        """
        Prepend system prompt to context if available.

        Args:
            context: Main context string
            system_prompt: System prompt string or None
            session_id: Session ID for source tracking

        Returns:
            Context with system prompt prepended (if available)
        """
        if not system_prompt:
            return context

        # Validate token count (informational only)
        self.validate_token_count(system_prompt, max_tokens=500)

        system_section = (
            "## SYSTEM PROMPT RESTORED (via SessionStart)\n\n"
            "This system prompt was injected at session startup to maintain "
            "context across compacts and session transitions.\n\n"
            "---\n\n"
            f"{system_prompt}\n\n"
            "---\n\n"
            "*This prompt persists across tool executions and survives "
            "compact/resume cycles.*"
        )

        return f"{system_section}\n\n---\n\n{context}"

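A quick illustration of the wrapping behaviour (made-up strings, `builder` assumed to exist): without a system prompt the context passes through unchanged; with one, a `## SYSTEM PROMPT RESTORED` section is prepended ahead of the context.

```python
# Pass-through when no system prompt is available.
assert builder._wrap_with_system_prompt("ctx", None, "sess-abc123") == "ctx"

# Otherwise the restored-prompt header precedes the original context.
wrapped = builder._wrap_with_system_prompt("ctx", "Stay concise.", "sess-abc123")
assert wrapped.startswith("## SYSTEM PROMPT RESTORED")
```
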
    def build_status_summary(
        self, features: list[dict[str, Any]], stats: dict[str, Any]
    ) -> str:
        """
        Build a brief terminal status summary line.

        Args:
            features: Feature dicts
            stats: Feature statistics

        Returns:
            Single-line status summary
        """
        active_features = [f for f in features if f.get("status") == "in-progress"]
        pending_features = [f for f in features if f.get("status") == "todo"]

        if active_features:
            return (
                f"Feature: {active_features[0]['title'][:40]} | "
                f"Progress: {stats['done']}/{stats['total']} ({stats['percentage']}%)"
            )
        else:
            return (
                f"No active feature | Progress: {stats['done']}/{stats['total']} "
                f"({stats['percentage']}%) | {len(pending_features)} pending"
            )
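
`build_status_summary` only needs each feature's `status`/`title` plus the `done`/`total`/`percentage` counters. A minimal call with invented values, again assuming a `builder` instance:

```python
# Dict keys mirror what build_status_summary reads; values are invented.
features = [{"id": "feat-042", "title": "Broadcast API", "status": "in-progress"}]
stats = {"done": 3, "total": 10, "percentage": 30}
print(builder.build_status_summary(features, stats))
# -> Feature: Broadcast API | Progress: 3/10 (30%)
```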