@hustle-together/api-dev-tools 3.6.4 → 3.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. package/README.md +5307 -258
  2. package/bin/cli.js +348 -20
  3. package/commands/README.md +459 -71
  4. package/commands/hustle-api-continue.md +158 -0
  5. package/commands/{api-create.md → hustle-api-create.md} +22 -2
  6. package/commands/{api-env.md → hustle-api-env.md} +4 -4
  7. package/commands/{api-interview.md → hustle-api-interview.md} +1 -1
  8. package/commands/{api-research.md → hustle-api-research.md} +3 -3
  9. package/commands/hustle-api-sessions.md +149 -0
  10. package/commands/{api-status.md → hustle-api-status.md} +16 -16
  11. package/commands/{api-verify.md → hustle-api-verify.md} +2 -2
  12. package/commands/hustle-combine.md +763 -0
  13. package/commands/hustle-ui-create.md +825 -0
  14. package/hooks/api-workflow-check.py +385 -19
  15. package/hooks/cache-research.py +337 -0
  16. package/hooks/check-playwright-setup.py +103 -0
  17. package/hooks/check-storybook-setup.py +81 -0
  18. package/hooks/detect-interruption.py +165 -0
  19. package/hooks/enforce-brand-guide.py +131 -0
  20. package/hooks/enforce-documentation.py +60 -8
  21. package/hooks/enforce-freshness.py +184 -0
  22. package/hooks/enforce-questions-sourced.py +146 -0
  23. package/hooks/enforce-schema-from-interview.py +248 -0
  24. package/hooks/enforce-ui-disambiguation.py +108 -0
  25. package/hooks/enforce-ui-interview.py +130 -0
  26. package/hooks/generate-manifest-entry.py +981 -0
  27. package/hooks/session-logger.py +297 -0
  28. package/hooks/session-startup.py +65 -10
  29. package/hooks/track-scope-coverage.py +220 -0
  30. package/hooks/track-tool-use.py +81 -1
  31. package/hooks/update-api-showcase.py +149 -0
  32. package/hooks/update-registry.py +352 -0
  33. package/hooks/update-ui-showcase.py +148 -0
  34. package/package.json +8 -2
  35. package/templates/BRAND_GUIDE.md +299 -0
  36. package/templates/CLAUDE-SECTION.md +56 -24
  37. package/templates/SPEC.json +640 -0
  38. package/templates/api-dev-state.json +179 -161
  39. package/templates/api-showcase/APICard.tsx +153 -0
  40. package/templates/api-showcase/APIModal.tsx +375 -0
  41. package/templates/api-showcase/APIShowcase.tsx +231 -0
  42. package/templates/api-showcase/APITester.tsx +522 -0
  43. package/templates/api-showcase/page.tsx +41 -0
  44. package/templates/component/Component.stories.tsx +172 -0
  45. package/templates/component/Component.test.tsx +237 -0
  46. package/templates/component/Component.tsx +86 -0
  47. package/templates/component/Component.types.ts +55 -0
  48. package/templates/component/index.ts +15 -0
  49. package/templates/dev-tools/_components/DevToolsLanding.tsx +320 -0
  50. package/templates/dev-tools/page.tsx +10 -0
  51. package/templates/page/page.e2e.test.ts +218 -0
  52. package/templates/page/page.tsx +42 -0
  53. package/templates/performance-budgets.json +58 -0
  54. package/templates/registry.json +13 -0
  55. package/templates/settings.json +74 -0
  56. package/templates/shared/HeroHeader.tsx +261 -0
  57. package/templates/shared/index.ts +1 -0
  58. package/templates/ui-showcase/PreviewCard.tsx +315 -0
  59. package/templates/ui-showcase/PreviewModal.tsx +676 -0
  60. package/templates/ui-showcase/UIShowcase.tsx +262 -0
  61. package/templates/ui-showcase/page.tsx +26 -0
@@ -0,0 +1,337 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: PostToolUse for Write/Edit
4
+ Purpose: Create research cache files from state when documentation phase starts
5
+
6
+ This hook creates the following files that enforce-documentation.py expects:
7
+ - .claude/research/{endpoint}/sources.json - Research sources with URLs
8
+ - .claude/research/{endpoint}/interview.json - Interview decisions
9
+ - .claude/research/{endpoint}/schema.json - Schema snapshot
10
+ - .claude/research/{endpoint}/CURRENT.md - Aggregated research (if not exists)
11
+ - .claude/research/index.json - Updates the freshness index
12
+
13
+ Added in v3.6.7 to fix critical gap where these files were expected but never created.
14
+
15
+ Returns:
16
+ - JSON with cacheCreated info
17
+ """
18
+ import json
19
+ import sys
20
+ import os
21
+ from datetime import datetime
22
+ from pathlib import Path
23
+
24
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
25
+ RESEARCH_DIR = Path(__file__).parent.parent / "research"
26
+ RESEARCH_INDEX = RESEARCH_DIR / "index.json"
27
+
28
+
29
def get_active_endpoint(state):
    """Return (name, data) for the active endpoint, or (None, None).

    Understands both state layouts:
    - v3.6.7+: an "endpoints" mapping plus an "active_endpoint" pointer
    - legacy:  a single top-level "endpoint" field, where the whole
      state object doubles as the endpoint record
    """
    # New layout wins whenever both marker keys are present.
    if "endpoints" in state and "active_endpoint" in state:
        name = state.get("active_endpoint")
        endpoints = state["endpoints"]
        if name and name in endpoints:
            return name, endpoints[name]
        return None, None

    # Legacy layout: the state itself carries the endpoint's data.
    legacy_name = state.get("endpoint")
    return (legacy_name, state) if legacy_name else (None, None)
44
+
45
+
46
def create_sources_json(endpoint_dir, state, endpoint_data):
    """Write sources.json aggregating research sources recorded in state.

    Sources are harvested from:
    - the top-level ``research_queries`` list on the state object
    - the ``research_initial`` and ``research_deep`` phase entries on the
      endpoint data (entries may be dicts or bare URL strings)

    Duplicates are dropped by URL (falling back to the query text when a
    URL is absent), keeping the first occurrence.

    Args:
        endpoint_dir: directory (Path) the cache file is written into.
        state: full parsed api-dev-state.json contents.
        endpoint_data: the active endpoint's record within state.

    Returns:
        True (a file is always written).
    """
    sources = []

    # Normalized records from the state's research_queries log.
    for query in state.get("research_queries", []):
        sources.append({
            "query": query.get("query", ""),
            "tool": query.get("tool", "unknown"),
            "timestamp": query.get("timestamp", ""),
            "url": query.get("url", ""),
            "summary": query.get("summary", ""),
        })

    # Both research phases share the same source shape, so handle them
    # with one loop instead of two copy-pasted blocks.
    phases = endpoint_data.get("phases", {})
    for phase_name in ("research_initial", "research_deep"):
        for src in phases.get(phase_name, {}).get("sources", []):
            if isinstance(src, dict):
                sources.append(src)
            elif isinstance(src, str):
                sources.append({"url": src, "summary": ""})

    # Deduplicate, first occurrence wins.
    seen_keys = set()
    unique_sources = []
    for src in sources:
        key = src.get("url", src.get("query", ""))
        if key and key not in seen_keys:
            seen_keys.add(key)
            unique_sources.append(src)

    # One timestamp for both fields so created_at == updated_at on a
    # fresh file (two datetime.now() calls could disagree).
    now = datetime.now().isoformat()
    data = {
        "created_at": now,
        "updated_at": now,
        "endpoint": endpoint_data.get("endpoint", state.get("endpoint", "")),
        "source_count": len(unique_sources),
        "sources": unique_sources,
    }

    (endpoint_dir / "sources.json").write_text(json.dumps(data, indent=2))
    return True
99
+
100
+
101
def create_interview_json(endpoint_dir, endpoint_data):
    """Snapshot interview questions and decisions into interview.json.

    Reads the ``interview`` phase from the endpoint data; missing pieces
    default to empty collections so a file is always produced.

    Returns:
        True (a file is always written).
    """
    interview_phase = endpoint_data.get("phases", {}).get("interview", {})
    questions = interview_phase.get("questions", [])
    decisions = interview_phase.get("decisions", {})

    payload = {
        "created_at": datetime.now().isoformat(),
        "updated_at": datetime.now().isoformat(),
        "question_count": len(questions),
        "decision_count": len(decisions),
        "questions": questions,
        "decisions": decisions,
    }

    (endpoint_dir / "interview.json").write_text(json.dumps(payload, indent=2))
    return True
120
+
121
+
122
def create_schema_json(endpoint_dir, endpoint_data, state):
    """Snapshot the schema-creation phase into schema.json.

    Records the schema file path and field count from state, embedding
    the referenced schema file's text when it can be read (None when
    the path is missing or unreadable). ``state`` is accepted for
    interface parity with the other cache writers but is not read here.

    Returns:
        True (a file is always written).
    """
    phase = endpoint_data.get("phases", {}).get("schema_creation", {})
    # Older states recorded the path under "file" rather than "schema_file".
    schema_path_str = phase.get("schema_file", phase.get("file", ""))

    # Best-effort read of the referenced schema source.
    schema_text = None
    if schema_path_str:
        schema_path = Path(schema_path_str)
        if schema_path.exists():
            try:
                schema_text = schema_path.read_text()
            except IOError:
                pass

    payload = {
        "created_at": datetime.now().isoformat(),
        "updated_at": datetime.now().isoformat(),
        "schema_file": schema_path_str,
        "fields_count": phase.get("fields_count", 0),
        "schema_content": schema_text,
    }

    (endpoint_dir / "schema.json").write_text(json.dumps(payload, indent=2))
    return True
150
+
151
+
152
def create_current_md(endpoint_dir, endpoint, endpoint_data, state):
    """Generate CURRENT.md aggregating cached research for an endpoint.

    Skips generation when CURRENT.md already exists so hand-written
    research notes are never clobbered. ``endpoint_data`` and ``state``
    are accepted for interface parity with the other cache writers;
    content is drawn from the sibling cache files instead.

    Returns:
        True when the file was written, False when it already existed.
    """
    target = endpoint_dir / "CURRENT.md"
    if target.exists():
        # Never overwrite manually curated research.
        return False

    body = [
        f"# Research: {endpoint}",
        "",
        f"*Generated: {datetime.now().isoformat()}*",
        "",
        "## Sources",
        "",
    ]

    # Sources section, fed from the sources.json snapshot when present.
    try:
        sources = json.loads((endpoint_dir / "sources.json").read_text())
    except (json.JSONDecodeError, IOError):
        sources = None
    if sources is not None:
        for entry in sources.get("sources", []):
            link = entry.get("url", "")
            note = entry.get("summary", "")
            if link:
                body.append(f"- {link}")
                if note:
                    body.append(f" - {note}")

    body.extend(["", "## Interview Decisions", ""])

    # Decisions section, fed from interview.json when present.
    try:
        interview = json.loads((endpoint_dir / "interview.json").read_text())
    except (json.JSONDecodeError, IOError):
        interview = None
    if interview is not None:
        for key, value in interview.get("decisions", {}).items():
            response = value.get("response", value.get("value", "N/A"))
            body.append(f"- **{key}**: {response}")

    body.extend(["", "## Schema", ""])

    # Schema summary, fed from schema.json when present.
    try:
        schema = json.loads((endpoint_dir / "schema.json").read_text())
    except (json.JSONDecodeError, IOError):
        schema = None
    if schema is not None:
        body.append(f"- File: `{schema.get('schema_file', 'N/A')}`")
        body.append(f"- Fields: {schema.get('fields_count', 0)}")

    target.write_text("\n".join(body))
    return True
212
+
213
+
214
def update_research_index(endpoint):
    """Register *endpoint* in the research freshness index (index.json).

    Creates the research directory and a blank index when the file is
    missing or corrupt, then records this endpoint's cache location and
    expected files with a fresh last_updated stamp.

    Returns:
        True (the index is always written).
    """
    RESEARCH_DIR.mkdir(parents=True, exist_ok=True)

    # Start from the existing index; fall back to a blank one when the
    # file is absent or holds invalid JSON.
    index = {"version": "3.6.7", "apis": {}}
    if RESEARCH_INDEX.exists():
        try:
            index = json.loads(RESEARCH_INDEX.read_text())
        except json.JSONDecodeError:
            pass
    index.setdefault("apis", {})

    index["apis"][endpoint] = {
        "last_updated": datetime.now().isoformat(),
        "freshness_days": 0,
        "cache_path": f".claude/research/{endpoint}/",
        "files": ["sources.json", "interview.json", "schema.json", "CURRENT.md"],
    }

    RESEARCH_INDEX.write_text(json.dumps(index, indent=2))
    return True
242
+
243
+
244
def main():
    """PostToolUse entry point: materialize research cache files.

    Reads the hook payload from stdin. When a documentation-related
    Write/Edit occurs (or the documentation phase is already underway),
    writes the cache files that enforce-documentation.py expects,
    updates the freshness index, and flags the state as cached.

    Always prints a JSON response and exits 0 so the tool call proceeds.
    """
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    file_path = tool_input.get("file_path", "")

    # Only Write/Edit operations can trigger cache creation.
    if tool_name not in ["Write", "Edit"]:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Documentation artifacts that should trigger caching even before
    # the state marks the documentation phase as in progress.
    is_manifest = "api-tests-manifest.json" in file_path
    is_readme = file_path.endswith("README.md") and "/api/" in file_path

    if not STATE_FILE.exists():
        print(json.dumps({"continue": True}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    endpoint, endpoint_data = get_active_endpoint(state)
    if not endpoint or not endpoint_data:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Proceed only when documentation is underway/complete, or the write
    # itself is a documentation artifact.
    doc_status = (
        endpoint_data.get("phases", {})
        .get("documentation", {})
        .get("status", "not_started")
    )
    if doc_status not in ["in_progress", "complete"] and not is_manifest and not is_readme:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Per-endpoint research cache directory.
    endpoint_dir = RESEARCH_DIR / endpoint
    endpoint_dir.mkdir(parents=True, exist_ok=True)

    files_created = []
    if create_sources_json(endpoint_dir, state, endpoint_data):
        files_created.append("sources.json")
    if create_interview_json(endpoint_dir, endpoint_data):
        files_created.append("interview.json")
    if create_schema_json(endpoint_dir, endpoint_data, state):
        files_created.append("schema.json")
    if create_current_md(endpoint_dir, endpoint, endpoint_data, state):
        files_created.append("CURRENT.md")
    if update_research_index(endpoint):
        files_created.append("index.json")

    # Record that research is cached. setdefault keeps the flag attached
    # to the real state tree even when the phases/documentation dicts did
    # not exist yet — with plain .get() defaults the flag would be set on
    # an orphan dict and silently lost when state is saved.
    if files_created:
        doc_phase = endpoint_data.setdefault("phases", {}).setdefault("documentation", {})
        doc_phase["research_cached"] = True
        STATE_FILE.write_text(json.dumps(state, indent=2))

    output = {
        "hookSpecificOutput": {
            "cacheCreated": True,
            "endpoint": endpoint,
            "files": files_created,
            "cacheDir": str(endpoint_dir),
        }
    }

    print(json.dumps(output))
    sys.exit(0)


if __name__ == "__main__":
    main()
@@ -0,0 +1,103 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: PreToolUse for Write
4
+ Purpose: Verify Playwright is configured before writing E2E test files
5
+
6
+ This hook runs before writing E2E test files. It checks that:
7
+ - playwright.config.ts or playwright.config.js exists
8
+ - @playwright/test is in package.json dependencies
9
+
10
+ If Playwright is not configured, it blocks and suggests installation.
11
+
12
+ Version: 3.9.0
13
+
14
+ Returns:
15
+ - {"continue": true} - If Playwright is configured or not an E2E test file
16
+ - {"continue": false, "reason": "..."} - If Playwright is not configured
17
+ """
18
+ import json
19
+ import sys
20
+ from pathlib import Path
21
+
22
+
23
def main():
    """PreToolUse gate: block E2E test writes until Playwright exists.

    Reads the hook payload from stdin. Non-Write tools and non-E2E file
    paths pass straight through. For E2E test files the hook looks for a
    Playwright config (cwd or its parent) or an @playwright/test entry
    in package.json, and blocks with install instructions when neither
    is found. Always exits 0.
    """
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    if payload.get("tool_name", "") != "Write":
        print(json.dumps({"continue": True}))
        sys.exit(0)

    target = payload.get("tool_input", {}).get("file_path", "")

    # Heuristics for E2E test files: spec/e2e suffixes or e2e directories.
    looks_like_e2e = (
        target.endswith((".spec.ts", ".spec.tsx", ".e2e.ts", ".e2e.tsx"))
        or "/e2e/" in target
        or "/tests/e2e/" in target
    )
    if not looks_like_e2e:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    here = Path.cwd()
    candidates = (
        here / "playwright.config.ts",
        here / "playwright.config.js",
        here.parent / "playwright.config.ts",
        here.parent / "playwright.config.js",
    )
    configured = any(cfg.exists() for cfg in candidates)

    # Fall back to package.json: @playwright/test in either dep section.
    if not configured:
        manifest = here / "package.json"
        if manifest.exists():
            try:
                pkg = json.loads(manifest.read_text())
            except (json.JSONDecodeError, IOError):
                pkg = None
            if pkg is not None:
                all_deps = {**pkg.get("devDependencies", {}), **pkg.get("dependencies", {})}
                configured = "@playwright/test" in all_deps

    if not configured:
        print(json.dumps({
            "continue": False,
            "reason": (
                "Playwright is not configured in this project.\n\n"
                "Before writing E2E test files, please install Playwright:\n\n"
                " npm init playwright@latest\n\n"
                "This will create playwright.config.ts and install browsers.\n"
                "After installation, run 'npx playwright test' to run tests."
            )
        }))
        sys.exit(0)

    # Playwright is configured; allow the write.
    print(json.dumps({"continue": True}))
    sys.exit(0)


if __name__ == "__main__":
    main()
@@ -0,0 +1,81 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: PreToolUse for Write
4
+ Purpose: Verify Storybook is configured before writing story files
5
+
6
+ This hook runs before writing .stories.tsx files. It checks that:
7
+ - .storybook/ directory exists
8
+ - main.ts or main.js config exists
9
+
10
+ If Storybook is not configured, it blocks and suggests installation.
11
+
12
+ Version: 3.9.0
13
+
14
+ Returns:
15
+ - {"continue": true} - If Storybook is configured or not a story file
16
+ - {"continue": false, "reason": "..."} - If Storybook is not configured
17
+ """
18
+ import json
19
+ import sys
20
+ from pathlib import Path
21
+
22
+
23
def main():
    """PreToolUse gate: block story-file writes until Storybook exists.

    Reads the hook payload from stdin. Only Write operations targeting
    *.stories.ts(x) files are checked; the hook requires a .storybook/
    directory (cwd or its parent) containing main.ts or main.js, and
    blocks with install instructions otherwise. Always exits 0.
    """
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    if payload.get("tool_name", "") != "Write":
        print(json.dumps({"continue": True}))
        sys.exit(0)

    target = payload.get("tool_input", {}).get("file_path", "")
    if not target.endswith((".stories.tsx", ".stories.ts")):
        print(json.dumps({"continue": True}))
        sys.exit(0)

    here = Path.cwd()
    # A valid setup is a .storybook/ dir holding main.ts or main.js; the
    # parent directory is checked too in case we run from a subdirectory.
    configured = any(
        (candidate / config_name).exists()
        for candidate in (here / ".storybook", here.parent / ".storybook")
        for config_name in ("main.ts", "main.js")
    )

    if not configured:
        print(json.dumps({
            "continue": False,
            "reason": (
                "Storybook is not configured in this project.\n\n"
                "Before writing story files, please install Storybook:\n\n"
                " npx storybook@latest init\n\n"
                "This will create the .storybook/ directory and configuration.\n"
                "After installation, run 'pnpm storybook' to start the dev server."
            )
        }))
        sys.exit(0)

    # Storybook is configured; allow the write.
    print(json.dumps({"continue": True}))
    sys.exit(0)


if __name__ == "__main__":
    main()
@@ -0,0 +1,165 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: SessionStart
4
+ Purpose: Detect and prompt for interrupted workflows
5
+
6
+ This hook runs at session start and checks if there are any
7
+ in-progress workflows that were interrupted. If found, it injects
8
+ a prompt asking the user if they want to resume.
9
+
10
+ Added in v3.6.7 for session continuation support.
11
+
12
+ Returns:
13
+ - JSON with additionalContext about interrupted workflows
14
+ """
15
+ import json
16
+ import sys
17
+ import os
18
+ from datetime import datetime
19
+ from pathlib import Path
20
+
21
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
22
+
23
+
24
+ def get_interrupted_workflows(state):
25
+ """Find all workflows that are in_progress but not active."""
26
+ interrupted = []
27
+
28
+ # New format (v3.6.7+): check endpoints object
29
+ if "endpoints" in state:
30
+ active = state.get("active_endpoint")
31
+ for endpoint_name, endpoint_data in state["endpoints"].items():
32
+ status = endpoint_data.get("status", "not_started")
33
+ if status == "in_progress" and endpoint_name != active:
34
+ # Find the current phase
35
+ phases = endpoint_data.get("phases", {})
36
+ current_phase = None
37
+ for phase_name, phase_data in phases.items():
38
+ if phase_data.get("status") == "in_progress":
39
+ current_phase = phase_name
40
+ break
41
+
42
+ interrupted.append({
43
+ "endpoint": endpoint_name,
44
+ "status": status,
45
+ "current_phase": current_phase,
46
+ "started_at": endpoint_data.get("started_at"),
47
+ "interrupted_at": endpoint_data.get("session", {}).get("interrupted_at"),
48
+ "interrupted_phase": endpoint_data.get("session", {}).get("interrupted_phase")
49
+ })
50
+
51
+ # Also check if active endpoint is not fully started
52
+ if active and active in state["endpoints"]:
53
+ active_data = state["endpoints"][active]
54
+ session = active_data.get("session", {})
55
+ if session.get("interrupted_at"):
56
+ # Active endpoint was previously interrupted
57
+ interrupted.insert(0, {
58
+ "endpoint": active,
59
+ "status": active_data.get("status"),
60
+ "current_phase": session.get("interrupted_phase"),
61
+ "started_at": active_data.get("started_at"),
62
+ "interrupted_at": session.get("interrupted_at"),
63
+ "is_active": True
64
+ })
65
+
66
+ # Old format: single endpoint
67
+ elif state.get("endpoint"):
68
+ endpoint = state.get("endpoint")
69
+ phases = state.get("phases", {})
70
+
71
+ # Check if any phase is in_progress
72
+ for phase_name, phase_data in phases.items():
73
+ if phase_data.get("status") == "in_progress":
74
+ interrupted.append({
75
+ "endpoint": endpoint,
76
+ "status": "in_progress",
77
+ "current_phase": phase_name,
78
+ "started_at": state.get("created_at"),
79
+ "is_legacy": True
80
+ })
81
+ break
82
+
83
+ return interrupted
84
+
85
+
86
+ def format_interrupted_message(interrupted):
87
+ """Format a user-friendly message about interrupted workflows."""
88
+ if not interrupted:
89
+ return None
90
+
91
+ lines = [
92
+ "",
93
+ "=" * 60,
94
+ " INTERRUPTED WORKFLOW DETECTED",
95
+ "=" * 60,
96
+ ""
97
+ ]
98
+
99
+ for i, workflow in enumerate(interrupted, 1):
100
+ endpoint = workflow["endpoint"]
101
+ phase = workflow.get("current_phase", "unknown")
102
+ started = workflow.get("started_at", "unknown")
103
+ interrupted_at = workflow.get("interrupted_at", "")
104
+
105
+ lines.append(f"{i}. **{endpoint}**")
106
+ lines.append(f" - Phase: {phase}")
107
+ lines.append(f" - Started: {started}")
108
+ if interrupted_at:
109
+ lines.append(f" - Interrupted: {interrupted_at}")
110
+ lines.append("")
111
+
112
+ lines.extend([
113
+ "To resume an interrupted workflow, use:",
114
+ " /api-continue [endpoint-name]",
115
+ "",
116
+ "Or start a new workflow with:",
117
+ " /api-create [new-endpoint-name]",
118
+ "",
119
+ "=" * 60
120
+ ])
121
+
122
+ return "\n".join(lines)
123
+
124
+
125
+ def main():
126
+ try:
127
+ input_data = json.load(sys.stdin)
128
+ except json.JSONDecodeError:
129
+ input_data = {}
130
+
131
+ # Check if state file exists
132
+ if not STATE_FILE.exists():
133
+ print(json.dumps({"continue": True}))
134
+ sys.exit(0)
135
+
136
+ try:
137
+ state = json.loads(STATE_FILE.read_text())
138
+ except json.JSONDecodeError:
139
+ print(json.dumps({"continue": True}))
140
+ sys.exit(0)
141
+
142
+ # Find interrupted workflows
143
+ interrupted = get_interrupted_workflows(state)
144
+
145
+ if not interrupted:
146
+ print(json.dumps({"continue": True}))
147
+ sys.exit(0)
148
+
149
+ # Format message
150
+ message = format_interrupted_message(interrupted)
151
+
152
+ output = {
153
+ "hookSpecificOutput": {
154
+ "hookEventName": "SessionStart",
155
+ "additionalContext": message,
156
+ "interruptedWorkflows": interrupted
157
+ }
158
+ }
159
+
160
+ print(json.dumps(output))
161
+ sys.exit(0)
162
+
163
+
164
+ if __name__ == "__main__":
165
+ main()