@hustle-together/api-dev-tools 3.6.5 → 3.10.0

This diff shows the changes between publicly available package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (72)
  1. package/README.md +5599 -258
  2. package/bin/cli.js +395 -20
  3. package/commands/README.md +459 -71
  4. package/commands/hustle-api-continue.md +158 -0
  5. package/commands/{api-create.md → hustle-api-create.md} +35 -15
  6. package/commands/{api-env.md → hustle-api-env.md} +4 -4
  7. package/commands/{api-interview.md → hustle-api-interview.md} +1 -1
  8. package/commands/{api-research.md → hustle-api-research.md} +3 -3
  9. package/commands/hustle-api-sessions.md +149 -0
  10. package/commands/{api-status.md → hustle-api-status.md} +16 -16
  11. package/commands/{api-verify.md → hustle-api-verify.md} +2 -2
  12. package/commands/hustle-combine.md +763 -0
  13. package/commands/hustle-ui-create-page.md +933 -0
  14. package/commands/hustle-ui-create.md +825 -0
  15. package/hooks/api-workflow-check.py +545 -21
  16. package/hooks/cache-research.py +337 -0
  17. package/hooks/check-api-routes.py +168 -0
  18. package/hooks/check-playwright-setup.py +103 -0
  19. package/hooks/check-storybook-setup.py +81 -0
  20. package/hooks/detect-interruption.py +165 -0
  21. package/hooks/enforce-a11y-audit.py +202 -0
  22. package/hooks/enforce-brand-guide.py +241 -0
  23. package/hooks/enforce-documentation.py +60 -8
  24. package/hooks/enforce-freshness.py +184 -0
  25. package/hooks/enforce-page-components.py +186 -0
  26. package/hooks/enforce-page-data-schema.py +155 -0
  27. package/hooks/enforce-questions-sourced.py +146 -0
  28. package/hooks/enforce-schema-from-interview.py +248 -0
  29. package/hooks/enforce-ui-disambiguation.py +108 -0
  30. package/hooks/enforce-ui-interview.py +130 -0
  31. package/hooks/generate-manifest-entry.py +1161 -0
  32. package/hooks/session-logger.py +297 -0
  33. package/hooks/session-startup.py +160 -15
  34. package/hooks/track-scope-coverage.py +220 -0
  35. package/hooks/track-tool-use.py +81 -1
  36. package/hooks/update-api-showcase.py +149 -0
  37. package/hooks/update-registry.py +352 -0
  38. package/hooks/update-ui-showcase.py +212 -0
  39. package/package.json +8 -3
  40. package/templates/BRAND_GUIDE.md +299 -0
  41. package/templates/CLAUDE-SECTION.md +56 -24
  42. package/templates/SPEC.json +640 -0
  43. package/templates/api-dev-state.json +217 -161
  44. package/templates/api-showcase/_components/APICard.tsx +153 -0
  45. package/templates/api-showcase/_components/APIModal.tsx +375 -0
  46. package/templates/api-showcase/_components/APIShowcase.tsx +231 -0
  47. package/templates/api-showcase/_components/APITester.tsx +522 -0
  48. package/templates/api-showcase/page.tsx +41 -0
  49. package/templates/component/Component.stories.tsx +172 -0
  50. package/templates/component/Component.test.tsx +237 -0
  51. package/templates/component/Component.tsx +86 -0
  52. package/templates/component/Component.types.ts +55 -0
  53. package/templates/component/index.ts +15 -0
  54. package/templates/dev-tools/_components/DevToolsLanding.tsx +320 -0
  55. package/templates/dev-tools/page.tsx +10 -0
  56. package/templates/page/page.e2e.test.ts +218 -0
  57. package/templates/page/page.tsx +42 -0
  58. package/templates/performance-budgets.json +58 -0
  59. package/templates/registry.json +13 -0
  60. package/templates/settings.json +90 -0
  61. package/templates/shared/HeroHeader.tsx +261 -0
  62. package/templates/shared/index.ts +1 -0
  63. package/templates/ui-showcase/_components/PreviewCard.tsx +315 -0
  64. package/templates/ui-showcase/_components/PreviewModal.tsx +676 -0
  65. package/templates/ui-showcase/_components/UIShowcase.tsx +262 -0
  66. package/templates/ui-showcase/page.tsx +26 -0
  67. package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +0 -959
  68. package/demo/hustle-together/blog/interview-driven-api-development.html +0 -1146
  69. package/demo/hustle-together/blog/tdd-for-ai.html +0 -982
  70. package/demo/hustle-together/index.html +0 -1312
  71. package/demo/workflow-demo-v3.5-backup.html +0 -5008
  72. package/demo/workflow-demo.html +0 -6202
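Most of the delta below lands in the workflow-check Stop hook. For orientation, the hook talks to its caller by printing a JSON decision to stdout, per the docstring in the first hunk; the sketch below restates that contract in isolation, with the surrounding plumbing assumed rather than taken from this diff.

```python
import json
import sys


def emit_decision(issues: list[str]) -> None:
    """Approve the stop when nothing is outstanding; otherwise block with a reason."""
    if issues:
        # Same shape the hook prints when phases or checks are incomplete.
        print(json.dumps({"decision": "block", "reason": "\n".join(issues)}))
    else:
        print(json.dumps({"decision": "approve"}))
    sys.exit(0)
```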
@@ -11,6 +11,12 @@ Gap Fixes Applied:
  - Gap 3: Warns if there are verification_warnings that weren't addressed
  - Gap 4: Requires explicit verification that implementation matches interview

+ v3.6.7 Enhancement:
+ - Phase 13 completion output with curl examples, test commands, parameter tables
+ - Scope coverage report (discovered vs implemented vs deferred)
+ - Research cache location
+ - Summary statistics
+
  Returns:
  - {"decision": "approve"} - Allow stopping
  - {"decision": "block", "reason": "..."} - Prevent stopping with explanation
@@ -18,10 +24,13 @@ Returns:
  import json
  import sys
  import subprocess
+ import re
+ from datetime import datetime
  from pathlib import Path

  # State file is in .claude/ directory (sibling to hooks/)
  STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+ RESEARCH_DIR = Path(__file__).parent.parent / "research"

  # Phases that MUST be complete before stopping
  REQUIRED_PHASES = [
@@ -40,6 +49,155 @@ RECOMMENDED_PHASES = [
      ("documentation", "Documentation updates"),
  ]

+ # Combine workflow specific phases
+ COMBINE_REQUIRED_PHASES = [
+     ("selection", "API selection (2+ APIs required)"),
+     ("scope", "Scope confirmation"),
+     ("research_initial", "Initial research"),
+     ("interview", "User interview"),
+     ("research_deep", "Deep research"),
+     ("schema_creation", "Combined schema creation"),
+     ("environment_check", "Environment check"),
+     ("tdd_red", "TDD Red phase"),
+     ("tdd_green", "TDD Green phase"),
+     ("verify", "Verification phase"),
+     ("documentation", "Documentation updates"),
+ ]
+
+ # UI workflow specific phases
+ UI_REQUIRED_PHASES = [
+     ("disambiguation", "Component/Page type disambiguation"),
+     ("scope", "Scope confirmation"),
+     ("design_research", "Design research"),
+     ("interview", "User interview"),
+     ("tdd_red", "TDD Red phase"),
+     ("tdd_green", "TDD Green phase"),
+     ("verify", "Verification phase (4-step)"),
+     ("documentation", "Documentation updates"),
+ ]
+
+
+ def get_workflow_type(state):
+     """Detect the workflow type from state."""
+     workflow = state.get("workflow", "")
+     if workflow:
+         return workflow
+
+     # Infer from state structure
+     if state.get("combine_config"):
+         return "combine-api"
+     if state.get("ui_config"):
+         mode = state.get("ui_config", {}).get("mode", "")
+         return f"ui-create-{mode}" if mode else "ui-create-component"
+
+     return "api-create"
+
+
+ def get_required_phases_for_workflow(workflow_type):
+     """Get the required phases list for a given workflow type."""
+     if workflow_type == "combine-api":
+         return COMBINE_REQUIRED_PHASES
+     elif workflow_type.startswith("ui-create"):
+         return UI_REQUIRED_PHASES
+     else:
+         return REQUIRED_PHASES
+
+
+ def validate_combine_workflow(state):
+     """Validate combine-specific requirements.
+
+     Returns list of issues if validation fails, empty list if OK.
+     """
+     issues = []
+
+     combine_config = state.get("combine_config", {})
+     if not combine_config:
+         issues.append("❌ Combine config not found in state")
+         return issues
+
+     # Check that at least 2 APIs are selected
+     source_elements = combine_config.get("source_elements", [])
+     if len(source_elements) < 2:
+         issues.append(f"❌ Combine requires 2+ APIs, found {len(source_elements)}")
+         issues.append(" Select more APIs in Phase 1 (SELECTION)")
+
+     # Verify all source APIs exist in registry
+     try:
+         registry_path = STATE_FILE.parent / "registry.json"
+         if registry_path.exists():
+             registry = json.loads(registry_path.read_text())
+             apis = registry.get("apis", {})
+
+             for elem in source_elements:
+                 elem_name = elem.get("name", "") if isinstance(elem, dict) else str(elem)
+                 if elem_name and elem_name not in apis:
+                     issues.append(f"⚠️ Source API '{elem_name}' not found in registry")
+                     issues.append(f" Run /api-create {elem_name} first")
+     except Exception:
+         pass
+
+     # Check flow type is defined
+     flow_type = combine_config.get("flow_type", "")
+     if not flow_type:
+         issues.append("⚠️ Flow type not defined (sequential/parallel/conditional)")
+
+     return issues
+
+
+ def validate_ui_workflow(state):
+     """Validate UI-specific requirements.
+
+     Returns list of issues if validation fails, empty list if OK.
+     """
+     issues = []
+
+     ui_config = state.get("ui_config", {})
+     if not ui_config:
+         # Try to get from active element
+         active = state.get("active_element", "")
+         if active:
+             elements = state.get("elements", {})
+             element = elements.get(active, {})
+             ui_config = element.get("ui_config", {})
+
+     if not ui_config:
+         issues.append("⚠️ UI config not found in state")
+         return issues
+
+     # Check brand guide was applied
+     if not ui_config.get("use_brand_guide"):
+         issues.append("⚠️ Brand guide not applied - design may not match project standards")
+
+     return issues
+
+
+ def get_active_endpoint(state):
+     """Get active endpoint - supports both old and new state formats."""
+     if "endpoints" in state and "active_endpoint" in state:
+         active = state.get("active_endpoint")
+         if active and active in state["endpoints"]:
+             return active, state["endpoints"][active]
+         return None, None
+
+     # Support for elements (UI workflow)
+     if "elements" in state and "active_element" in state:
+         active = state.get("active_element")
+         if active and active in state["elements"]:
+             return active, state["elements"][active]
+         return None, None
+
+     # Old format: single endpoint
+     endpoint = state.get("endpoint")
+     if endpoint:
+         return endpoint, state
+
+     # Try active_element without elements dict
+     active = state.get("active_element")
+     if active:
+         return active, state
+
+     return None, None
+

  def get_git_modified_files() -> list[str]:
      """Get list of modified files from git.
@@ -76,21 +234,23 @@ def check_verification_warnings(state: dict) -> list[str]:
      return []


- def check_interview_implementation_match(state: dict) -> list[str]:
+ def check_interview_implementation_match(state: dict, endpoint_data: dict = None) -> list[str]:
      """Verify implementation matches interview requirements.

      Gap 4 Fix: Define specific "done" criteria based on interview.
      """
      issues = []

-     interview = state.get("phases", {}).get("interview", {})
+     # Use endpoint_data if provided (multi-API), otherwise use state directly
+     data = endpoint_data if endpoint_data else state
+     interview = data.get("phases", {}).get("interview", {})
      questions = interview.get("questions", [])

      # Extract key requirements from interview
      all_text = " ".join(str(q) for q in questions)

      # Check files_created includes expected patterns
-     files_created = state.get("files_created", [])
+     files_created = data.get("files_created", []) or state.get("files_created", [])

      # Look for route files if interview mentioned endpoints
      if "endpoint" in all_text.lower() or "/api/" in all_text.lower():
@@ -106,6 +266,324 @@ def check_interview_implementation_match(state: dict) -> list[str]:
      return issues


+ def extract_schema_params(endpoint: str, endpoint_data: dict) -> list[dict]:
+     """Extract parameters from schema file for the parameter table."""
+     schema_file = endpoint_data.get("phases", {}).get("schema_creation", {}).get("schema_file")
+     if not schema_file:
+         return []
+
+     # Try to read the schema file
+     try:
+         schema_path = STATE_FILE.parent.parent / schema_file
+         if not schema_path.exists():
+             return []
+
+         content = schema_path.read_text()
+
+         # Simple regex to extract Zod field definitions
+         # Matches patterns like: fieldName: z.string(), fieldName: z.number().optional()
+         params = []
+         field_pattern = r'(\w+):\s*z\.(\w+)\(([^)]*)\)(\.[^,\n}]+)?'
+
+         for match in re.finditer(field_pattern, content):
+             name = match.group(1)
+             zod_type = match.group(2)
+             chain = match.group(4) or ""
+
+             # Map Zod types to simple types
+             type_map = {
+                 "string": "string",
+                 "number": "number",
+                 "boolean": "boolean",
+                 "array": "array",
+                 "object": "object",
+                 "enum": "enum",
+                 "literal": "literal",
+                 "union": "union",
+             }
+
+             param_type = type_map.get(zod_type, zod_type)
+             required = ".optional()" not in chain
+             description = ""
+
+             # Try to extract description from .describe()
+             desc_match = re.search(r'\.describe\(["\']([^"\']+)["\']', chain)
+             if desc_match:
+                 description = desc_match.group(1)
+
+             params.append({
+                 "name": name,
+                 "type": param_type,
+                 "required": required,
+                 "description": description
+             })
+
+         return params
+     except Exception:
+         return []
+
+
+ def generate_curl_examples(endpoint: str, endpoint_data: dict, params: list) -> list[str]:
+     """Generate curl command examples for the endpoint."""
+     lines = []
+
+     # Determine HTTP method from route file
+     method = "POST"  # Default
+     files_created = endpoint_data.get("files_created", [])
+     for f in files_created:
+         if "route.ts" in f:
+             try:
+                 route_path = STATE_FILE.parent.parent / f
+                 if route_path.exists():
+                     route_content = route_path.read_text()
+                     if "export async function GET" in route_content:
+                         method = "GET"
+                     elif "export async function DELETE" in route_content:
+                         method = "DELETE"
+                     elif "export async function PUT" in route_content:
+                         method = "PUT"
+                     elif "export async function PATCH" in route_content:
+                         method = "PATCH"
+             except Exception:
+                 pass
+             break
+
+     lines.append("## API Usage (curl)")
+     lines.append("")
+     lines.append("```bash")
+     lines.append("# Basic request")
+
+     # Build example request body from params
+     if method in ["POST", "PUT", "PATCH"] and params:
+         example_body = {}
+         for p in params[:5]:  # First 5 params
+             if p["type"] == "string":
+                 example_body[p["name"]] = f"example-{p['name']}"
+             elif p["type"] == "number":
+                 example_body[p["name"]] = 42
+             elif p["type"] == "boolean":
+                 example_body[p["name"]] = True
+             elif p["type"] == "array":
+                 example_body[p["name"]] = []
+
+         body_json = json.dumps(example_body, indent=2)
+         lines.append(f"curl -X {method} http://localhost:3001/api/v2/{endpoint} \\")
+         lines.append(" -H \"Content-Type: application/json\" \\")
+         lines.append(f" -d '{body_json}'")
+     else:
+         lines.append(f"curl http://localhost:3001/api/v2/{endpoint}")
+
+     lines.append("")
+
+     # With authentication example
+     lines.append("# With API key (if required)")
+     if method in ["POST", "PUT", "PATCH"]:
+         lines.append(f"curl -X {method} http://localhost:3001/api/v2/{endpoint} \\")
+         lines.append(" -H \"Content-Type: application/json\" \\")
+         lines.append(" -H \"X-API-Key: your-api-key\" \\")
+         lines.append(" -d '{\"param\": \"value\"}'")
+     else:
+         lines.append(f"curl http://localhost:3001/api/v2/{endpoint} \\")
+         lines.append(" -H \"X-API-Key: your-api-key\"")
+
+     lines.append("```")
+
+     return lines
+
+
+ def generate_test_commands(endpoint: str, endpoint_data: dict) -> list[str]:
+     """Generate test commands for running endpoint tests."""
+     lines = []
+
+     lines.append("## Test Commands")
+     lines.append("")
+     lines.append("```bash")
+     lines.append("# Run endpoint tests")
+     lines.append(f"pnpm test -- {endpoint}")
+     lines.append("")
+     lines.append("# Run with coverage")
+     lines.append(f"pnpm test:coverage -- {endpoint}")
+     lines.append("")
+     lines.append("# Run specific test file")
+
+     # Find test file
+     files_created = endpoint_data.get("files_created", [])
+     test_file = None
+     for f in files_created:
+         if ".test." in f or "__tests__" in f:
+             test_file = f
+             break
+
+     if test_file:
+         lines.append(f"pnpm test:run {test_file}")
+     else:
+         lines.append(f"pnpm test:run src/app/api/v2/{endpoint}/__tests__/{endpoint}.api.test.ts")
+
+     lines.append("")
+     lines.append("# Full test suite")
+     lines.append("pnpm test:run")
+     lines.append("```")
+
+     return lines
+
+
+ def generate_parameter_table(params: list) -> list[str]:
+     """Generate markdown parameter table."""
+     if not params:
+         return []
+
+     lines = []
+     lines.append("## Parameters Discovered")
+     lines.append("")
+     lines.append("| Name | Type | Required | Description |")
+     lines.append("|------|------|----------|-------------|")
+
+     for p in params:
+         req = "✓" if p.get("required") else "-"
+         desc = p.get("description", "")[:50]  # Truncate long descriptions
+         lines.append(f"| {p['name']} | {p['type']} | {req} | {desc} |")
+
+     return lines
+
+
+ def generate_scope_coverage(endpoint_data: dict) -> list[str]:
+     """Generate scope coverage report."""
+     scope = endpoint_data.get("scope", {})
+     if not scope:
+         return []
+
+     discovered = scope.get("discovered_features", [])
+     implemented = scope.get("implemented_features", [])
+     deferred = scope.get("deferred_features", [])
+     coverage = scope.get("coverage_percent", 0)
+
+     if not discovered and not implemented and not deferred:
+         return []
+
+     lines = []
+     lines.append("## Implementation Scope")
+     lines.append("")
+
+     if implemented:
+         lines.append(f"### Implemented ({len(implemented)}/{len(discovered)} features)")
+         lines.append("")
+         lines.append("| Feature | Status |")
+         lines.append("|---------|--------|")
+         for feat in implemented:
+             if isinstance(feat, dict):
+                 lines.append(f"| {feat.get('name', feat)} | ✅ |")
+             else:
+                 lines.append(f"| {feat} | ✅ |")
+         lines.append("")
+
+     if deferred:
+         lines.append(f"### Deferred ({len(deferred)} features)")
+         lines.append("")
+         lines.append("| Feature | Reason |")
+         lines.append("|---------|--------|")
+         for feat in deferred:
+             if isinstance(feat, dict):
+                 reason = feat.get("reason", "User choice")
+                 lines.append(f"| {feat.get('name', feat)} | {reason} |")
+             else:
+                 lines.append(f"| {feat} | User choice |")
+         lines.append("")
+
+     if discovered:
+         total = len(discovered)
+         impl_count = len(implemented)
+         lines.append(f"**Coverage:** {impl_count}/{total} features ({coverage}%)")
+
+     return lines
+
+
+ def generate_completion_output(endpoint: str, endpoint_data: dict, state: dict) -> str:
+     """Generate comprehensive Phase 13 completion output."""
+     lines = []
+
+     # Header
+     lines.append("")
+     lines.append("=" * 60)
+     lines.append(f"# ✅ API Implementation Complete: {endpoint}")
+     lines.append("=" * 60)
+     lines.append("")
+
+     # Summary
+     phases = endpoint_data.get("phases", {})
+     phases_complete = sum(1 for p in phases.values() if isinstance(p, dict) and p.get("status") == "complete")
+     total_phases = len([p for p in phases.values() if isinstance(p, dict)])
+
+     started_at = endpoint_data.get("started_at", "Unknown")
+     files_created = endpoint_data.get("files_created", []) or state.get("files_created", [])
+
+     # Calculate test count from state
+     tdd_red = phases.get("tdd_red", {})
+     test_count = tdd_red.get("test_count", 0)
+
+     lines.append("## Summary")
+     lines.append("")
+     lines.append(f"- **Status:** PRODUCTION READY")
+     lines.append(f"- **Phases:** {phases_complete}/{total_phases} Complete")
+     lines.append(f"- **Tests:** {test_count} test scenarios")
+     lines.append(f"- **Started:** {started_at}")
+     lines.append(f"- **Completed:** {datetime.now().isoformat()}")
+     lines.append("")
+
+     # Files Created
+     if files_created:
+         lines.append("## Files Created")
+         lines.append("")
+         for f in files_created:
+             lines.append(f"- {f}")
+         lines.append("")
+
+     # Extract schema params
+     params = extract_schema_params(endpoint, endpoint_data)
+
+     # Test Commands
+     lines.extend(generate_test_commands(endpoint, endpoint_data))
+     lines.append("")
+
+     # Curl Examples
+     lines.extend(generate_curl_examples(endpoint, endpoint_data, params))
+     lines.append("")
+
+     # Parameter Table
+     param_lines = generate_parameter_table(params)
+     if param_lines:
+         lines.extend(param_lines)
+         lines.append("")
+
+     # Scope Coverage
+     scope_lines = generate_scope_coverage(endpoint_data)
+     if scope_lines:
+         lines.extend(scope_lines)
+         lines.append("")
+
+     # Research Cache Location
+     research_cache = RESEARCH_DIR / endpoint
+     if research_cache.exists():
+         lines.append("## Research Cache")
+         lines.append("")
+         lines.append(f"- `.claude/research/{endpoint}/CURRENT.md`")
+         lines.append(f"- `.claude/research/{endpoint}/sources.json`")
+         lines.append(f"- `.claude/research/{endpoint}/interview.json`")
+         lines.append("")
+
+     # Next Steps
+     lines.append("## Next Steps")
+     lines.append("")
+     lines.append(f"1. Review tests: `pnpm test -- {endpoint}`")
+     lines.append("2. Test manually with curl examples above")
+     lines.append("3. Deploy to staging")
+     lines.append("4. Update OpenAPI spec if needed")
+     lines.append("")
+
+     lines.append("=" * 60)
+
+     return "\n".join(lines)
+
+
  def main():
      # If no state file, we're not in an API workflow - allow stop
      if not STATE_FILE.exists():
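A quick check of what the Zod-field regex in `extract_schema_params` above picks up, run against a hypothetical schema snippet (the snippet is illustrative, not taken from the package):

```python
import re

field_pattern = r'(\w+):\s*z\.(\w+)\(([^)]*)\)(\.[^,\n}]+)?'

# Hypothetical Zod schema text, roughly the shape the hook expects to read.
schema_snippet = """
export const exampleSchema = z.object({
  query: z.string().describe("Search terms"),
  limit: z.number().optional(),
});
"""

for match in re.finditer(field_pattern, schema_snippet):
    name, zod_type, _args, chain = match.groups()
    chain = chain or ""
    print(name, zod_type, "optional" if ".optional()" in chain else "required")
# Prints: "query string required" then "limit number optional"
```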
@@ -120,11 +598,26 @@ def main():
          print(json.dumps({"decision": "approve"}))
          sys.exit(0)

-     phases = state.get("phases", {})
+     # Detect workflow type
+     workflow_type = get_workflow_type(state)
+
+     # Get active endpoint (multi-API support)
+     endpoint, endpoint_data = get_active_endpoint(state)
+
+     # If no active endpoint, check if using old format
+     if not endpoint_data:
+         phases = state.get("phases", {})
+     else:
+         phases = endpoint_data.get("phases", {})

      # Check if workflow was even started
      research = phases.get("research_initial", {})
-     if research.get("status") == "not_started":
+     design_research = phases.get("design_research", {})  # For UI workflows
+     selection = phases.get("selection", {})  # For combine workflows
+
+     if (research.get("status") == "not_started" and
+         design_research.get("status") == "not_started" and
+         selection.get("status") == "not_started"):
          # Workflow not started, allow stop
          print(json.dumps({"decision": "approve"}))
          sys.exit(0)
@@ -132,9 +625,26 @@ def main():
      # Collect all issues
      all_issues = []

+     # Workflow-specific validation
+     if workflow_type == "combine-api":
+         combine_issues = validate_combine_workflow(state)
+         if combine_issues:
+             all_issues.append("❌ COMBINE WORKFLOW VALIDATION FAILED:")
+             all_issues.extend(combine_issues)
+             all_issues.append("")
+
+     elif workflow_type.startswith("ui-create"):
+         ui_issues = validate_ui_workflow(state)
+         if ui_issues:
+             all_issues.extend(ui_issues)
+             all_issues.append("")
+
+     # Get the correct required phases for this workflow
+     required_phases = get_required_phases_for_workflow(workflow_type)
+
      # Check required phases
      incomplete_required = []
-     for phase_key, phase_name in REQUIRED_PHASES:
+     for phase_key, phase_name in required_phases:
          phase = phases.get(phase_key, {})
          status = phase.get("status", "not_started")
          if status != "complete":
@@ -154,7 +664,8 @@ def main():

      # Gap 2: Check git diff vs tracked files
      git_files = get_git_modified_files()
-     tracked_files = state.get("files_created", []) + state.get("files_modified", [])
+     data_for_files = endpoint_data if endpoint_data else state
+     tracked_files = (data_for_files.get("files_created", []) or []) + (data_for_files.get("files_modified", []) or [])

      if git_files and tracked_files:
          # Find files in git but not tracked
@@ -169,12 +680,12 @@ def main():
              all_issues.extend([f" - {f}" for f in untracked_changes[:5]])

      # Gap 3: Check for unaddressed warnings
-     warning_issues = check_verification_warnings(state)
+     warning_issues = check_verification_warnings(endpoint_data if endpoint_data else state)
      if warning_issues:
          all_issues.append("\n" + "\n".join(warning_issues))

      # Gap 4: Check interview-implementation match
-     match_issues = check_interview_implementation_match(state)
+     match_issues = check_interview_implementation_match(state, endpoint_data)
      if match_issues:
          all_issues.append("\n⚠️ Gap 4: Implementation verification:")
          all_issues.extend([f" {i}" for i in match_issues])
@@ -192,22 +703,35 @@ def main():
          }))
          sys.exit(0)

-     # Build completion message
-     message_parts = ["✅ API workflow completing"]
-
+     # ================================================================
+     # Phase 13: Generate comprehensive completion output (v3.6.7)
+     # ================================================================
+
+     # Build completion message with full output
+     message_parts = []
+
+     # Generate comprehensive output if we have endpoint data
+     if endpoint and endpoint_data:
+         completion_output = generate_completion_output(endpoint, endpoint_data, state)
+         message_parts.append(completion_output)
+     else:
+         # Fallback for old format
+         message_parts.append("✅ API workflow completing")
+
+         # Show summary of tracked files
+         files_created = state.get("files_created", [])
+         if files_created:
+             message_parts.append(f"\n📁 Files created: {len(files_created)}")
+             for f in files_created[:5]:
+                 message_parts.append(f" - {f}")
+             if len(files_created) > 5:
+                 message_parts.append(f" ... and {len(files_created) - 5} more")
+
+     # Add warnings if any optional phases were skipped
      if incomplete_recommended:
          message_parts.append("\n⚠️ Optional phases skipped:")
          message_parts.extend(incomplete_recommended)

-     # Show summary of tracked files
-     files_created = state.get("files_created", [])
-     if files_created:
-         message_parts.append(f"\n📁 Files created: {len(files_created)}")
-         for f in files_created[:5]:
-             message_parts.append(f" - {f}")
-         if len(files_created) > 5:
-             message_parts.append(f" ... and {len(files_created) - 5} more")
-
      # Show any remaining warnings
      if warning_issues or match_issues:
          message_parts.append("\n⚠️ Review suggested:")