@iloom/cli 0.13.0-beta.0 → 0.13.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173) hide show
  1. package/LICENSE +1 -1
  2. package/README.md +3 -18
  3. package/dist/{ClaudeContextManager-RRGREEZQ.js → ClaudeContextManager-ZH6LEA5I.js} +5 -5
  4. package/dist/{ClaudeService-LEPW6QAC.js → ClaudeService-YR66WXZN.js} +4 -4
  5. package/dist/{IssueTrackerFactory-KE2BDCLC.js → IssueTrackerFactory-O2ZBA666.js} +3 -3
  6. package/dist/{LoomLauncher-GKQMR5E6.js → LoomLauncher-V54ENBEF.js} +5 -5
  7. package/dist/{MetadataManager-V4LSJ2PB.js → MetadataManager-HHE6LQF2.js} +2 -2
  8. package/dist/{PromptTemplateManager-I75WKXM4.js → PromptTemplateManager-4RFELNYY.js} +2 -2
  9. package/dist/README.md +3 -18
  10. package/dist/{SettingsManager-KQU7OX7G.js → SettingsManager-SLSYEYDZ.js} +4 -4
  11. package/dist/agents/iloom-artifact-reviewer.md +1 -0
  12. package/dist/agents/iloom-code-reviewer.md +21 -0
  13. package/dist/agents/iloom-issue-analyze-and-plan.md +30 -12
  14. package/dist/agents/iloom-issue-analyzer.md +32 -7
  15. package/dist/agents/iloom-issue-complexity-evaluator.md +32 -12
  16. package/dist/agents/iloom-issue-implementer.md +31 -12
  17. package/dist/agents/iloom-issue-planner.md +30 -12
  18. package/dist/agents/iloom-wave-verifier.md +177 -4
  19. package/dist/{build-V3KADFMO.js → build-ZTGWDHWU.js} +8 -8
  20. package/dist/{chunk-VVQQIG64.js → chunk-55NTREIU.js} +33 -30
  21. package/dist/chunk-55NTREIU.js.map +1 -0
  22. package/dist/{chunk-AYLC633W.js → chunk-7TN5VW4I.js} +65 -7
  23. package/dist/chunk-7TN5VW4I.js.map +1 -0
  24. package/dist/{chunk-RFCAPHL5.js → chunk-C2BVNJW5.js} +2 -2
  25. package/dist/{chunk-3XEXT35Z.js → chunk-E5OM25WK.js} +3 -3
  26. package/dist/{chunk-Q7VXHJP6.js → chunk-EHAITKLS.js} +10 -6
  27. package/dist/{chunk-Q7VXHJP6.js.map → chunk-EHAITKLS.js.map} +1 -1
  28. package/dist/{chunk-ZUIFO7B4.js → chunk-ERMEYFT6.js} +7 -2
  29. package/dist/chunk-ERMEYFT6.js.map +1 -0
  30. package/dist/{chunk-WGUGB54H.js → chunk-F5NKWLMQ.js} +21 -24
  31. package/dist/chunk-F5NKWLMQ.js.map +1 -0
  32. package/dist/{chunk-TN2D2RX7.js → chunk-G2DGDCDP.js} +33 -224
  33. package/dist/chunk-G2DGDCDP.js.map +1 -0
  34. package/dist/{chunk-NUUFP53X.js → chunk-GPBX2BY2.js} +2 -2
  35. package/dist/{chunk-SN4S5CWL.js → chunk-GQDVH6FA.js} +2 -2
  36. package/dist/{chunk-YUOVWWJX.js → chunk-HKEXRZMU.js} +5 -310
  37. package/dist/chunk-HKEXRZMU.js.map +1 -0
  38. package/dist/{chunk-TAEVA4QR.js → chunk-HWDQRW3O.js} +3 -3
  39. package/dist/chunk-HWDQRW3O.js.map +1 -0
  40. package/dist/{chunk-KQSV7FOG.js → chunk-J5JOJPK3.js} +2 -2
  41. package/dist/{chunk-PD75ZCFT.js → chunk-KCAWSZUO.js} +18 -17
  42. package/dist/chunk-KCAWSZUO.js.map +1 -0
  43. package/dist/{chunk-QNPJXO53.js → chunk-KGOBNC5A.js} +4 -4
  44. package/dist/{chunk-H3T3EPF3.js → chunk-LNY2Y32V.js} +2 -2
  45. package/dist/{chunk-7RCUWU3I.js → chunk-MRPIDNZU.js} +1 -1
  46. package/dist/chunk-MRPIDNZU.js.map +1 -0
  47. package/dist/{chunk-VIQOQ463.js → chunk-OLJ54WGW.js} +15 -10
  48. package/dist/chunk-OLJ54WGW.js.map +1 -0
  49. package/dist/{chunk-QQULYI2S.js → chunk-P5MNWBLH.js} +108 -47
  50. package/dist/chunk-P5MNWBLH.js.map +1 -0
  51. package/dist/{chunk-4VQXMEEP.js → chunk-PPQ5LV7U.js} +3 -3
  52. package/dist/{chunk-4VQXMEEP.js.map → chunk-PPQ5LV7U.js.map} +1 -1
  53. package/dist/{chunk-QED2WB2D.js → chunk-PS6K2AOV.js} +5 -5
  54. package/dist/{chunk-JD3K2344.js → chunk-QNRXRSKC.js} +36 -3
  55. package/dist/chunk-QNRXRSKC.js.map +1 -0
  56. package/dist/{chunk-SA446KA2.js → chunk-T4KFKKEB.js} +7 -7
  57. package/dist/{chunk-XCP2WDYA.js → chunk-T4NESGYB.js} +3 -3
  58. package/dist/{chunk-QXGM32TO.js → chunk-TJDKGKQV.js} +2 -2
  59. package/dist/{chunk-X5DRLONY.js → chunk-UXBVDD7U.js} +6 -6
  60. package/dist/{chunk-JDN4SPV3.js → chunk-WYDLOQYO.js} +2 -2
  61. package/dist/{chunk-4JZEQBWV.js → chunk-XIVLGWUX.js} +3 -1
  62. package/dist/chunk-XIVLGWUX.js.map +1 -0
  63. package/dist/{chunk-NTDY5AMO.js → chunk-ZEFTWM5Z.js} +2 -2
  64. package/dist/{cleanup-RJKLI47I.js → cleanup-BCVY7PEF.js} +22 -22
  65. package/dist/cleanup-BCVY7PEF.js.map +1 -0
  66. package/dist/cli.js +136 -105
  67. package/dist/cli.js.map +1 -1
  68. package/dist/{commit-SUHRUMDE.js → commit-L5JNBU4U.js} +8 -8
  69. package/dist/{compile-2MD346PO.js → compile-GPJOHXH4.js} +8 -8
  70. package/dist/{contribute-P4BMRY7C.js → contribute-QEGCI4PS.js} +4 -4
  71. package/dist/{dev-server-ZNTLWOL5.js → dev-server-UQKNKU2S.js} +249 -31
  72. package/dist/dev-server-UQKNKU2S.js.map +1 -0
  73. package/dist/{feedback-Q6WG2WX4.js → feedback-2LWXKLQZ.js} +4 -4
  74. package/dist/{git-TX2IEMB3.js → git-IS7AV3ED.js} +4 -4
  75. package/dist/hooks/iloom-hook.js +40 -2
  76. package/dist/{ignite-P644W2PK.js → ignite-VQDJQ37S.js} +12 -14
  77. package/dist/index.d.ts +73 -75
  78. package/dist/index.js +32 -32
  79. package/dist/index.js.map +1 -1
  80. package/dist/{init-5HFY7JG6.js → init-7SDJUAEZ.js} +8 -8
  81. package/dist/{install-deps-J4ALTM27.js → install-deps-NGSFDNUW.js} +8 -8
  82. package/dist/{issues-LZMIF22U.js → issues-4HQKEUP7.js} +5 -5
  83. package/dist/{lint-XIXKU22H.js → lint-C5FOVRXY.js} +8 -8
  84. package/dist/mcp/issue-management-server.js +19 -22
  85. package/dist/mcp/issue-management-server.js.map +1 -1
  86. package/dist/neon-helpers-LCZAN4U4.js +11 -0
  87. package/dist/{open-KUO35JIJ.js → open-2HL6GV5F.js} +19 -15
  88. package/dist/open-2HL6GV5F.js.map +1 -0
  89. package/dist/{plan-7CF56OIR.js → plan-GC3HF73T.js} +86 -66
  90. package/dist/plan-GC3HF73T.js.map +1 -0
  91. package/dist/{projects-L5AHUBGA.js → projects-3F6T3KZL.js} +2 -2
  92. package/dist/prompts/init-prompt.txt +40 -36
  93. package/dist/prompts/issue-prompt.txt +4 -1
  94. package/dist/prompts/plan-prompt.txt +97 -16
  95. package/dist/prompts/regular-prompt.txt +1 -1
  96. package/dist/prompts/swarm-orchestrator-prompt.txt +25 -12
  97. package/dist/{rebase-MAMWPA2L.js → rebase-MLIN572O.js} +7 -7
  98. package/dist/{recap-IDBO3KM5.js → recap-CKGKFDJL.js} +7 -7
  99. package/dist/{run-RGZHCQ6M.js → run-CUNRQNZS.js} +19 -15
  100. package/dist/run-CUNRQNZS.js.map +1 -0
  101. package/dist/schema/settings.schema.json +35 -31
  102. package/dist/{shell-7ADCDFIV.js → shell-M2YYPNGV.js} +6 -6
  103. package/dist/{summary-7J2HORFD.js → summary-XR4CBJEG.js} +9 -9
  104. package/dist/{test-SRB7EWU6.js → test-ESDAHEVE.js} +8 -8
  105. package/dist/{test-git-G7ATVIXG.js → test-git-KWPLHYSI.js} +4 -4
  106. package/dist/{test-jira-Q2HPA522.js → test-jira-6NK7UHSV.js} +3 -3
  107. package/dist/{test-prefix-JMDGXR5A.js → test-prefix-VVODGHXP.js} +4 -4
  108. package/dist/{test-webserver-GZFVXBGD.js → test-webserver-AHXKC6H4.js} +6 -6
  109. package/dist/{vscode-3I7ISHUU.js → vscode-OY7HOVRO.js} +6 -6
  110. package/package.json +1 -1
  111. package/dist/chunk-4JZEQBWV.js.map +0 -1
  112. package/dist/chunk-7RCUWU3I.js.map +0 -1
  113. package/dist/chunk-AYLC633W.js.map +0 -1
  114. package/dist/chunk-JD3K2344.js.map +0 -1
  115. package/dist/chunk-PD75ZCFT.js.map +0 -1
  116. package/dist/chunk-QQULYI2S.js.map +0 -1
  117. package/dist/chunk-TAEVA4QR.js.map +0 -1
  118. package/dist/chunk-TN2D2RX7.js.map +0 -1
  119. package/dist/chunk-VIQOQ463.js.map +0 -1
  120. package/dist/chunk-VVQQIG64.js.map +0 -1
  121. package/dist/chunk-WGUGB54H.js.map +0 -1
  122. package/dist/chunk-YUOVWWJX.js.map +0 -1
  123. package/dist/chunk-ZUIFO7B4.js.map +0 -1
  124. package/dist/cleanup-RJKLI47I.js.map +0 -1
  125. package/dist/database-helpers-PRDFNDRO.js +0 -11
  126. package/dist/dev-server-ZNTLWOL5.js.map +0 -1
  127. package/dist/open-KUO35JIJ.js.map +0 -1
  128. package/dist/plan-7CF56OIR.js.map +0 -1
  129. package/dist/run-RGZHCQ6M.js.map +0 -1
  130. /package/dist/{ClaudeContextManager-RRGREEZQ.js.map → ClaudeContextManager-ZH6LEA5I.js.map} +0 -0
  131. /package/dist/{ClaudeService-LEPW6QAC.js.map → ClaudeService-YR66WXZN.js.map} +0 -0
  132. /package/dist/{IssueTrackerFactory-KE2BDCLC.js.map → IssueTrackerFactory-O2ZBA666.js.map} +0 -0
  133. /package/dist/{LoomLauncher-GKQMR5E6.js.map → LoomLauncher-V54ENBEF.js.map} +0 -0
  134. /package/dist/{MetadataManager-V4LSJ2PB.js.map → MetadataManager-HHE6LQF2.js.map} +0 -0
  135. /package/dist/{PromptTemplateManager-I75WKXM4.js.map → PromptTemplateManager-4RFELNYY.js.map} +0 -0
  136. /package/dist/{SettingsManager-KQU7OX7G.js.map → SettingsManager-SLSYEYDZ.js.map} +0 -0
  137. /package/dist/{build-V3KADFMO.js.map → build-ZTGWDHWU.js.map} +0 -0
  138. /package/dist/{chunk-RFCAPHL5.js.map → chunk-C2BVNJW5.js.map} +0 -0
  139. /package/dist/{chunk-3XEXT35Z.js.map → chunk-E5OM25WK.js.map} +0 -0
  140. /package/dist/{chunk-NUUFP53X.js.map → chunk-GPBX2BY2.js.map} +0 -0
  141. /package/dist/{chunk-SN4S5CWL.js.map → chunk-GQDVH6FA.js.map} +0 -0
  142. /package/dist/{chunk-KQSV7FOG.js.map → chunk-J5JOJPK3.js.map} +0 -0
  143. /package/dist/{chunk-QNPJXO53.js.map → chunk-KGOBNC5A.js.map} +0 -0
  144. /package/dist/{chunk-H3T3EPF3.js.map → chunk-LNY2Y32V.js.map} +0 -0
  145. /package/dist/{chunk-QED2WB2D.js.map → chunk-PS6K2AOV.js.map} +0 -0
  146. /package/dist/{chunk-SA446KA2.js.map → chunk-T4KFKKEB.js.map} +0 -0
  147. /package/dist/{chunk-XCP2WDYA.js.map → chunk-T4NESGYB.js.map} +0 -0
  148. /package/dist/{chunk-QXGM32TO.js.map → chunk-TJDKGKQV.js.map} +0 -0
  149. /package/dist/{chunk-X5DRLONY.js.map → chunk-UXBVDD7U.js.map} +0 -0
  150. /package/dist/{chunk-JDN4SPV3.js.map → chunk-WYDLOQYO.js.map} +0 -0
  151. /package/dist/{chunk-NTDY5AMO.js.map → chunk-ZEFTWM5Z.js.map} +0 -0
  152. /package/dist/{commit-SUHRUMDE.js.map → commit-L5JNBU4U.js.map} +0 -0
  153. /package/dist/{compile-2MD346PO.js.map → compile-GPJOHXH4.js.map} +0 -0
  154. /package/dist/{contribute-P4BMRY7C.js.map → contribute-QEGCI4PS.js.map} +0 -0
  155. /package/dist/{feedback-Q6WG2WX4.js.map → feedback-2LWXKLQZ.js.map} +0 -0
  156. /package/dist/{database-helpers-PRDFNDRO.js.map → git-IS7AV3ED.js.map} +0 -0
  157. /package/dist/{git-TX2IEMB3.js.map → ignite-VQDJQ37S.js.map} +0 -0
  158. /package/dist/{init-5HFY7JG6.js.map → init-7SDJUAEZ.js.map} +0 -0
  159. /package/dist/{install-deps-J4ALTM27.js.map → install-deps-NGSFDNUW.js.map} +0 -0
  160. /package/dist/{issues-LZMIF22U.js.map → issues-4HQKEUP7.js.map} +0 -0
  161. /package/dist/{lint-XIXKU22H.js.map → lint-C5FOVRXY.js.map} +0 -0
  162. /package/dist/{ignite-P644W2PK.js.map → neon-helpers-LCZAN4U4.js.map} +0 -0
  163. /package/dist/{projects-L5AHUBGA.js.map → projects-3F6T3KZL.js.map} +0 -0
  164. /package/dist/{rebase-MAMWPA2L.js.map → rebase-MLIN572O.js.map} +0 -0
  165. /package/dist/{recap-IDBO3KM5.js.map → recap-CKGKFDJL.js.map} +0 -0
  166. /package/dist/{shell-7ADCDFIV.js.map → shell-M2YYPNGV.js.map} +0 -0
  167. /package/dist/{summary-7J2HORFD.js.map → summary-XR4CBJEG.js.map} +0 -0
  168. /package/dist/{test-SRB7EWU6.js.map → test-ESDAHEVE.js.map} +0 -0
  169. /package/dist/{test-git-G7ATVIXG.js.map → test-git-KWPLHYSI.js.map} +0 -0
  170. /package/dist/{test-jira-Q2HPA522.js.map → test-jira-6NK7UHSV.js.map} +0 -0
  171. /package/dist/{test-prefix-JMDGXR5A.js.map → test-prefix-VVODGHXP.js.map} +0 -0
  172. /package/dist/{test-webserver-GZFVXBGD.js.map → test-webserver-AHXKC6H4.js.map} +0 -0
  173. /package/dist/{vscode-3I7ISHUU.js.map → vscode-OY7HOVRO.js.map} +0 -0
@@ -300,16 +300,20 @@ The following JSON Schema defines valid iloom settings:
300
300
  "enum": [
301
301
  "sonnet",
302
302
  "opus",
303
- "haiku"
303
+ "haiku",
304
+ "sonnet[1m]",
305
+ "opus[1m]"
304
306
  ],
305
- "description": "Claude model shorthand: sonnet, opus, or haiku"
307
+ "description": "Claude model shorthand: sonnet, opus, haiku, sonnet[1m], or opus[1m]"
306
308
  },
307
309
  "swarmModel": {
308
310
  "type": "string",
309
311
  "enum": [
310
312
  "sonnet",
311
313
  "opus",
312
- "haiku"
314
+ "haiku",
315
+ "sonnet[1m]",
316
+ "opus[1m]"
313
317
  ],
314
318
  "description": "Model to use for this agent in swarm mode. Overrides the base model when running inside swarm workers."
315
319
  },
@@ -359,7 +363,9 @@ The following JSON Schema defines valid iloom settings:
359
363
  "enum": [
360
364
  "sonnet",
361
365
  "opus",
362
- "haiku"
366
+ "haiku",
367
+ "sonnet[1m]",
368
+ "opus[1m]"
363
369
  ],
364
370
  "default": "opus",
365
371
  "description": "Claude model shorthand for spin orchestrator"
@@ -369,7 +375,9 @@ The following JSON Schema defines valid iloom settings:
369
375
  "enum": [
370
376
  "sonnet",
371
377
  "opus",
372
- "haiku"
378
+ "haiku",
379
+ "sonnet[1m]",
380
+ "opus[1m]"
373
381
  ],
374
382
  "description": "Model for the spin orchestrator when running in swarm mode. Overrides spin.model for swarm workflows."
375
383
  },
@@ -390,7 +398,9 @@ The following JSON Schema defines valid iloom settings:
390
398
  "enum": [
391
399
  "sonnet",
392
400
  "opus",
393
- "haiku"
401
+ "haiku",
402
+ "sonnet[1m]",
403
+ "opus[1m]"
394
404
  ],
395
405
  "default": "opus",
396
406
  "description": "Claude model shorthand for plan command"
@@ -433,7 +443,9 @@ The following JSON Schema defines valid iloom settings:
433
443
  "enum": [
434
444
  "sonnet",
435
445
  "opus",
436
- "haiku"
446
+ "haiku",
447
+ "sonnet[1m]",
448
+ "opus[1m]"
437
449
  ],
438
450
  "default": "sonnet",
439
451
  "description": "Claude model shorthand for session summary generation"
@@ -454,6 +466,15 @@ The following JSON Schema defines valid iloom settings:
454
466
  "maximum": 65535,
455
467
  "description": "Base port for web workspace port calculations (default: 3000)"
456
468
  },
469
+ "protocol": {
470
+ "type": "string",
471
+ "enum": [
472
+ "http",
473
+ "https"
474
+ ],
475
+ "default": "http",
476
+ "description": "Protocol for dev server URLs (http or https)"
477
+ },
457
478
  "devServer": {
458
479
  "type": "string",
459
480
  "enum": [
@@ -481,6 +502,13 @@ The following JSON Schema defines valid iloom settings:
481
502
  },
482
503
  "description": "Build arguments to pass to docker build (e.g., {\"NODE_ENV\": \"development\"})"
483
504
  },
505
+ "dockerBuildSecrets": {
506
+ "type": "object",
507
+ "additionalProperties": {
508
+ "type": "string"
509
+ },
510
+ "description": "Secret files to mount during docker build via --secret (e.g., {\"npmrc\": \"~/.npmrc\"}). Keys are secret IDs, values are source file paths."
511
+ },
484
512
  "dockerRunArgs": {
485
513
  "type": "array",
486
514
  "items": {
@@ -579,30 +607,6 @@ The following JSON Schema defines valid iloom settings:
579
607
  ],
580
608
  "additionalProperties": false,
581
609
  "description": "Neon database configuration. Requires Neon CLI installed and authenticated for database branching."
582
- },
583
- "supabase": {
584
- "type": "object",
585
- "properties": {
586
- "projectRef": {
587
- "type": "string",
588
- "minLength": 1,
589
- "description": "Supabase project reference ID (e.g., \"abcdefghijklmnop\")"
590
- },
591
- "parentBranch": {
592
- "type": "string",
593
- "minLength": 1,
594
- "description": "Reserved for future use. Supabase currently always branches from the default branch."
595
- },
596
- "withData": {
597
- "type": "boolean",
598
- "description": "Whether to include data when creating a new branch (defaults to true)"
599
- }
600
- },
601
- "required": [
602
- "projectRef"
603
- ],
604
- "additionalProperties": false,
605
- "description": "Supabase database configuration. Requires Supabase CLI installed and authenticated for database branching."
606
610
  }
607
611
  },
608
612
  "additionalProperties": false,
@@ -1385,7 +1389,7 @@ Swarms are iloom's most powerful feature. Give them an epic and they'll decompos
1385
1389
 
1386
1390
  You can tune how swarms balance thinking depth versus speed:
1387
1391
 
1388
- 🧠 Maximum Quality — Agents take more time to reason deeply, produce thorough analysis, and write more carefully considered code. Best when correctness matters most. Uses Opus for all agents, which is significantly more expensive.
1392
+ 🧠 Maximum Quality — Agents take more time to reason deeply, produce thorough analysis, and write more carefully considered code. Best when correctness matters most. Uses Opus[1m] for the orchestrator and Opus for all other agents, which is significantly more expensive.
1389
1393
  ⚖️ Balanced (recommended) — Strong reasoning at a practical pace. The sweet spot for most work.
1390
1394
  ⚡ Fast & Cheap — Agents move quickly through issues with lighter analysis. Great for rapid prototyping or if you're on a budget.
1391
1395
  ```
@@ -1889,8 +1893,8 @@ When configuring agents, use these model identifiers:
1889
1893
 
1890
1894
  | Mode | Focus | Models used | Best for |
1891
1895
  |------|-------|-------------|----------|
1892
- | **Maximum Quality** | Deepest reasoning, best analysis | Opus everywhere (except complexity evaluator stays Haiku) | Complex epics, critical features |
1893
- | **Balanced** (default) | Opus for analysis, Sonnet for everything else | Opus: analyzer, analyze-and-plan. Sonnet: orchestrator, worker, planner, implementer, enhancer, code-reviewer. Haiku: complexity evaluator | Most tasks |
1896
+ | **Maximum Quality** | Deepest reasoning, best analysis | Opus[1m] orchestrator; Opus everywhere else (complexity evaluator stays Haiku) | Complex epics, critical features |
1897
+ | **Balanced** (default) | Strong reasoning at a practical pace | Opus[1m]: orchestrator. Opus: worker, analyzer, analyze-and-plan. Sonnet: planner, implementer, enhancer, code-reviewer. Haiku: complexity evaluator | Most tasks |
1894
1898
  | **Fast & Cheap** | Quick iterations, lowest cost | Haiku everywhere | Simple tasks, rapid prototyping |
1895
1899
 
1896
1900
  Note: The complexity evaluator always stays on Haiku regardless of mode, since it performs a simple classification task that does not benefit from a larger model.
@@ -1899,8 +1903,8 @@ When configuring agents, use these model identifiers:
1899
1903
 
1900
1904
  When the user selects a mode, **merge** the model values into their existing settings. Only set the `swarmModel` (or `model` for the worker) field on each agent — do not overwrite or remove other agent settings.
1901
1905
 
1902
- - **Maximum Quality:** Set `spin.swarmModel` to `"opus"`. Set `agents.iloom-swarm-worker.model` to `"opus"`. Set `swarmModel` to `"opus"` on all phase agents (`iloom-issue-analyzer`, `iloom-issue-planner`, `iloom-issue-implementer`, `iloom-issue-enhancer`, `iloom-issue-analyze-and-plan`, `iloom-code-reviewer`). Keep `iloom-issue-complexity-evaluator.swarmModel` as `"haiku"`.
1903
- - **Balanced (default):** Set `spin.swarmModel` to `"sonnet"`. Set `agents.iloom-swarm-worker.model` to `"sonnet"`. Set `swarmModel` to `"opus"` on analysis agents (`iloom-issue-analyzer`, `iloom-issue-analyze-and-plan`). Set `swarmModel` to `"sonnet"` on all other phase agents (`iloom-issue-planner`, `iloom-issue-implementer`, `iloom-issue-enhancer`, `iloom-code-reviewer`). Keep `iloom-issue-complexity-evaluator.swarmModel` as `"haiku"`.
1906
+ - **Maximum Quality:** Set `spin.swarmModel` to `"opus[1m]"`. Set `agents.iloom-swarm-worker.model` to `"opus"`. Set `swarmModel` to `"opus"` on all phase agents (`iloom-issue-analyzer`, `iloom-issue-planner`, `iloom-issue-implementer`, `iloom-issue-enhancer`, `iloom-issue-analyze-and-plan`, `iloom-code-reviewer`). Keep `iloom-issue-complexity-evaluator.swarmModel` as `"haiku"`.
1907
+ - **Balanced (default):** No settings changes needed — the defaults already apply Opus[1m] for the orchestrator, Opus for workers and analysis agents, Sonnet for other phase agents, and Haiku for the complexity evaluator. Only set explicit values if the user wants to be explicit about the configuration.
1904
1908
  - **Fast & Cheap:** Set `spin.swarmModel` to `"haiku"`. Set `agents.iloom-swarm-worker.model` to `"haiku"`. Set `swarmModel` to `"haiku"` on all phase agents and `iloom-issue-complexity-evaluator`.
1905
1909
 
1906
1910
  **Important notes:**
@@ -226,7 +226,7 @@ The `il` command can also be used as a shorter alias.
226
226
  ### Loom Isolation Features
227
227
  Each loom provides:
228
228
  - Dedicated Git worktree (no branch conflicts)
229
- - Unique database branch via database branching integration
229
+ - Unique database branch via Neon integration
230
230
  - Color-coded terminal/VS Code for visual context switching
231
231
  - Deterministic port assignment (3000 + issue number)
232
232
 
@@ -1082,6 +1082,9 @@ If workflow plan determined SKIP_PLANNING AND complexity is COMPLEX:
1082
1082
 
1083
1083
  **Execute for both SIMPLE and COMPLEX workflows**
1084
1084
 
1085
+ **PREREQUISITE CHECK — Do not skip this:**
1086
+ Before executing implementation, verify that a plan comment exists on the issue (either from STEP 2-SIMPLE or STEP 3). If no plan comment exists and the workflow scan determined NEEDS_PLANNING, you MUST stop and run the appropriate analyze-and-plan or planning skill before proceeding. Do not implement without a documented plan.
1087
+
1085
1088
  Only execute if workflow plan determined NEEDS_IMPLEMENTATION:
1086
1089
 
1087
1090
  1. **Extract plan location from previous agent:**
@@ -14,7 +14,7 @@ Look for available Gemini MCP tools (typically named with "gemini" in the tool n
14
14
  - Brainstorming and creative exploration
15
15
  - Targeted queries and analysis
16
16
 
17
- **Workflow**: Use these tools during Phase 1 (Understanding) and Phase 2 (Design Exploration) to gather multiple perspectives. Synthesize Gemini's responses into your plan.
17
+ **Workflow**: Use these tools during Phase 1 (Understanding) and Phase 2 (Design Exploration) to gather multiple perspectives. Delegate deep research to the `iloom-issue-analyzer` agent (see Phase 1) and use Gemini for supplementary brainstorming. Synthesize findings into your plan.
18
18
 
19
19
  **Fallback**: If Gemini MCP tools are unavailable (error responses), continue using your own capabilities for planning.
20
20
  {{/if}}
@@ -26,7 +26,7 @@ You have access to Codex AI through MCP tools for code-aware planning.
26
26
  Look for available Codex MCP tools (typically named with "codex" in the tool name) for:
27
27
  - Code-focused analysis and implementation suggestions
28
28
 
29
- **Workflow**: Use this tool during Phase 2 (Design Exploration) and Phase 3 (Issue Decomposition) to get code-specific insights.
29
+ **Workflow**: Use this tool during Phase 2 (Design Exploration) and Phase 3 (Issue Decomposition) for code-specific insights. Delegate deep research to the `iloom-issue-analyzer` agent (see Phase 1) and use Codex for supplementary code analysis.
30
30
 
31
31
  **Fallback**: If Codex MCP tool is unavailable (error response), continue using your own capabilities for planning.
32
32
  {{/if}}
@@ -35,9 +35,9 @@ Look for available Codex MCP tools (typically named with "codex" in the tool nam
35
35
  ### Default Planning (Claude)
36
36
 
37
37
  You are the primary planner for this session. Use your capabilities to:
38
- - Gather context by exploring the codebase with Read, Glob, Grep tools
39
- - Use Task subagents for comprehensive research when needed
40
- - Synthesize information into a coherent implementation plan
38
+ - Delegate deep research to the `iloom-issue-analyzer` agent (see Phase 1 Research Phase) for thorough problem space, codebase, and third-party dependency understanding
39
+ - Use Task subagents for supplementary research tasks, keeping the main conversation focused on planning decisions
40
+ - Synthesize findings into a coherent implementation plan
41
41
  {{/unless}}{{/unless}}
42
42
 
43
43
  {{#if HAS_REVIEWER}}
@@ -148,13 +148,41 @@ Each issue you help create should represent a single, focused unit of work that:
148
148
  - Ask clarifying questions when requirements are ambiguous
149
149
  - Present trade-offs clearly when multiple approaches exist
150
150
 
151
+ **No Spikes or Investigation Issues**
152
+ All technical uncertainty must be resolved during the research phase (Phase 1), before decomposition begins. Never create "spike", "investigation", "proof of concept", or "exploration" child issues. If the research phase reveals a question that can't be answered without writing code (e.g., "can this library handle our use case?"), flag it to the user as a blocker — they decide whether to investigate before planning continues or to proceed optimistically. The swarm executes — it does not research.
153
+
151
154
  ---
152
155
 
153
156
  ## Planning Process
154
157
 
155
158
  ### Phase 1: Understanding (Brainstorming)
156
159
 
157
- Start by understanding the problem space. Ask clarifying questions ONE AT A TIME to prevent cognitive overload.
160
+ Before asking questions, conduct structured research to build a thorough understanding. Research informs better questions and prevents decomposition errors later.
161
+
162
+ #### Research Phase
163
+
164
+ **Delegate research to the `iloom-issue-analyzer` agent** to build thorough understanding before decomposing work.
165
+
166
+ Launch the analyzer as a Task subagent in foreground mode (wait for completion since you need the results before proceeding):
167
+
168
+ ```
169
+ Task(
170
+ subagent_type: "iloom-issue-analyzer",
171
+ prompt: "Analyze [issue description/context]. Focus on: problem space understanding, third-party dependencies, codebase entry points and patterns, cross-cutting concerns, and edge cases. Do NOT create issue comments - just return your findings."
172
+ )
173
+ ```
174
+
175
+ The analyzer agent has a comprehensive research framework covering:
176
+ - Problem space and domain understanding
177
+ - Third-party dependency research (Context7, WebSearch, ToolSearch)
178
+ - Systematic codebase exploration (entry points, dependency mapping, pattern recognition)
179
+ - Cross-cutting concern analysis
180
+
181
+ Wait for the analyzer to complete, then use its findings to inform sharper clarifying questions and better decomposition decisions.
182
+
183
+ ---
184
+
185
+ After research is complete, ask clarifying questions. Your research findings should inform sharper, more specific questions. **Ask up to 4 independent questions at once** — only hold back questions whose answers depend on a prior question's response.
158
186
 
159
187
  **Using the AskUserQuestion Tool:**
160
188
  When asking clarifying questions, use the AskUserQuestion tool to present options to the user. This provides a better UX with structured choices:
@@ -163,23 +191,17 @@ When asking clarifying questions, use the AskUserQuestion tool to present option
163
191
  - Include a clear question header
164
192
  - The user can always provide custom input if options don't fit
165
193
 
166
- Example tool usage:
194
+ Example tool usage (batching independent questions):
167
195
  ```
168
196
  AskUserQuestion(
169
- question: "What authentication approach should we use?",
170
- options: ["OAuth 2.0 (Recommended)", "JWT tokens", "Session-based", "Other"]
197
+ question: "A few questions to clarify scope:\n\n1. What authentication approach should we use?\n2. Are there hard constraints (time, dependencies, compatibility)?\n3. Should this support multiple database providers or just one?",
198
+ options: ["Answer inline", "Let me think — ask again later"]
171
199
  )
172
200
  ```
173
201
 
174
202
  **Question Patterns:**
175
203
  - Use the AskUserQuestion tool with multiple-choice options when feasible (easier to answer)
176
204
  - Focus on: purpose, constraints, success criteria, existing context
177
- - Acknowledge each answer before moving to the next question
178
-
179
- **Example Questions:**
180
- - "What problem does this feature solve for users?"
181
- - "Are there existing patterns in the codebase we should follow?"
182
- - "What are the hard constraints (time, dependencies, compatibility)?"
183
205
 
184
206
  ### Phase 2: Design Exploration
185
207
 
@@ -283,6 +305,8 @@ Each child issue should include ONLY these sections:
283
305
  - `substantive: <file-path> — <what it should contain or export>` — the file has expected content, exports, or structure
284
306
  - `wired: <file-path> — <how it should be integrated>` — the code is properly connected to the rest of the system (e.g., registered in a router, imported by a parent module, called from an entry point)
285
307
 
308
+ For replacement or migration work, the `substantive` check type means "equivalent to the original in observable terms" — not just "file has expected content or exports." Specify what observable behavior or output the file must reproduce from the original.
309
+
286
310
  Example:
287
311
  ```
288
312
  ## Must-Haves
@@ -292,10 +316,33 @@ Each child issue should include ONLY these sections:
292
316
  ```
293
317
  {{/if}}
294
318
 
319
+ **Acceptance Criteria Quality Rules:**
320
+
321
+ - **Observable over structural**: Criteria must describe what a user or developer can observe by using the app or running a command — not what exists in code. "A user can log in and make an authenticated API call that returns data" not "Auth module is configured with Cognito credentials." If you can't verify the criterion without reading source code, rewrite it.
322
+ - **Replacement work requires comparison criteria**: When an issue replaces or rewrites existing functionality, at least one acceptance criterion must define equivalence to the original in observable terms — visual appearance, feature set, and behavior. Specify which dimensions must match. "Has a login form" is not a replacement criterion; "Login page renders with the same layout, branding, interactive features, and error states as the existing page" is.
323
+ - **Integration points need cross-boundary criteria**: When work involves two systems communicating (shared auth, data passing between frameworks, synchronized state), at least one criterion must verify behavior across the boundary — not just that each side is configured. Pattern: "Action in system A produces expected result in system B."
324
+ - **Criteria must specify the verification context**: State the conditions under which the criterion is verified. If the work must function within a specific environment (inside an existing app's layout, behind a specific auth flow, on a single dev server), the criterion must say so. "Works" is ambiguous; "works when run via the project's standard dev command on a single port" is not.
325
+ - **Include constraint criteria for shared systems**: When work modifies or integrates with a shared system, at least one criterion must verify that existing behavior is preserved. "All existing pages/features continue to function unchanged" catches regressions that purely additive criteria miss.
326
+
295
327
  Do NOT add "Implementation Details", "Technical Approach", or similar sections — these violate the rule above.
296
328
 
297
329
  ---
298
330
 
331
+ ## Issue Tracker
332
+
333
+ The configured issue tracker for this project is **{{ISSUE_TRACKER}}**.
334
+
335
+ **All issue operations** (creating issues, fetching issue details, managing dependencies, posting comments) **MUST use the `mcp__issue_management__*` MCP tools** listed below. These tools abstract across tracker backends (GitHub, Linear, Jira) and handle authentication, format conversion, and API differences automatically.
336
+
337
+ {{#unless IS_GITHUB_TRACKER}}
338
+ **Do NOT use `gh` CLI for issue or project management** (e.g., `gh issue`, `gh search`, `gh project`, `gh api` for issues) — the configured tracker is {{ISSUE_TRACKER}}, not GitHub. The `gh` CLI only works with GitHub issues and will fail or return irrelevant results. Use the MCP tools above instead.
339
+ {{#if IS_GITHUB_VCS}}
340
+ `gh` is still available for GitHub VCS operations: `gh pr`, `gh repo`.
341
+ {{/if}}
342
+ {{/unless}}
343
+
344
+ ---
345
+
299
346
  ## Issue Creation
300
347
 
301
348
  At the end of the planning session, you have MCP tools to create issues:
@@ -330,8 +377,10 @@ Before presenting the plan to the user, audit it against these questions:
330
377
  1. **For each blocking dependency:** "Can I replace this with a shared contract so both issues run in parallel?" If yes, rewrite the issues with an explicit contract and remove the dependency.
331
378
  2. **DAG shape check:** Is the graph wide and shallow, or narrow and linear? If your longest chain is more than 2 deep, re-examine whether intermediate dependencies are truly hard blockers.
332
379
  3. **Contract completeness:** Does every issue that produces or consumes a shared API specify the exact contract (function signatures, types, module exports) in its description?
380
+ 4. **Acceptance criteria observability:** For each acceptance criterion, ask: can this be verified by using the app or running a command, without reading source code? If not, rewrite it.
333
381
  {{#if WAVE_VERIFICATION}}
334
- 4. **Wave verification tasks:** Does every wave boundary have a verification child issue? Count your waves — if you have N waves, you need N verification issues (one after each wave, including a final one). If any wave boundary is missing a verification issue, add one now. This is mandatory, not optional.
382
+ 5. **Wave verification tasks:** Does every wave boundary have a verification child issue? Count your waves — if you have N waves, you need N verification issues (one after each wave, including a final one). If any wave boundary is missing a verification issue, add one now. This is mandatory, not optional.
383
+ 6. **Integration must-haves for producer/consumer pairs:** For each wave with parallel issues, check: does any issue produce a class/service that a sibling issue's factory or provider is expected to consume? If yes, the wave's verification issue must have a `substantive:` must-have verifying the consumer injects/passes a real instance (not null). Missing these means nobody checks the wiring after merge.
335
384
  {{/if}}
336
385
 
337
386
  {{#if WAVE_VERIFICATION}}
@@ -370,12 +419,44 @@ Verify integration (depends on A, B, C)
370
419
  ```
371
420
 
372
421
  Skip verification tasks only when the DAG is trivially small (2 or fewer child issues).
422
+
423
+ **Integration-Point Must-Haves for Producer/Consumer Pairs:**
424
+
425
+ When parallel issues in the same wave have producer/consumer relationships, the verification issue for that wave MUST include integration must-haves that verify the consumer actually uses the producer's output — not just that both pieces exist independently.
426
+
427
+ **How to detect producer/consumer pairs:** Scan parallel issues within each wave for these signals:
428
+ - One issue's "Shared Contracts (Produces)" matches another issue's "Shared Contracts (Consumes)"
429
+ - A factory or provider class in issue B constructs or injects instances of a class created in issue A
430
+ - Constructor parameters or method signatures in one issue are typed as classes/interfaces from a sibling issue
431
+ - DI container registrations in one issue provide services consumed in another
432
+
433
+ **What to generate:** For each detected pair, add a `substantive:` must-have to the wave's verification issue. The must-have should verify that the consumer passes or injects a real instance of the producer's class — not a null placeholder. The check must be statically verifiable (grep for null in constructor calls, verify the factory imports and instantiates the real class, check that DI binds to the concrete implementation).
434
+
435
+ **Format:**
436
+ ```
437
+ ## Must-Haves
438
+ - substantive: <consumer-file-path> — <ConsumerClass> passes a real <ProducerClass> instance (not null) to <target> in <methods-or-call-sites>
439
+ ```
440
+
441
+ **Example:** If wave 2 has:
442
+ - Issue A: Creates `LayoutCanvasView` class
443
+ - Issue B: Creates `LayoutPlayerItemFactory` that constructs `LayoutPlayerItem`
444
+
445
+ Then the wave 2 verification issue gets:
446
+ ```
447
+ ## Must-Haves
448
+ - substantive: src/factories/LayoutPlayerItemFactory.ts — LayoutPlayerItemFactory injects CanvasViewFactoryService and passes a real LayoutCanvasView (not null) to LayoutPlayerItem in both createFromEditorItem and createFromDraftItem
449
+ ```
450
+
451
+ Place integration must-haves on the wave's verification issue, NOT on the individual child issues — the integration only exists after both are merged.
373
452
  {{/if}}
374
453
 
454
+ {{#unless AUTONOMOUS_MODE}}
375
455
  **Before Creating Issues:**
376
456
  1. Summarize the proposed issues and their relationships
377
457
  2. Ask for explicit confirmation: "Ready to create these issues?"
378
458
  3. Create issues only after user approval
459
+ {{/unless}}
379
460
 
380
461
  {{#if EXISTING_ISSUE_MODE}}
381
462
  **Issue Decomposition Mode:**
@@ -133,7 +133,7 @@ The `il` command can also be used as a shorter alias.
133
133
  ### Loom Isolation Features
134
134
  Each loom provides:
135
135
  - Dedicated Git worktree (no branch conflicts)
136
- - Unique database branch via database branching integration
136
+ - Unique database branch via Neon integration
137
137
  - Color-coded terminal/VS Code for visual context switching
138
138
  - Deterministic port assignment (3000 + issue number)
139
139
 
@@ -199,7 +199,16 @@ If both fail, mark the child as `failed` with the error and skip spawning.
199
199
 
200
200
  **Do NOT use `il start` to create worktrees. Worktrees are created by this orchestrator via `git worktree add`.**
201
201
 
202
- #### Step 2.2b: Spawn the Child Agent
202
+ #### Step 2.2b: Capture Pre-Wave Commit and Spawn the Child Agent
203
+
204
+ **Before spawning this wave**, capture the current epic branch HEAD so the wave verifier can diff only the changes introduced by this wave:
205
+
206
+ ```bash
207
+ cd "{{EPIC_WORKTREE_PATH}}"
208
+ PRE_WAVE_COMMIT=$(git rev-parse HEAD)
209
+ ```
210
+
211
+ Save `PRE_WAVE_COMMIT` — you will pass it to any verification agents spawned for this wave.
203
212
 
204
213
  **Spawn all unblocked issues in parallel** by making multiple `Task` tool calls in a single message.
205
214
 
@@ -242,6 +251,8 @@ The prompt for each verification agent should be exactly:
242
251
  ```
243
252
  Issue: #<child-number>
244
253
  Worktree: <child-worktree-path>
254
+ Epic Worktree: {{EPIC_WORKTREE_PATH}}
255
+ Pre-wave commit: <PRE_WAVE_COMMIT>
245
256
 
246
257
  IMPORTANT: Your working directory is <child-worktree-path>. Run `cd <child-worktree-path>` as your FIRST action before doing ANY work.
247
258
  ```
@@ -397,7 +408,9 @@ If the `il cleanup` command fails, log the error but continue with the orchestra
397
408
  After a child completes:
398
409
  1. Remove the completed child's issue number from all other children's `blockedBy` lists
399
410
  2. Check if any previously blocked children are now unblocked (empty `blockedBy` list)
400
- 3. If newly unblocked children exist: spawn agents for them (same pattern as Phase 2, Step 2.2)
411
+ 3. If newly unblocked children exist:
412
+ - Capture a fresh `PRE_WAVE_COMMIT` before spawning (same as Step 2.2b)
413
+ - Spawn agents for them (same pattern as Phase 2, Step 2.2)
401
414
 
402
415
  {{#if DRAFT_PR_MODE}}
403
416
  {{#if AUTO_COMMIT_PUSH}}
@@ -468,23 +481,19 @@ git log --oneline -5
468
481
 
469
482
  #### Step 5.2.5a: Run Code Review
470
483
 
471
- **Delegate this to a subagent.** Spawn a Task subagent to invoke the code reviewer:
484
+ **Delegate this to a subagent.** Spawn a Task subagent using the code reviewer agent directly:
472
485
 
473
- - `subagent_type`: `"general-purpose"`
486
+ - `subagent_type`: `"iloom-swarm-code-reviewer"`
474
487
  - Prompt:
475
488
 
476
489
  ```
477
490
  Run a full code review of the integrated epic branch.
478
491
 
479
- ## Instructions
480
-
481
- You are in the epic worktree at `{{EPIC_WORKTREE_PATH}}`. All child agents' work has been merged into this branch.
492
+ You are in the epic worktree at `{{EPIC_WORKTREE_PATH}}`. All child agents' work has been merged into this branch. Individual file changes have already been reviewed per-wave. Focus on cross-wave integration concerns: duplicate implementations across child issues, inconsistent patterns where children touched the same areas, conflicts introduced by combining waves.
482
493
 
483
- 1. Execute: @agent-iloom-code-reviewer with prompt "Run code review."
484
- 2. Wait for the review to complete.
485
- 3. Report back with the full review results, including all findings with their confidence scores, file locations, and recommendations.
486
- - If no issues found, report "No issues found."
487
- - If issues found, include the full structured report (Critical issues 95-100, Warnings 80-94).
494
+ Report the full review results, including all findings with their confidence scores, file locations, and recommendations.
495
+ - If no issues found, report "No issues found."
496
+ - If issues found, include the full structured report (Critical issues 95-100, Warnings 80-94).
488
497
  ```
489
498
 
490
499
  **Handle the subagent result:**
@@ -639,3 +648,7 @@ Mark todo #9, #10, #11, and #12 as completed.
639
648
  Mark todo #9, #10, and #11 as completed.
640
649
  {{/if}}
641
650
  {{/if}}
651
+
652
+ ### Step 5.5: Mark Epic as Complete
653
+
654
+ Call `mcp__recap__set_loom_state` with `{ "state": "done" }` (omit `worktreePath` — this targets the epic loom itself). This lets the iloom hook detect swarm completion and show appropriate agent guidance for follow-up questions.
@@ -2,24 +2,24 @@
2
2
  import {
3
3
  BuildRunner,
4
4
  MergeManager
5
- } from "./chunk-Q7VXHJP6.js";
5
+ } from "./chunk-EHAITKLS.js";
6
6
  import {
7
7
  installDependencies
8
- } from "./chunk-VIQOQ463.js";
8
+ } from "./chunk-OLJ54WGW.js";
9
9
  import {
10
10
  GitWorktreeManager
11
- } from "./chunk-4VQXMEEP.js";
11
+ } from "./chunk-PPQ5LV7U.js";
12
12
  import "./chunk-772N5WCA.js";
13
13
  import "./chunk-K3QGG4O2.js";
14
14
  import "./chunk-DDHWZNGL.js";
15
15
  import {
16
16
  getWorktreeRoot,
17
17
  isValidGitRepo
18
- } from "./chunk-QNPJXO53.js";
18
+ } from "./chunk-KGOBNC5A.js";
19
19
  import {
20
20
  SettingsManager
21
- } from "./chunk-WGUGB54H.js";
22
- import "./chunk-4JZEQBWV.js";
21
+ } from "./chunk-F5NKWLMQ.js";
22
+ import "./chunk-XIVLGWUX.js";
23
23
  import "./chunk-FTYWGQFM.js";
24
24
  import {
25
25
  logger
@@ -148,4 +148,4 @@ export {
148
148
  RebaseCommand,
149
149
  WorktreeValidationError
150
150
  };
151
- //# sourceMappingURL=rebase-MAMWPA2L.js.map
151
+ //# sourceMappingURL=rebase-MLIN572O.js.map
@@ -4,16 +4,16 @@ import {
4
4
  } from "./chunk-NXMDEL3F.js";
5
5
  import {
6
6
  findArchivedRecap
7
- } from "./chunk-3XEXT35Z.js";
7
+ } from "./chunk-E5OM25WK.js";
8
8
  import {
9
9
  IdentifierParser
10
- } from "./chunk-SN4S5CWL.js";
10
+ } from "./chunk-GQDVH6FA.js";
11
11
  import {
12
12
  GitWorktreeManager
13
- } from "./chunk-4VQXMEEP.js";
14
- import "./chunk-QNPJXO53.js";
15
- import "./chunk-WGUGB54H.js";
16
- import "./chunk-4JZEQBWV.js";
13
+ } from "./chunk-PPQ5LV7U.js";
14
+ import "./chunk-KGOBNC5A.js";
15
+ import "./chunk-F5NKWLMQ.js";
16
+ import "./chunk-XIVLGWUX.js";
17
17
  import "./chunk-FTYWGQFM.js";
18
18
  import "./chunk-VRPPI6GU.js";
19
19
 
@@ -135,4 +135,4 @@ var RecapCommand = class {
135
135
  export {
136
136
  RecapCommand
137
137
  };
138
- //# sourceMappingURL=recap-IDBO3KM5.js.map
138
+ //# sourceMappingURL=recap-CKGKFDJL.js.map
@@ -1,21 +1,22 @@
1
1
  #!/usr/bin/env node
2
2
  import {
3
- DevServerManager
4
- } from "./chunk-QQULYI2S.js";
3
+ DevServerManager,
4
+ buildDevServerUrl
5
+ } from "./chunk-P5MNWBLH.js";
5
6
  import {
6
7
  DockerManager
7
- } from "./chunk-AYLC633W.js";
8
- import "./chunk-H3T3EPF3.js";
8
+ } from "./chunk-7TN5VW4I.js";
9
+ import "./chunk-LNY2Y32V.js";
9
10
  import {
10
11
  getWorkspacePort
11
- } from "./chunk-ZUIFO7B4.js";
12
+ } from "./chunk-ERMEYFT6.js";
12
13
  import {
13
14
  IdentifierParser
14
- } from "./chunk-SN4S5CWL.js";
15
- import "./chunk-VIQOQ463.js";
15
+ } from "./chunk-GQDVH6FA.js";
16
+ import "./chunk-OLJ54WGW.js";
16
17
  import {
17
18
  GitWorktreeManager
18
- } from "./chunk-4VQXMEEP.js";
19
+ } from "./chunk-PPQ5LV7U.js";
19
20
  import {
20
21
  openBrowser
21
22
  } from "./chunk-WEBMMJKL.js";
@@ -28,11 +29,11 @@ import {
28
29
  import "./chunk-K3QGG4O2.js";
29
30
  import {
30
31
  extractIssueNumber
31
- } from "./chunk-QNPJXO53.js";
32
+ } from "./chunk-KGOBNC5A.js";
32
33
  import {
33
34
  SettingsManager
34
- } from "./chunk-WGUGB54H.js";
35
- import "./chunk-4JZEQBWV.js";
35
+ } from "./chunk-F5NKWLMQ.js";
36
+ import "./chunk-XIVLGWUX.js";
36
37
  import "./chunk-FTYWGQFM.js";
37
38
  import {
38
39
  logger
@@ -214,14 +215,16 @@ Make sure the project is built (run 'il start' first)`
214
215
  * Auto-starts dev server if not already running
215
216
  */
216
217
  async openWebBrowser(worktree) {
217
- var _a, _b, _c;
218
+ var _a, _b, _c, _d, _e;
218
219
  const cliOverrides = extractSettingsOverrides();
219
220
  const settings = await this.settingsManager.loadSettings(void 0, cliOverrides);
221
+ const isMainWorktree = await this.gitWorktreeManager.isMainWorktree(worktree, this.settingsManager);
220
222
  const port = await getWorkspacePort({
221
223
  worktreePath: worktree.path,
222
224
  worktreeBranch: worktree.branch,
223
225
  basePort: (_b = (_a = settings.capabilities) == null ? void 0 : _a.web) == null ? void 0 : _b.basePort,
224
- checkEnvFile: true
226
+ checkEnvFile: true,
227
+ isMainWorktree
225
228
  });
226
229
  const issueNumber = extractIssueNumber(worktree.branch);
227
230
  const dockerIdentifier = (issueNumber == null ? void 0 : issueNumber.toString()) ?? worktree.branch;
@@ -244,7 +247,8 @@ Make sure the project is built (run 'il start' first)`
244
247
  `Dev server failed to start on port ${port}. Opening browser anyway...`
245
248
  );
246
249
  }
247
- const url = `http://localhost:${port}`;
250
+ const protocol = ((_e = (_d = settings.capabilities) == null ? void 0 : _d.web) == null ? void 0 : _e.protocol) ?? "http";
251
+ const url = buildDevServerUrl(port, protocol);
248
252
  logger.info(`Opening browser: ${url}`);
249
253
  await openBrowser(url);
250
254
  logger.success("Browser opened");
@@ -253,4 +257,4 @@ Make sure the project is built (run 'il start' first)`
253
257
  export {
254
258
  RunCommand
255
259
  };
256
- //# sourceMappingURL=run-RGZHCQ6M.js.map
260
+ //# sourceMappingURL=run-CUNRQNZS.js.map