@iloom/cli 0.7.4 → 0.7.6
This diff covers publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in the public registry.
- package/LICENSE +1 -1
- package/README.md +2 -4
- package/dist/{BranchNamingService-UB2EJGFQ.js → BranchNamingService-AO7BPIUJ.js} +2 -2
- package/dist/{ClaudeContextManager-M57BQUMY.js → ClaudeContextManager-Y2YJC6BU.js} +4 -4
- package/dist/{ClaudeService-FLZ2IXAO.js → ClaudeService-NDVFQRKC.js} +3 -3
- package/dist/{LoomLauncher-5PPVFTFN.js → LoomLauncher-U2B3VHPC.js} +4 -4
- package/dist/{PRManager-YTG6XPMG.js → PRManager-7F3AAY66.js} +4 -4
- package/dist/README.md +2 -4
- package/dist/agents/iloom-issue-analyze-and-plan.md +1 -1
- package/dist/agents/iloom-issue-analyzer.md +1 -1
- package/dist/agents/iloom-issue-complexity-evaluator.md +1 -1
- package/dist/agents/iloom-issue-enhancer.md +1 -1
- package/dist/agents/iloom-issue-implementer.md +1 -1
- package/dist/agents/iloom-issue-planner.md +1 -1
- package/dist/agents/iloom-issue-reviewer.md +1 -1
- package/dist/{chunk-7GKMQJGQ.js → chunk-64HCHVJM.js} +2 -2
- package/dist/{chunk-JWUYPJ7K.js → chunk-6YAMWLCP.js} +3 -3
- package/dist/{chunk-33P5VSKS.js → chunk-C7YW5IMS.js} +2 -2
- package/dist/{chunk-37V2NBYR.js → chunk-CAXFWFV6.js} +2 -2
- package/dist/chunk-CFQVOTHO.js +111 -0
- package/dist/chunk-CFQVOTHO.js.map +1 -0
- package/dist/{chunk-AFRICMSW.js → chunk-ENMTWE74.js} +2 -2
- package/dist/{chunk-RVLRPQU4.js → chunk-ETY2SBW5.js} +21 -18
- package/dist/chunk-ETY2SBW5.js.map +1 -0
- package/dist/{chunk-ITIXKM24.js → chunk-IGKPPACU.js} +2 -2
- package/dist/chunk-IGKPPACU.js.map +1 -0
- package/dist/{chunk-GH4FLYV5.js → chunk-NEPH2O4C.js} +2 -2
- package/dist/{chunk-GJMEKEI5.js → chunk-NPEMVE27.js} +342 -6
- package/dist/chunk-NPEMVE27.js.map +1 -0
- package/dist/{chunk-XAHE76RL.js → chunk-O36JLYNW.js} +2 -2
- package/dist/{chunk-6VQNF44G.js → chunk-Q457PKGH.js} +2 -2
- package/dist/{chunk-7FM7AL7S.js → chunk-VYKKWU36.js} +2 -2
- package/dist/{chunk-EDDIAWVM.js → chunk-WT4UGBE2.js} +8 -7
- package/dist/chunk-WT4UGBE2.js.map +1 -0
- package/dist/{chunk-453NC377.js → chunk-WZYBHD7P.js} +3 -106
- package/dist/chunk-WZYBHD7P.js.map +1 -0
- package/dist/{claude-SNWHWWWM.js → claude-V4HRPR4Z.js} +2 -2
- package/dist/{cleanup-PLMS2KWF.js → cleanup-IO4KV2DL.js} +9 -6
- package/dist/{cleanup-PLMS2KWF.js.map → cleanup-IO4KV2DL.js.map} +1 -1
- package/dist/cli.js +84 -63
- package/dist/cli.js.map +1 -1
- package/dist/{commit-NAGJH4J4.js → commit-3ULFKXNB.js} +4 -4
- package/dist/{dev-server-UKAPBGUR.js → dev-server-OAP3RZC6.js} +4 -3
- package/dist/{dev-server-UKAPBGUR.js.map → dev-server-OAP3RZC6.js.map} +1 -1
- package/dist/{feedback-ICJ44XGB.js → feedback-ZLAX3BVL.js} +3 -3
- package/dist/{ignite-U2JSVOEZ.js → ignite-HA2OJF6Z.js} +20 -36
- package/dist/ignite-HA2OJF6Z.js.map +1 -0
- package/dist/index.js +1 -1
- package/dist/index.js.map +1 -1
- package/dist/{init-YDKOPB54.js → init-S6IEGRSX.js} +3 -3
- package/dist/mcp/issue-management-server.js +420 -14
- package/dist/mcp/issue-management-server.js.map +1 -1
- package/dist/{open-QI63XQ4F.js → open-IN3LUZXX.js} +4 -3
- package/dist/{open-QI63XQ4F.js.map → open-IN3LUZXX.js.map} +1 -1
- package/dist/{projects-TWY4RT2Z.js → projects-CTRTTMSK.js} +25 -9
- package/dist/projects-CTRTTMSK.js.map +1 -0
- package/dist/prompts/issue-prompt.txt +16 -0
- package/dist/prompts/pr-prompt.txt +33 -13
- package/dist/prompts/regular-prompt.txt +7 -0
- package/dist/{rebase-AONLKM2V.js → rebase-RLEVFHWN.js} +3 -3
- package/dist/{run-YDVYORT2.js → run-QEIS2EH2.js} +4 -3
- package/dist/{run-YDVYORT2.js.map → run-QEIS2EH2.js.map} +1 -1
- package/dist/{summary-7KYFRAIM.js → summary-MPOOQIOX.js} +38 -7
- package/dist/summary-MPOOQIOX.js.map +1 -0
- package/dist/{test-webserver-NRMGT2HB.js → test-webserver-J6SMNLU2.js} +3 -2
- package/dist/{test-webserver-NRMGT2HB.js.map → test-webserver-J6SMNLU2.js.map} +1 -1
- package/package.json +1 -1
- package/dist/chunk-453NC377.js.map +0 -1
- package/dist/chunk-EDDIAWVM.js.map +0 -1
- package/dist/chunk-GJMEKEI5.js.map +0 -1
- package/dist/chunk-ITIXKM24.js.map +0 -1
- package/dist/chunk-RVLRPQU4.js.map +0 -1
- package/dist/ignite-U2JSVOEZ.js.map +0 -1
- package/dist/projects-TWY4RT2Z.js.map +0 -1
- package/dist/summary-7KYFRAIM.js.map +0 -1
- /package/dist/{BranchNamingService-UB2EJGFQ.js.map → BranchNamingService-AO7BPIUJ.js.map} +0 -0
- /package/dist/{ClaudeContextManager-M57BQUMY.js.map → ClaudeContextManager-Y2YJC6BU.js.map} +0 -0
- /package/dist/{ClaudeService-FLZ2IXAO.js.map → ClaudeService-NDVFQRKC.js.map} +0 -0
- /package/dist/{LoomLauncher-5PPVFTFN.js.map → LoomLauncher-U2B3VHPC.js.map} +0 -0
- /package/dist/{PRManager-YTG6XPMG.js.map → PRManager-7F3AAY66.js.map} +0 -0
- /package/dist/{chunk-7GKMQJGQ.js.map → chunk-64HCHVJM.js.map} +0 -0
- /package/dist/{chunk-JWUYPJ7K.js.map → chunk-6YAMWLCP.js.map} +0 -0
- /package/dist/{chunk-33P5VSKS.js.map → chunk-C7YW5IMS.js.map} +0 -0
- /package/dist/{chunk-37V2NBYR.js.map → chunk-CAXFWFV6.js.map} +0 -0
- /package/dist/{chunk-AFRICMSW.js.map → chunk-ENMTWE74.js.map} +0 -0
- /package/dist/{chunk-GH4FLYV5.js.map → chunk-NEPH2O4C.js.map} +0 -0
- /package/dist/{chunk-XAHE76RL.js.map → chunk-O36JLYNW.js.map} +0 -0
- /package/dist/{chunk-6VQNF44G.js.map → chunk-Q457PKGH.js.map} +0 -0
- /package/dist/{chunk-7FM7AL7S.js.map → chunk-VYKKWU36.js.map} +0 -0
- /package/dist/{claude-SNWHWWWM.js.map → claude-V4HRPR4Z.js.map} +0 -0
- /package/dist/{commit-NAGJH4J4.js.map → commit-3ULFKXNB.js.map} +0 -0
- /package/dist/{feedback-ICJ44XGB.js.map → feedback-ZLAX3BVL.js.map} +0 -0
- /package/dist/{init-YDKOPB54.js.map → init-S6IEGRSX.js.map} +0 -0
- /package/dist/{rebase-AONLKM2V.js.map → rebase-RLEVFHWN.js.map} +0 -0
package/LICENSE
CHANGED
@@ -20,7 +20,7 @@ it within your organization. What is prohibited is redistributing or offering
 access to this software (in original or modified form) as part of something
 you sell or provide to others.
 
-Change Date: 2030-01-
+Change Date: 2030-01-25
 Change License: Apache License 2.0
 
 For clarity, on or after the Change Date, the Licensed Work will

package/README.md
CHANGED
@@ -79,9 +79,7 @@ iloom uses your existing Claude subscription to build a shared mental model of y
 il finish
 ```
 
-**The iloom Difference:** il start doesn't
-
-**→ [Want to know how you'll benefit from iloom?](docs/is-iloom-right-for-you.md)**
+**The iloom Difference:** il start doesn't merely create a branch. It launches a multi-agent workflow that surfaces assumptions and creates a structured plan in your issue tracker **before you even need to look at your IDE.**
 
 How It Works: The Multi-Agent Workflow
 --------------------------------------
@@ -524,7 +522,7 @@ License & Name
 
 * ❌ You cannot resell iloom itself as a product or SaaS.
 
-* Converts to Apache 2.0 on 2030-01-
+* Converts to Apache 2.0 on 2030-01-25.
 
 
 See [LICENSE](https://raw.githubusercontent.com/iloom-ai/iloom-cli/main/LICENSE) for complete terms.

package/dist/{BranchNamingService-UB2EJGFQ.js → BranchNamingService-AO7BPIUJ.js}
@@ -3,7 +3,7 @@ import {
 ClaudeBranchNameStrategy,
 DefaultBranchNamingService,
 SimpleBranchNameStrategy
-} from "./chunk-
+} from "./chunk-Q457PKGH.js";
 import "./chunk-6MLEBAYZ.js";
 import "./chunk-VT4PDUYT.js";
 export {
@@ -11,4 +11,4 @@ export {
 DefaultBranchNamingService,
 SimpleBranchNameStrategy
 };
-//# sourceMappingURL=BranchNamingService-
+//# sourceMappingURL=BranchNamingService-AO7BPIUJ.js.map

package/dist/{ClaudeContextManager-M57BQUMY.js → ClaudeContextManager-Y2YJC6BU.js}
@@ -1,14 +1,14 @@
 #!/usr/bin/env node
 import {
 ClaudeContextManager
-} from "./chunk-
-import "./chunk-
+} from "./chunk-64HCHVJM.js";
+import "./chunk-O36JLYNW.js";
 import "./chunk-TIYJEEVO.js";
 import "./chunk-WFQ5CLTR.js";
-import "./chunk-
+import "./chunk-IGKPPACU.js";
 import "./chunk-6MLEBAYZ.js";
 import "./chunk-VT4PDUYT.js";
 export {
 ClaudeContextManager
 };
-//# sourceMappingURL=ClaudeContextManager-
+//# sourceMappingURL=ClaudeContextManager-Y2YJC6BU.js.map

package/dist/{ClaudeService-FLZ2IXAO.js → ClaudeService-NDVFQRKC.js}
@@ -1,13 +1,13 @@
 #!/usr/bin/env node
 import {
 ClaudeService
-} from "./chunk-
+} from "./chunk-O36JLYNW.js";
 import "./chunk-TIYJEEVO.js";
 import "./chunk-WFQ5CLTR.js";
-import "./chunk-
+import "./chunk-IGKPPACU.js";
 import "./chunk-6MLEBAYZ.js";
 import "./chunk-VT4PDUYT.js";
 export {
 ClaudeService
 };
-//# sourceMappingURL=ClaudeService-
+//# sourceMappingURL=ClaudeService-NDVFQRKC.js.map

package/dist/{LoomLauncher-5PPVFTFN.js → LoomLauncher-U2B3VHPC.js}
@@ -4,8 +4,8 @@ import {
 } from "./chunk-O7VL5N6S.js";
 import {
 ClaudeContextManager
-} from "./chunk-
-import "./chunk-
+} from "./chunk-64HCHVJM.js";
+import "./chunk-O36JLYNW.js";
 import "./chunk-TIYJEEVO.js";
 import {
 getExecutablePath
@@ -15,7 +15,7 @@ import {
 generateColorFromBranchName,
 hexToRgb
 } from "./chunk-433MOLAU.js";
-import "./chunk-
+import "./chunk-IGKPPACU.js";
 import {
 getLogger
 } from "./chunk-6MLEBAYZ.js";
@@ -252,4 +252,4 @@ var LoomLauncher = class {
 export {
 LoomLauncher
 };
-//# sourceMappingURL=LoomLauncher-
+//# sourceMappingURL=LoomLauncher-U2B3VHPC.js.map

package/dist/{PRManager-YTG6XPMG.js → PRManager-7F3AAY66.js}
@@ -1,16 +1,16 @@
 #!/usr/bin/env node
 import {
 PRManager
-} from "./chunk-
+} from "./chunk-6YAMWLCP.js";
 import "./chunk-YETJNRQM.js";
-import "./chunk-
+import "./chunk-NPEMVE27.js";
 import "./chunk-HBJITKSZ.js";
 import "./chunk-FXDYIV3K.js";
 import "./chunk-GCPAZSGV.js";
-import "./chunk-
+import "./chunk-IGKPPACU.js";
 import "./chunk-6MLEBAYZ.js";
 import "./chunk-VT4PDUYT.js";
 export {
 PRManager
 };
-//# sourceMappingURL=PRManager-
+//# sourceMappingURL=PRManager-7F3AAY66.js.map

package/dist/README.md
CHANGED
@@ -79,9 +79,7 @@ iloom uses your existing Claude subscription to build a shared mental model of y
 il finish
 ```
 
-**The iloom Difference:** il start doesn't
-
-**→ [Want to know how you'll benefit from iloom?](docs/is-iloom-right-for-you.md)**
+**The iloom Difference:** il start doesn't merely create a branch. It launches a multi-agent workflow that surfaces assumptions and creates a structured plan in your issue tracker **before you even need to look at your IDE.**
 
 How It Works: The Multi-Agent Workflow
 --------------------------------------
@@ -524,7 +522,7 @@ License & Name
 
 * ❌ You cannot resell iloom itself as a product or SaaS.
 
-* Converts to Apache 2.0 on 2030-01-
+* Converts to Apache 2.0 on 2030-01-25.
 
 
 See [LICENSE](https://raw.githubusercontent.com/iloom-ai/iloom-cli/main/LICENSE) for complete terms.

package/dist/agents/iloom-issue-analyze-and-plan.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-analyze-and-plan
 description: Combined analysis and planning agent for SIMPLE tasks. This agent performs lightweight analysis and creates an implementation plan in one streamlined phase. Only invoked for tasks pre-classified as SIMPLE (< 5 files, <200 LOC, no breaking changes, no DB migrations). Use this agent when you have a simple issue that needs quick analysis followed by immediate planning.
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*),
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*), mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 color: teal
 model: sonnet
 ---

package/dist/agents/iloom-issue-analyzer.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-analyzer
 description: Use this agent when you need to analyze and research issues, bugs, or enhancement requests. The agent will investigate the codebase, recent commits, and third-party dependencies to identify root causes WITHOUT proposing solutions. Ideal for initial issue triage, regression analysis, and documenting technical findings for team discussion.\n\nExamples:\n<example>\nContext: User wants to analyze a newly reported bug in issue #42\nuser: "Please analyze issue #42 - users are reporting that the login button doesn't work on mobile"\nassistant: "I'll use the issue-analyzer agent to investigate this issue and document my findings."\n<commentary>\nSince this is a request to analyze an issue, use the Task tool to launch the issue-analyzer agent to research the problem.\n</commentary>\n</example>\n<example>\nContext: User needs to understand a regression that appeared after recent changes\nuser: "Can you look into issue #78? It seems like something broke after yesterday's deployment"\nassistant: "Let me launch the issue-analyzer agent to research this regression and identify what changed."\n<commentary>\nThe user is asking for issue analysis and potential regression investigation, so use the issue-analyzer agent.\n</commentary>\n</example>
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*),
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*), mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 color: pink
 model: sonnet
 ---

package/dist/agents/iloom-issue-complexity-evaluator.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-complexity-evaluator
 description: Use this agent when you need to quickly assess the complexity of an issue before deciding on the appropriate workflow. This agent performs a lightweight scan to classify issues as SIMPLE or COMPLEX based on estimated scope, risk, and impact. Runs first before any detailed analysis or planning.
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*),
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*), mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact, mcp__recap__set_complexity
 color: orange
 model: haiku
 ---

package/dist/agents/iloom-issue-enhancer.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-enhancer
 description: Use this agent when you need to analyze bug or enhancement reports from a Product Manager perspective. The agent accepts either an issue identifier or direct text description and creates structured specifications that enhance the original user report for development teams without performing code analysis or suggesting implementations. Ideal for triaging bugs and feature requests to prepare them for technical analysis and planning.\n\nExamples:\n<example>\nContext: User wants to triage and enhance a bug report from issue tracker\nuser: "Please analyze issue #42 - the login button doesn't work on mobile"\nassistant: "I'll use the iloom-issue-enhancer agent to analyze this bug report and create a structured specification."\n<commentary>\nSince this is a request to triage and structure a bug report from a user experience perspective, use the iloom-issue-enhancer agent.\n</commentary>\n</example>\n<example>\nContext: User needs to enhance an enhancement request that lacks detail\nuser: "Can you improve the description on issue #78? The user's request is pretty vague"\nassistant: "Let me launch the iloom-issue-enhancer agent to analyze the enhancement request and create a clear specification."\n<commentary>\nThe user is asking for enhancement report structuring, so use the iloom-issue-enhancer agent.\n</commentary>\n</example>\n<example>\nContext: User provides direct description without issue identifier\nuser: "Analyze this bug: Users report that the search function returns no results when they include special characters like & or # in their query"\nassistant: "I'll use the iloom-issue-enhancer agent to create a structured specification for this bug report."\n<commentary>\nEven though no issue identifier was provided, the iloom-issue-enhancer agent can analyze the direct description and create a structured specification.\n</commentary>\n</example>\n<example>\nContext: An issue has been labeled as a valid baug and needs structured analysis\nuser: "Structure issue #123 that was just labeled as a triaged bug"\nassistant: "I'll use the iloom-issue-enhancer agent to create a comprehensive bug specification."\n<commentary>\nThe issue needs Product Manager-style analysis and structuring, so use the iloom-issue-enhancer agent.\n</commentary>\n</example>
-tools: Bash, Glob, Grep, Read, WebFetch, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__issue_management__get_issue, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
+tools: Bash, Glob, Grep, Read, WebFetch, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 color: purple
 model: sonnet
 ---

package/dist/agents/iloom-issue-implementer.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-implementer
 description: Use this agent when you need to implement an issue exactly as specified in its comments and description. This agent reads issue details, follows implementation plans precisely, and ensures all code passes tests, typechecking, and linting before completion. Examples:\n\n<example>\nContext: User wants to implement a specific issue.\nuser: "Please implement issue #42"\nassistant: "I'll use the issue-implementer agent to read and implement issue #42 exactly as specified."\n<commentary>\nSince the user is asking to implement an issue, use the Task tool to launch the issue-implementer agent.\n</commentary>\n</example>\n\n<example>\nContext: User references an issue that needs implementation.\nuser: "Can you work on the authentication issue we discussed in #15?"\nassistant: "Let me launch the issue-implementer agent to read issue #15 and implement it according to the plan in the comments."\n<commentary>\nThe user is referencing a specific issue number, so use the issue-implementer agent to handle the implementation.\n</commentary>\n</example>
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules ,mcp__issue_management__get_issue, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules ,mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 model: sonnet
 color: green
 ---

package/dist/agents/iloom-issue-planner.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-planner
 description: Use this agent when you need to analyze issues and create detailed implementation plans. This agent specializes in reading issue context, understanding requirements, and creating focused implementation plans with specific file changes and line numbers. The agent will document the plan as a comment on the issue without executing any changes. Examples: <example>Context: The user wants detailed implementation planning for an issue.\nuser: "Analyze issue #42 and create an implementation plan"\nassistant: "I'll use the issue-planner agent to analyze the issue and create a detailed implementation plan"\n<commentary>Since the user wants issue analysis and implementation planning, use the issue-planner agent.</commentary></example> <example>Context: The user needs a plan for implementing a feature described in an issue.\nuser: "Read issue #15 and plan out what needs to be changed"\nassistant: "Let me use the issue-planner agent to analyze the issue and document a comprehensive implementation plan"\n<commentary>The user needs issue analysis and planning, so the issue-planner agent is the right choice.</commentary></example>
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*),
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(git show:*), mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 color: blue
 model: sonnet
 ---

package/dist/agents/iloom-issue-reviewer.md
@@ -1,7 +1,7 @@
 ---
 name: iloom-issue-reviewer
 description: Use this agent when you need to review uncommitted code changes against a specific issue to verify completeness and quality. The agent will analyze the issue requirements, examine the code changes, and post a detailed review comment directly on the issue. Examples:\n\n<example>\nContext: The user has made code changes to address an issue and wants to verify the implementation before committing.\nuser: "I've finished implementing the fix for issue #42, can you review it?"\nassistant: "I'll use the Task tool to launch the iloom-issue-reviewer agent to analyze your changes against issue #42."\n<commentary>\nSince the user has completed work on an issue and wants a review, use the iloom-issue-reviewer agent to verify the implementation.\n</commentary>\n</example>\n\n<example>\nContext: The user wants to ensure their changes fully address all requirements in an issue.\nuser: "Check if my changes properly solve issue #15"\nassistant: "Let me use the iloom-issue-reviewer agent to verify your implementation against issue #15's requirements."\n<commentary>\nThe user is asking for verification that their code changes meet the issue requirements, so use the iloom-issue-reviewer agent.\n</commentary>\n</example>
-tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__issue_management__get_issue, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
+tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__issue_management__get_issue, mcp__issue_management__get_pr, mcp__issue_management__get_comment, mcp__issue_management__create_comment, mcp__issue_management__update_comment, mcp__recap__get_recap, mcp__recap__add_entry, mcp__recap__add_artifact
 model: sonnet
 color: cyan
 ---

package/dist/{chunk-7GKMQJGQ.js → chunk-64HCHVJM.js}
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 import {
 ClaudeService
-} from "./chunk-
+} from "./chunk-O36JLYNW.js";
 import {
 logger
 } from "./chunk-VT4PDUYT.js";
@@ -63,4 +63,4 @@ var ClaudeContextManager = class {
 export {
 ClaudeContextManager
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-64HCHVJM.js.map

package/dist/{chunk-JWUYPJ7K.js → chunk-6YAMWLCP.js}
@@ -4,7 +4,7 @@ import {
 } from "./chunk-YETJNRQM.js";
 import {
 IssueManagementProviderFactory
-} from "./chunk-
+} from "./chunk-NPEMVE27.js";
 import {
 getConfiguredRepoFromSettings,
 getEffectivePRTargetRemote,
@@ -16,7 +16,7 @@ import {
 import {
 detectClaudeCli,
 launchClaude
-} from "./chunk-
+} from "./chunk-IGKPPACU.js";
 import {
 getLogger
 } from "./chunk-6MLEBAYZ.js";
@@ -358,4 +358,4 @@ Then retry: il start`
 export {
 PRManager
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-6YAMWLCP.js.map

package/dist/{chunk-33P5VSKS.js → chunk-C7YW5IMS.js}
@@ -24,7 +24,7 @@ import {
 import {
 detectClaudeCli,
 launchClaude
-} from "./chunk-
+} from "./chunk-IGKPPACU.js";
 import {
 logger
 } from "./chunk-VT4PDUYT.js";
@@ -701,4 +701,4 @@ export {
 ShellCompletion,
 InitCommand
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-C7YW5IMS.js.map

package/dist/{chunk-37V2NBYR.js → chunk-CAXFWFV6.js}
@@ -25,7 +25,7 @@ import {
 import {
 detectClaudeCli,
 launchClaude
-} from "./chunk-
+} from "./chunk-IGKPPACU.js";
 import {
 getLogger
 } from "./chunk-6MLEBAYZ.js";
@@ -469,4 +469,4 @@ export {
 MergeManager,
 BuildRunner
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-CAXFWFV6.js.map

package/dist/chunk-CFQVOTHO.js
@@ -0,0 +1,111 @@
+#!/usr/bin/env node
+import {
+  extractIssueNumber
+} from "./chunk-ZA575VLF.js";
+import {
+  extractPort,
+  findEnvFileContainingVariable,
+  logger,
+  parseEnvFile
+} from "./chunk-VT4PDUYT.js";
+
+// src/utils/port.ts
+import { createHash } from "crypto";
+import path from "path";
+import fs from "fs-extra";
+function wrapPort(rawPort, basePort) {
+  if (rawPort <= 65535) return rawPort;
+  const range = 65535 - basePort;
+  return (rawPort - basePort - 1) % range + basePort + 1;
+}
+function extractNumericSuffix(issueId) {
+  const match = issueId.match(/[-_]?(\d+)$/);
+  const digits = match == null ? void 0 : match[1];
+  if (digits === void 0) return null;
+  return parseInt(digits, 10);
+}
+function generatePortOffsetFromBranchName(branchName) {
+  if (!branchName || branchName.trim().length === 0) {
+    throw new Error("Branch name cannot be empty");
+  }
+  const hash = createHash("sha256").update(branchName).digest("hex");
+  const hashPrefix = hash.slice(0, 8);
+  const hashAsInt = parseInt(hashPrefix, 16);
+  const portOffset = hashAsInt % 999 + 1;
+  return portOffset;
+}
+function calculatePortForBranch(branchName, basePort = 3e3) {
+  const offset = generatePortOffsetFromBranchName(branchName);
+  const port = basePort + offset;
+  return wrapPort(port, basePort);
+}
+function calculatePortFromIdentifier(identifier, basePort = 3e3) {
+  if (typeof identifier === "number") {
+    return wrapPort(basePort + identifier, basePort);
+  }
+  const numericValue = parseInt(identifier, 10);
+  if (!isNaN(numericValue) && String(numericValue) === identifier) {
+    return wrapPort(basePort + numericValue, basePort);
+  }
+  const numericSuffix = extractNumericSuffix(identifier);
+  if (numericSuffix !== null) {
+    return wrapPort(basePort + numericSuffix, basePort);
+  }
+  return calculatePortForBranch(`issue-${identifier}`, basePort);
+}
+async function getWorkspacePort(options, dependencies) {
+  const basePort = options.basePort ?? 3e3;
+  const checkEnvFile = options.checkEnvFile ?? false;
+  if (checkEnvFile) {
+    const deps = {
+      fileExists: (dependencies == null ? void 0 : dependencies.fileExists) ?? ((p) => fs.pathExists(p)),
+      readFile: (dependencies == null ? void 0 : dependencies.readFile) ?? ((p) => fs.readFile(p, "utf8"))
+    };
+    const envFile = await findEnvFileContainingVariable(
+      options.worktreePath,
+      "PORT",
+      async (p) => deps.fileExists(p),
+      async (p, varName) => {
+        const content = await deps.readFile(p);
+        const envMap = parseEnvFile(content);
+        return envMap.get(varName) ?? null;
+      }
+    );
+    if (envFile) {
+      const envPath = path.join(options.worktreePath, envFile);
+      const envContent = await deps.readFile(envPath);
+      const envMap = parseEnvFile(envContent);
+      const port2 = extractPort(envMap);
+      if (port2) {
+        logger.debug(`Using PORT from ${envFile}: ${port2}`);
+        return port2;
+      }
+    }
+    logger.debug("PORT not found in any dotenv-flow file, calculating from workspace identifier");
+  }
+  const dirName = path.basename(options.worktreePath);
+  const prPattern = /_pr_(\d+)$/;
+  const prMatch = dirName.match(prPattern);
+  if (prMatch == null ? void 0 : prMatch[1]) {
+    const prNumber = parseInt(prMatch[1], 10);
+    const port2 = calculatePortFromIdentifier(prNumber, basePort);
+    logger.debug(`Calculated PORT for PR #${prNumber}: ${port2}`);
+    return port2;
+  }
+  const issueId = extractIssueNumber(dirName) ?? extractIssueNumber(options.worktreeBranch);
+  if (issueId !== null) {
+    const port2 = calculatePortFromIdentifier(issueId, basePort);
+    logger.debug(`Calculated PORT for issue ${issueId}: ${port2}`);
+    return port2;
+  }
+  const port = calculatePortForBranch(options.worktreeBranch, basePort);
+  logger.debug(`Calculated PORT for branch "${options.worktreeBranch}": ${port}`);
+  return port;
+}
+
+export {
+  calculatePortForBranch,
+  calculatePortFromIdentifier,
+  getWorkspacePort
+};
+//# sourceMappingURL=chunk-CFQVOTHO.js.map
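
The new chunk above bundles src/utils/port.ts, which derives a deterministic dev-server port per workspace: an explicit PORT in a dotenv-flow file wins, then a `_pr_N` directory suffix, then an issue number (including alphanumeric IDs like MARK-324), and finally a SHA-256 hash of the branch name. The following is a minimal TypeScript sketch of that arithmetic, collapsing the suffix-extraction and identifier handling into one helper for brevity; `BASE_PORT` and `portForIdentifier` are illustrative names, not the package's exported API.

```ts
// Sketch only: mirrors the port math shown in the chunk above, under the
// assumption that the default base port is 3000.
import { createHash } from "crypto";

const BASE_PORT = 3000;

// Ports that would overflow 65535 wrap back into (basePort, 65535].
function wrapPort(rawPort: number, basePort = BASE_PORT): number {
  if (rawPort <= 65535) return rawPort;
  const range = 65535 - basePort;
  return ((rawPort - basePort - 1) % range) + basePort + 1;
}

// Numeric identifiers (42) and numeric suffixes ("MARK-324") map to basePort + n;
// anything else falls back to a SHA-256 hash of `issue-<id>`, giving an offset of 1-999.
function portForIdentifier(id: string | number, basePort = BASE_PORT): number {
  const n = typeof id === "number" ? id : Number((id.match(/[-_]?(\d+)$/) ?? [])[1]);
  if (Number.isFinite(n)) return wrapPort(basePort + n, basePort);
  const hash = createHash("sha256").update(`issue-${id}`).digest("hex");
  return wrapPort(basePort + (parseInt(hash.slice(0, 8), 16) % 999) + 1, basePort);
}

console.log(portForIdentifier(42));         // 3042
console.log(portForIdentifier("MARK-324")); // 3324
console.log(portForIdentifier(70000));      // exceeds 65535, so it wraps back into range
```

Because the offset is derived from a hash rather than a random draw, the same branch always lands on the same port across machines and reruns.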

package/dist/chunk-CFQVOTHO.js.map
@@ -0,0 +1 @@
{"version":3,"sources":["../src/utils/port.ts"],"sourcesContent":["import { createHash } from 'crypto'\nimport path from 'path'\nimport fs from 'fs-extra'\nimport { parseEnvFile, extractPort, findEnvFileContainingVariable } from './env.js'\nimport { extractIssueNumber } from './git.js'\nimport { logger } from './logger.js'\n\n/**\n * Wrap a raw port that exceeds 65535 into the valid port range.\n * Uses modulo arithmetic to wrap back into [basePort+1, 65535].\n *\n * @param rawPort - The calculated port (basePort + issueNumber)\n * @param basePort - The base port (default: 3000)\n * @returns Port in valid range [basePort+1, 65535]\n */\nexport function wrapPort(rawPort: number, basePort: number): number {\n\tif (rawPort <= 65535) return rawPort\n\tconst range = 65535 - basePort\n\treturn ((rawPort - basePort - 1) % range) + basePort + 1\n}\n\n/**\n * Extract numeric suffix from alphanumeric issue ID (e.g., MARK-324 -> 324)\n * @returns The numeric part or null if no trailing number found\n */\nexport function extractNumericSuffix(issueId: string): number | null {\n\t// Match trailing digits after optional separator (-, _)\n\tconst match = issueId.match(/[-_]?(\\d+)$/)\n\tconst digits = match?.[1]\n\tif (digits === undefined) return null\n\treturn parseInt(digits, 10)\n}\n\n/**\n * Generate deterministic port offset from branch name using SHA256 hash\n * Range: 1-999 (matches existing random range for branches)\n *\n * @param branchName - Branch name to generate port offset from\n * @returns Port offset in range [1, 999]\n * @throws Error if branchName is empty\n */\nexport function generatePortOffsetFromBranchName(branchName: string): number {\n\t// Validate input\n\tif (!branchName || branchName.trim().length === 0) {\n\t\tthrow new Error('Branch name cannot be empty')\n\t}\n\n\t// Generate SHA256 hash of branch name (same pattern as color.ts)\n\tconst hash = createHash('sha256').update(branchName).digest('hex')\n\n\t// Take first 8 hex characters and convert to port offset (1-999)\n\tconst hashPrefix = hash.slice(0, 8)\n\tconst hashAsInt = parseInt(hashPrefix, 16)\n\tconst portOffset = (hashAsInt % 999) + 1 // +1 ensures range is 1-999, not 0-998\n\n\treturn portOffset\n}\n\n/**\n * Calculate deterministic port for branch-based workspace\n *\n * @param branchName - Branch name\n * @param basePort - Base port (default: 3000)\n * @returns Port number\n * @throws Error if branchName is empty\n */\nexport function calculatePortForBranch(branchName: string, basePort: number = 3000): number {\n\tconst offset = generatePortOffsetFromBranchName(branchName)\n\tconst port = basePort + offset\n\n\t// Use wrap-around for port overflow\n\treturn wrapPort(port, basePort)\n}\n\n/**\n * Calculate port from an identifier (issue number, PR number, or string).\n * This is the single source of truth for port calculation logic.\n *\n * Algorithm:\n * 1. Numeric identifiers: basePort + number (with wrapPort for overflow)\n * 2. String numeric (e.g., \"42\"): parse and same as above\n * 3. Alphanumeric with suffix (e.g., \"MARK-324\"): extract suffix and same as above\n * 4. 
Pure strings without numeric suffix: hash-based calculation via calculatePortForBranch\n *\n * @param identifier - The identifier (issue number, PR number, or string)\n * @param basePort - Base port (default: 3000)\n * @returns Port number in valid range\n */\nexport function calculatePortFromIdentifier(\n\tidentifier: string | number,\n\tbasePort: number = 3000\n): number {\n\t// Handle numeric identifiers directly\n\tif (typeof identifier === 'number') {\n\t\treturn wrapPort(basePort + identifier, basePort)\n\t}\n\n\t// Handle string identifiers\n\t// First, try to parse as pure numeric string\n\tconst numericValue = parseInt(identifier, 10)\n\tif (!isNaN(numericValue) && String(numericValue) === identifier) {\n\t\treturn wrapPort(basePort + numericValue, basePort)\n\t}\n\n\t// Try extracting numeric suffix from alphanumeric identifiers (e.g., MARK-324 -> 324)\n\tconst numericSuffix = extractNumericSuffix(identifier)\n\tif (numericSuffix !== null) {\n\t\treturn wrapPort(basePort + numericSuffix, basePort)\n\t}\n\n\t// For non-numeric strings without numeric suffix, use hash-based calculation\n\treturn calculatePortForBranch(`issue-${identifier}`, basePort)\n}\n\nexport interface GetWorkspacePortOptions {\n\tbasePort?: number | undefined\n\tworktreePath: string\n\tworktreeBranch: string\n\t/** If true, check .env files for PORT override before calculating. Defaults to false. */\n\tcheckEnvFile?: boolean\n}\n\nexport interface GetWorkspacePortDependencies {\n\tfileExists?: (path: string) => Promise<boolean>\n\treadFile?: (path: string) => Promise<string>\n\tlistWorktrees?: () => Promise<Array<{ path: string; branch: string }>>\n}\n\n/**\n * Get port for workspace - calculates based on workspace type, optionally checking .env files first.\n * Consolidates logic previously duplicated across dev-server, run, open commands.\n *\n * Priority (when checkEnvFile is true):\n * 1. Read PORT from dotenv-flow files (if present)\n * 2. Calculate from PR pattern (_pr_N suffix in directory name)\n * 3. Calculate from issue pattern (issue-N or alphanumeric like MARK-324)\n * 4. Calculate from branch name using deterministic hash\n *\n * When checkEnvFile is false (default), skips step 1 and only calculates.\n */\nexport async function getWorkspacePort(\n\toptions: GetWorkspacePortOptions,\n\tdependencies?: GetWorkspacePortDependencies\n): Promise<number> {\n\tconst basePort = options.basePort ?? 3000\n\tconst checkEnvFile = options.checkEnvFile ?? false\n\n\t// Only check .env files if explicitly requested\n\tif (checkEnvFile) {\n\t\tconst deps = {\n\t\t\tfileExists:\n\t\t\t\tdependencies?.fileExists ?? ((p: string): Promise<boolean> => fs.pathExists(p)),\n\t\t\treadFile:\n\t\t\t\tdependencies?.readFile ?? ((p: string): Promise<string> => fs.readFile(p, 'utf8')),\n\t\t}\n\n\t\t// Try to read PORT from any dotenv-flow file (as override)\n\t\tconst envFile = await findEnvFileContainingVariable(\n\t\t\toptions.worktreePath,\n\t\t\t'PORT',\n\t\t\tasync (p) => deps.fileExists(p),\n\t\t\tasync (p, varName) => {\n\t\t\t\tconst content = await deps.readFile(p)\n\t\t\t\tconst envMap = parseEnvFile(content)\n\t\t\t\treturn envMap.get(varName) ?? 
null\n\t\t\t}\n\t\t)\n\n\t\tif (envFile) {\n\t\t\tconst envPath = path.join(options.worktreePath, envFile)\n\t\t\tconst envContent = await deps.readFile(envPath)\n\t\t\tconst envMap = parseEnvFile(envContent)\n\t\t\tconst port = extractPort(envMap)\n\n\t\t\tif (port) {\n\t\t\t\tlogger.debug(`Using PORT from ${envFile}: ${port}`)\n\t\t\t\treturn port\n\t\t\t}\n\t\t}\n\n\t\tlogger.debug('PORT not found in any dotenv-flow file, calculating from workspace identifier')\n\t}\n\n\t// Calculate based on workspace identifier\n\n\t// Extract identifier from worktree path/branch\n\tconst dirName = path.basename(options.worktreePath)\n\n\t// Check for PR pattern: _pr_N\n\tconst prPattern = /_pr_(\\d+)$/\n\tconst prMatch = dirName.match(prPattern)\n\tif (prMatch?.[1]) {\n\t\tconst prNumber = parseInt(prMatch[1], 10)\n\t\tconst port = calculatePortFromIdentifier(prNumber, basePort)\n\t\tlogger.debug(`Calculated PORT for PR #${prNumber}: ${port}`)\n\t\treturn port\n\t}\n\n\t// Check for issue pattern: issue-N or alphanumeric like MARK-324\n\tconst issueId = extractIssueNumber(dirName) ?? extractIssueNumber(options.worktreeBranch)\n\tif (issueId !== null) {\n\t\tconst port = calculatePortFromIdentifier(issueId, basePort)\n\t\tlogger.debug(`Calculated PORT for issue ${issueId}: ${port}`)\n\t\treturn port\n\t}\n\n\t// Branch-based workspace - use deterministic hash\n\tconst port = calculatePortForBranch(options.worktreeBranch, basePort)\n\tlogger.debug(`Calculated PORT for branch \"${options.worktreeBranch}\": ${port}`)\n\treturn port\n}\n"],"mappings":";;;;;;;;;;;;AAAA,SAAS,kBAAkB;AAC3B,OAAO,UAAU;AACjB,OAAO,QAAQ;AAaR,SAAS,SAAS,SAAiB,UAA0B;AACnE,MAAI,WAAW,MAAO,QAAO;AAC7B,QAAM,QAAQ,QAAQ;AACtB,UAAS,UAAU,WAAW,KAAK,QAAS,WAAW;AACxD;AAMO,SAAS,qBAAqB,SAAgC;AAEpE,QAAM,QAAQ,QAAQ,MAAM,aAAa;AACzC,QAAM,SAAS,+BAAQ;AACvB,MAAI,WAAW,OAAW,QAAO;AACjC,SAAO,SAAS,QAAQ,EAAE;AAC3B;AAUO,SAAS,iCAAiC,YAA4B;AAE5E,MAAI,CAAC,cAAc,WAAW,KAAK,EAAE,WAAW,GAAG;AAClD,UAAM,IAAI,MAAM,6BAA6B;AAAA,EAC9C;AAGA,QAAM,OAAO,WAAW,QAAQ,EAAE,OAAO,UAAU,EAAE,OAAO,KAAK;AAGjE,QAAM,aAAa,KAAK,MAAM,GAAG,CAAC;AAClC,QAAM,YAAY,SAAS,YAAY,EAAE;AACzC,QAAM,aAAc,YAAY,MAAO;AAEvC,SAAO;AACR;AAUO,SAAS,uBAAuB,YAAoB,WAAmB,KAAc;AAC3F,QAAM,SAAS,iCAAiC,UAAU;AAC1D,QAAM,OAAO,WAAW;AAGxB,SAAO,SAAS,MAAM,QAAQ;AAC/B;AAgBO,SAAS,4BACf,YACA,WAAmB,KACV;AAET,MAAI,OAAO,eAAe,UAAU;AACnC,WAAO,SAAS,WAAW,YAAY,QAAQ;AAAA,EAChD;AAIA,QAAM,eAAe,SAAS,YAAY,EAAE;AAC5C,MAAI,CAAC,MAAM,YAAY,KAAK,OAAO,YAAY,MAAM,YAAY;AAChE,WAAO,SAAS,WAAW,cAAc,QAAQ;AAAA,EAClD;AAGA,QAAM,gBAAgB,qBAAqB,UAAU;AACrD,MAAI,kBAAkB,MAAM;AAC3B,WAAO,SAAS,WAAW,eAAe,QAAQ;AAAA,EACnD;AAGA,SAAO,uBAAuB,SAAS,UAAU,IAAI,QAAQ;AAC9D;AA4BA,eAAsB,iBACrB,SACA,cACkB;AAClB,QAAM,WAAW,QAAQ,YAAY;AACrC,QAAM,eAAe,QAAQ,gBAAgB;AAG7C,MAAI,cAAc;AACjB,UAAM,OAAO;AAAA,MACZ,aACC,6CAAc,gBAAe,CAAC,MAAgC,GAAG,WAAW,CAAC;AAAA,MAC9E,WACC,6CAAc,cAAa,CAAC,MAA+B,GAAG,SAAS,GAAG,MAAM;AAAA,IAClF;AAGA,UAAM,UAAU,MAAM;AAAA,MACrB,QAAQ;AAAA,MACR;AAAA,MACA,OAAO,MAAM,KAAK,WAAW,CAAC;AAAA,MAC9B,OAAO,GAAG,YAAY;AACrB,cAAM,UAAU,MAAM,KAAK,SAAS,CAAC;AACrC,cAAM,SAAS,aAAa,OAAO;AACnC,eAAO,OAAO,IAAI,OAAO,KAAK;AAAA,MAC/B;AAAA,IACD;AAEA,QAAI,SAAS;AACZ,YAAM,UAAU,KAAK,KAAK,QAAQ,cAAc,OAAO;AACvD,YAAM,aAAa,MAAM,KAAK,SAAS,OAAO;AAC9C,YAAM,SAAS,aAAa,UAAU;AACtC,YAAMA,QAAO,YAAY,MAAM;AAE/B,UAAIA,OAAM;AACT,eAAO,MAAM,mBAAmB,OAAO,KAAKA,KAAI,EAAE;AAClD,eAAOA;AAAA,MACR;AAAA,IACD;AAEA,WAAO,MAAM,+EAA+E;AAAA,EAC7F;AAKA,QAAM,UAAU,KAAK,SAAS,QAAQ,YAAY;AAGlD,QAAM,YAAY;AAClB,QAAM,UAAU,QAAQ,MAAM,SAAS;AACvC,MAAI,mCAAU,IAAI;AACjB,UAAM,WAAW,SAAS,QAAQ,CAAC,GAAG,EAAE;AACxC,UAAMA,QAAO,4BAA4B,UAAU,QAAQ;AAC3D,WAAO,MAAM,2BAA2B,QAAQ,
KAAKA,KAAI,EAAE;AAC3D,WAAOA;AAAA,EACR;AAGA,QAAM,UAAU,mBAAmB,OAAO,KAAK,mBAAmB,QAAQ,cAAc;AACxF,MAAI,YAAY,MAAM;AACrB,UAAMA,QAAO,4BAA4B,SAAS,QAAQ;AAC1D,WAAO,MAAM,6BAA6B,OAAO,KAAKA,KAAI,EAAE;AAC5D,WAAOA;AAAA,EACR;AAGA,QAAM,OAAO,uBAAuB,QAAQ,gBAAgB,QAAQ;AACpE,SAAO,MAAM,+BAA+B,QAAQ,cAAc,MAAM,IAAI,EAAE;AAC9E,SAAO;AACR;","names":["port"]}

package/dist/{chunk-AFRICMSW.js → chunk-ENMTWE74.js}
@@ -1,7 +1,7 @@
 #!/usr/bin/env node
 import {
 ProcessManager
-} from "./chunk-
+} from "./chunk-WZYBHD7P.js";
 import {
 detectPackageManager,
 runScript
@@ -212,4 +212,4 @@ var DevServerManager = class {
 export {
 DevServerManager
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-ENMTWE74.js.map

package/dist/{chunk-RVLRPQU4.js → chunk-ETY2SBW5.js}
@@ -2,7 +2,7 @@
 import {
 calculatePortForBranch,
 calculatePortFromIdentifier
-} from "./chunk-
+} from "./chunk-CFQVOTHO.js";
 import {
 installDependencies,
 runScript
@@ -31,6 +31,9 @@ import {
 import {
 MetadataManager
 } from "./chunk-VWGKGNJP.js";
+import {
+GitHubService
+} from "./chunk-USJSNHGG.js";
 import {
 calculateForegroundColor,
 generateColorFromBranchName,
@@ -41,7 +44,7 @@ import {
 } from "./chunk-433MOLAU.js";
 import {
 generateRandomSessionId
-} from "./chunk-
+} from "./chunk-IGKPPACU.js";
 import {
 getLogger
 } from "./chunk-6MLEBAYZ.js";
@@ -191,7 +194,7 @@ var VSCodeIntegration = class {
 
 // src/lib/LoomManager.ts
 var LoomManager = class {
-constructor(gitWorktree, issueTracker, branchNaming, environment, _claude, capabilityDetector, cliIsolation, settings, database) {
+constructor(gitWorktree, issueTracker, branchNaming, environment, _claude, capabilityDetector, cliIsolation, settings, database, githubService) {
 this.gitWorktree = gitWorktree;
 this.issueTracker = issueTracker;
 this.branchNaming = branchNaming;
@@ -201,6 +204,7 @@ var LoomManager = class {
 this.settings = settings;
 this.database = database;
 this.metadataManager = new MetadataManager();
+this.githubService = githubService;
 }
 /**
 * Get database branch name for a loom by reading its .env file
@@ -322,11 +326,6 @@ var LoomManager = class {
 let draftPrUrl = void 0;
 const mergeBehavior = settingsData.mergeBehavior ?? { mode: "local" };
 if (mergeBehavior.mode === "github-draft-pr" && input.type === "issue") {
-if (!this.issueTracker.supportsPullRequests) {
-throw new Error(
-`The 'github-draft-pr' merge mode requires a GitHub-compatible issue tracker. Your provider (${this.issueTracker.providerName}) does not support pull requests.`
-);
-}
 getLogger().info("Creating placeholder commit for draft PR...");
 const { executeGitCommand: executeGitCommand2, PLACEHOLDER_COMMIT_PREFIX, pushBranchToRemote } = await import("./git-ENLT2VNI.js");
 await executeGitCommand2(
@@ -342,10 +341,10 @@ var LoomManager = class {
 getLogger().debug("Placeholder commit created");
 getLogger().info("Pushing branch to remote for draft PR...");
 await pushBranchToRemote(branchName, worktreePath, { dryRun: false });
-const { PRManager } = await import("./PRManager-
+const { PRManager } = await import("./PRManager-7F3AAY66.js");
 const prManager = new PRManager(settingsData);
 const prTitle = (issueData == null ? void 0 : issueData.title) ?? `Work on ${branchName}`;
-const prBody = `
+const prBody = `PR for issue #${input.identifier}
 
 This PR was created automatically by iloom.`;
 getLogger().info("Creating draft PR...");
@@ -393,8 +392,8 @@ This PR was created automatically by iloom.`;
 const setArguments = (_j = input.options) == null ? void 0 : _j.setArguments;
 const executablePath = (_k = input.options) == null ? void 0 : _k.executablePath;
 if (enableClaude || enableCode || enableDevServer || enableTerminal) {
-const { LoomLauncher } = await import("./LoomLauncher-
-const { ClaudeContextManager } = await import("./ClaudeContextManager-
+const { LoomLauncher } = await import("./LoomLauncher-U2B3VHPC.js");
+const { ClaudeContextManager } = await import("./ClaudeContextManager-Y2YJC6BU.js");
 const claudeContext = new ClaudeContextManager(void 0, void 0, this.settings);
 const launcher = new LoomLauncher(claudeContext, this.settings);
 await launcher.launchLoom({
@@ -573,10 +572,14 @@ This PR was created automatically by iloom.`;
 if (input.type === "issue") {
 return await this.issueTracker.fetchIssue(input.identifier);
 } else if (input.type === "pr") {
-if (
-
+if (this.issueTracker.supportsPullRequests && this.issueTracker.fetchPR) {
+return await this.issueTracker.fetchPR(input.identifier);
+}
+if (this.githubService) {
+return await this.githubService.fetchPR(input.identifier);
 }
-
+const github = new GitHubService();
+return await github.fetchPR(input.identifier);
 }
 return null;
 }
@@ -1043,8 +1046,8 @@ This PR was created automatically by iloom.`;
 const executablePath = (_i = input.options) == null ? void 0 : _i.executablePath;
 if (enableClaude || enableCode || enableDevServer || enableTerminal) {
 getLogger().info("Launching workspace components...");
-const { LoomLauncher } = await import("./LoomLauncher-
-const { ClaudeContextManager } = await import("./ClaudeContextManager-
+const { LoomLauncher } = await import("./LoomLauncher-U2B3VHPC.js");
+const { ClaudeContextManager } = await import("./ClaudeContextManager-Y2YJC6BU.js");
 const claudeContext = new ClaudeContextManager(void 0, void 0, this.settings);
 const launcher = new LoomLauncher(claudeContext, this.settings);
 await launcher.launchLoom({
@@ -2436,4 +2439,4 @@ export {
 DatabaseManager,
 ResourceCleanup
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-ETY2SBW5.js.map
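
The final hunks give LoomManager an optional githubService collaborator (a tenth constructor argument), remove the guard that previously rejected the 'github-draft-pr' merge mode for trackers without pull-request support, and replace the truncated PR lookup with an explicit fallback chain. Below is a minimal TypeScript sketch of that control flow; the interface shapes are hypothetical, and only fetchPR, supportsPullRequests, and GitHubService are actually visible in the diff.

```ts
// Sketch of the PR-lookup fallback shown in the LoomManager hunks above.
interface PRData { title: string }

interface IssueTrackerLike {
  supportsPullRequests: boolean;
  fetchPR?: (id: string | number) => Promise<PRData | null>;
}

interface GitHubServiceLike {
  fetchPR: (id: string | number) => Promise<PRData | null>;
}

async function fetchPRWithFallback(
  id: string | number,
  tracker: IssueTrackerLike,
  injected: GitHubServiceLike | undefined, // stands in for the new constructor argument
  createDefault: () => GitHubServiceLike   // stands in for `new GitHubService()`
): Promise<PRData | null> {
  if (tracker.supportsPullRequests && tracker.fetchPR) {
    return tracker.fetchPR(id);            // 1. the configured tracker serves PRs natively
  }
  if (injected) {
    return injected.fetchPR(id);           // 2. an injected GitHub service
  }
  return createDefault().fetchPR(id);      // 3. last resort: construct one on the fly
}
```

Under this arrangement a non-GitHub tracker no longer needs to implement fetchPR itself, which matches the removal of the supportsPullRequests error earlier in the same chunk.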