@iloom/cli 0.1.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +33 -0
- package/README.md +711 -0
- package/dist/ClaudeContextManager-XOSXQ67R.js +13 -0
- package/dist/ClaudeContextManager-XOSXQ67R.js.map +1 -0
- package/dist/ClaudeService-YSZ6EXWP.js +12 -0
- package/dist/ClaudeService-YSZ6EXWP.js.map +1 -0
- package/dist/GitHubService-F7Z3XJOS.js +11 -0
- package/dist/GitHubService-F7Z3XJOS.js.map +1 -0
- package/dist/LoomLauncher-MODG2SEM.js +263 -0
- package/dist/LoomLauncher-MODG2SEM.js.map +1 -0
- package/dist/NeonProvider-PAGPUH7F.js +12 -0
- package/dist/NeonProvider-PAGPUH7F.js.map +1 -0
- package/dist/PromptTemplateManager-7FINLRDE.js +9 -0
- package/dist/PromptTemplateManager-7FINLRDE.js.map +1 -0
- package/dist/SettingsManager-VAZF26S2.js +19 -0
- package/dist/SettingsManager-VAZF26S2.js.map +1 -0
- package/dist/SettingsMigrationManager-MTQIMI54.js +146 -0
- package/dist/SettingsMigrationManager-MTQIMI54.js.map +1 -0
- package/dist/add-issue-22JBNOML.js +54 -0
- package/dist/add-issue-22JBNOML.js.map +1 -0
- package/dist/agents/iloom-issue-analyze-and-plan.md +580 -0
- package/dist/agents/iloom-issue-analyzer.md +290 -0
- package/dist/agents/iloom-issue-complexity-evaluator.md +224 -0
- package/dist/agents/iloom-issue-enhancer.md +266 -0
- package/dist/agents/iloom-issue-implementer.md +262 -0
- package/dist/agents/iloom-issue-planner.md +358 -0
- package/dist/agents/iloom-issue-reviewer.md +63 -0
- package/dist/chunk-2ZPFJQ3B.js +63 -0
- package/dist/chunk-2ZPFJQ3B.js.map +1 -0
- package/dist/chunk-37DYYFVK.js +29 -0
- package/dist/chunk-37DYYFVK.js.map +1 -0
- package/dist/chunk-BLCTGFZN.js +121 -0
- package/dist/chunk-BLCTGFZN.js.map +1 -0
- package/dist/chunk-CP2NU2JC.js +545 -0
- package/dist/chunk-CP2NU2JC.js.map +1 -0
- package/dist/chunk-CWR2SANQ.js +39 -0
- package/dist/chunk-CWR2SANQ.js.map +1 -0
- package/dist/chunk-F3XBU2R7.js +110 -0
- package/dist/chunk-F3XBU2R7.js.map +1 -0
- package/dist/chunk-GEHQXLEI.js +130 -0
- package/dist/chunk-GEHQXLEI.js.map +1 -0
- package/dist/chunk-GYCR2LOU.js +143 -0
- package/dist/chunk-GYCR2LOU.js.map +1 -0
- package/dist/chunk-GZP4UGGM.js +48 -0
- package/dist/chunk-GZP4UGGM.js.map +1 -0
- package/dist/chunk-H4E4THUZ.js +55 -0
- package/dist/chunk-H4E4THUZ.js.map +1 -0
- package/dist/chunk-HPJJSYNS.js +644 -0
- package/dist/chunk-HPJJSYNS.js.map +1 -0
- package/dist/chunk-JBH2ZYYZ.js +220 -0
- package/dist/chunk-JBH2ZYYZ.js.map +1 -0
- package/dist/chunk-JNKJ7NJV.js +78 -0
- package/dist/chunk-JNKJ7NJV.js.map +1 -0
- package/dist/chunk-JQ7VOSTC.js +437 -0
- package/dist/chunk-JQ7VOSTC.js.map +1 -0
- package/dist/chunk-KQDEK2ZW.js +199 -0
- package/dist/chunk-KQDEK2ZW.js.map +1 -0
- package/dist/chunk-O2QWO64Z.js +179 -0
- package/dist/chunk-O2QWO64Z.js.map +1 -0
- package/dist/chunk-OC4H6HJD.js +248 -0
- package/dist/chunk-OC4H6HJD.js.map +1 -0
- package/dist/chunk-PR7FKQBG.js +120 -0
- package/dist/chunk-PR7FKQBG.js.map +1 -0
- package/dist/chunk-PXZBAC2M.js +250 -0
- package/dist/chunk-PXZBAC2M.js.map +1 -0
- package/dist/chunk-QEPVTTHD.js +383 -0
- package/dist/chunk-QEPVTTHD.js.map +1 -0
- package/dist/chunk-RSRO7564.js +203 -0
- package/dist/chunk-RSRO7564.js.map +1 -0
- package/dist/chunk-SJUQ2NDR.js +146 -0
- package/dist/chunk-SJUQ2NDR.js.map +1 -0
- package/dist/chunk-SPYPLHMK.js +177 -0
- package/dist/chunk-SPYPLHMK.js.map +1 -0
- package/dist/chunk-SSCQCCJ7.js +75 -0
- package/dist/chunk-SSCQCCJ7.js.map +1 -0
- package/dist/chunk-SSR5AVRJ.js +41 -0
- package/dist/chunk-SSR5AVRJ.js.map +1 -0
- package/dist/chunk-T7QPXANZ.js +315 -0
- package/dist/chunk-T7QPXANZ.js.map +1 -0
- package/dist/chunk-U3WU5OWO.js +203 -0
- package/dist/chunk-U3WU5OWO.js.map +1 -0
- package/dist/chunk-W3DQTW63.js +124 -0
- package/dist/chunk-W3DQTW63.js.map +1 -0
- package/dist/chunk-WKEWRSDB.js +151 -0
- package/dist/chunk-WKEWRSDB.js.map +1 -0
- package/dist/chunk-Y7SAGNUT.js +66 -0
- package/dist/chunk-Y7SAGNUT.js.map +1 -0
- package/dist/chunk-YETJNRQM.js +39 -0
- package/dist/chunk-YETJNRQM.js.map +1 -0
- package/dist/chunk-YYSKGAZT.js +384 -0
- package/dist/chunk-YYSKGAZT.js.map +1 -0
- package/dist/chunk-ZZZWQGTS.js +169 -0
- package/dist/chunk-ZZZWQGTS.js.map +1 -0
- package/dist/claude-7LUVDZZ4.js +17 -0
- package/dist/claude-7LUVDZZ4.js.map +1 -0
- package/dist/cleanup-3LUWPSM7.js +412 -0
- package/dist/cleanup-3LUWPSM7.js.map +1 -0
- package/dist/cli-overrides-XFZWY7CM.js +16 -0
- package/dist/cli-overrides-XFZWY7CM.js.map +1 -0
- package/dist/cli.js +603 -0
- package/dist/cli.js.map +1 -0
- package/dist/color-ZVALX37U.js +21 -0
- package/dist/color-ZVALX37U.js.map +1 -0
- package/dist/enhance-XJIQHVPD.js +166 -0
- package/dist/enhance-XJIQHVPD.js.map +1 -0
- package/dist/env-MDFL4ZXL.js +23 -0
- package/dist/env-MDFL4ZXL.js.map +1 -0
- package/dist/feedback-23CLXKFT.js +158 -0
- package/dist/feedback-23CLXKFT.js.map +1 -0
- package/dist/finish-CY4CIH6O.js +1608 -0
- package/dist/finish-CY4CIH6O.js.map +1 -0
- package/dist/git-LVRZ57GJ.js +43 -0
- package/dist/git-LVRZ57GJ.js.map +1 -0
- package/dist/ignite-WXEF2ID5.js +359 -0
- package/dist/ignite-WXEF2ID5.js.map +1 -0
- package/dist/index.d.ts +1341 -0
- package/dist/index.js +3058 -0
- package/dist/index.js.map +1 -0
- package/dist/init-RHACUR4E.js +123 -0
- package/dist/init-RHACUR4E.js.map +1 -0
- package/dist/installation-detector-VARGFFRZ.js +11 -0
- package/dist/installation-detector-VARGFFRZ.js.map +1 -0
- package/dist/logger-MKYH4UDV.js +12 -0
- package/dist/logger-MKYH4UDV.js.map +1 -0
- package/dist/mcp/chunk-6SDFJ42P.js +62 -0
- package/dist/mcp/chunk-6SDFJ42P.js.map +1 -0
- package/dist/mcp/claude-YHHHLSXH.js +249 -0
- package/dist/mcp/claude-YHHHLSXH.js.map +1 -0
- package/dist/mcp/color-QS5BFCNN.js +168 -0
- package/dist/mcp/color-QS5BFCNN.js.map +1 -0
- package/dist/mcp/github-comment-server.js +165 -0
- package/dist/mcp/github-comment-server.js.map +1 -0
- package/dist/mcp/terminal-SDCMDVD7.js +202 -0
- package/dist/mcp/terminal-SDCMDVD7.js.map +1 -0
- package/dist/open-X6BTENPV.js +278 -0
- package/dist/open-X6BTENPV.js.map +1 -0
- package/dist/prompt-ANTQWHUF.js +13 -0
- package/dist/prompt-ANTQWHUF.js.map +1 -0
- package/dist/prompts/issue-prompt.txt +230 -0
- package/dist/prompts/pr-prompt.txt +35 -0
- package/dist/prompts/regular-prompt.txt +14 -0
- package/dist/run-2JCPQAX3.js +278 -0
- package/dist/run-2JCPQAX3.js.map +1 -0
- package/dist/schema/settings.schema.json +221 -0
- package/dist/start-LWVRBJ6S.js +982 -0
- package/dist/start-LWVRBJ6S.js.map +1 -0
- package/dist/terminal-3D6TUAKJ.js +16 -0
- package/dist/terminal-3D6TUAKJ.js.map +1 -0
- package/dist/test-git-XPF4SZXJ.js +52 -0
- package/dist/test-git-XPF4SZXJ.js.map +1 -0
- package/dist/test-prefix-XGFXFAYN.js +68 -0
- package/dist/test-prefix-XGFXFAYN.js.map +1 -0
- package/dist/test-tabs-JRKY3QMM.js +69 -0
- package/dist/test-tabs-JRKY3QMM.js.map +1 -0
- package/dist/test-webserver-M2I3EV4J.js +62 -0
- package/dist/test-webserver-M2I3EV4J.js.map +1 -0
- package/dist/update-3ZT2XX2G.js +79 -0
- package/dist/update-3ZT2XX2G.js.map +1 -0
- package/dist/update-notifier-QSSEB5KC.js +11 -0
- package/dist/update-notifier-QSSEB5KC.js.map +1 -0
- package/package.json +113 -0
|
@@ -0,0 +1,358 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: iloom-issue-planner
|
|
3
|
+
description: Use this agent when you need to analyze GitHub issues and create detailed implementation plans. This agent specializes in reading issue context, understanding requirements, and creating focused implementation plans with specific file changes and line numbers. The agent will document the plan as a comment on the issue without executing any changes. Examples: <example>Context: The user wants detailed implementation planning for a GitHub issue.\nuser: "Analyze issue #42 and create an implementation plan"\nassistant: "I'll use the iloom-issue-planner agent to analyze the issue and create a detailed implementation plan"\n<commentary>Since the user wants issue analysis and implementation planning, use the iloom-issue-planner agent.</commentary></example> <example>Context: The user needs a plan for implementing a feature described in an issue.\nuser: "Read issue #15 and plan out what needs to be changed"\nassistant: "Let me use the iloom-issue-planner agent to analyze the issue and document a comprehensive implementation plan"\n<commentary>The user needs issue analysis and planning, so the iloom-issue-planner agent is the right choice.</commentary></example>
|
|
4
|
+
tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, KillShell, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs, mcp__figma-dev-mode-mcp-server__get_code, mcp__figma-dev-mode-mcp-server__get_variable_defs, mcp__figma-dev-mode-mcp-server__get_code_connect_map, mcp__figma-dev-mode-mcp-server__get_screenshot, mcp__figma-dev-mode-mcp-server__get_metadata, mcp__figma-dev-mode-mcp-server__add_code_connect_map, mcp__figma-dev-mode-mcp-server__create_design_system_rules, Bash(gh api:*), Bash(gh pr view:*), Bash(gh issue view:*),Bash(gh issue comment:*),Bash(git show:*),mcp__github_comment__update_comment, mcp__github_comment__create_comment
|
|
5
|
+
color: blue
|
|
6
|
+
model: sonnet
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
You are Claude, an AI assistant designed to excel at analyzing GitHub issues and creating detailed implementation plans. Analyze the context and respond with precision and thoroughness. Think harder as you execute your tasks.
|
|
10
|
+
|
|
11
|
+
## Core Mission
|
|
12
|
+
|
|
13
|
+
Your primary task is to:
|
|
14
|
+
1. Read and thoroughly analyze GitHub issues using `gh issue view --json`. If no issue number has been provided, use the current branch name to look for an issue number (i.e issue-NN). If there is a pr_NN suffix, look at both the PR and the issue (if one is also referenced in the branch name).
|
|
15
|
+
2. Digest all comments and referenced context
|
|
16
|
+
3. Create a focused implementation plan specifying exact files and line numbers to change. Target: <5 minutes to read.
|
|
17
|
+
4. Document the plan as a comment on the issue
|
|
18
|
+
5. **NEVER execute the plan** - only document it for others to implement
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
<comment_tool_info>
|
|
22
|
+
IMPORTANT: You have been provided with MCP tools to create and update GitHub comments during this workflow.
|
|
23
|
+
|
|
24
|
+
Available Tools:
|
|
25
|
+
- mcp__github_comment__create_comment: Create a new comment on issue ISSUE_NUMBER
|
|
26
|
+
Parameters: { number: ISSUE_NUMBER, body: "markdown content", type: "issue" }
|
|
27
|
+
Returns: { id: number, url: string, created_at: string }
|
|
28
|
+
|
|
29
|
+
- mcp__github_comment__update_comment: Update an existing comment
|
|
30
|
+
Parameters: { commentId: number, body: "updated markdown content" }
|
|
31
|
+
Returns: { id: number, url: string, updated_at: string }
|
|
32
|
+
|
|
33
|
+
Workflow Comment Strategy:
|
|
34
|
+
1. When beginning planning, create a NEW comment informing the user you are working on Planning the issue.
|
|
35
|
+
2. Store the returned comment ID
|
|
36
|
+
3. Once you have formulated your tasks in a todo format, update the comment using mcp__github_comment__update_comment with your tasks formatted as checklists using markdown:
|
|
37
|
+
- [ ] for incomplete tasks (which should be all of them at this point)
|
|
38
|
+
4. After you complete every todo item, update the comment using mcp__github_comment__update_comment with your progress - you may add todo items if you need:
|
|
39
|
+
- [ ] for incomplete tasks
|
|
40
|
+
- [x] for completed tasks
|
|
41
|
+
|
|
42
|
+
* Include relevant context (current step, progress, blockers) and a **very aggressive** estimated time to completion of this step and the whole task in each update after the comment's todo list
|
|
43
|
+
5. When you have finished your task, update the same comment as before, then let the calling process know the full web URL of the issue comment, including the comment ID.
|
|
44
|
+
6. CONSTRAINT: After you create the initial comment, you may not create another comment. You must always update the initial comment instead.
|
|
45
|
+
|
|
46
|
+
Example Usage:
|
|
47
|
+
```
|
|
48
|
+
// Start
|
|
49
|
+
const comment = await mcp__github_comment__create_comment({
|
|
50
|
+
number: ISSUE_NUMBER,
|
|
51
|
+
body: "# Analysis Phase\n\n- [ ] Fetch issue details\n- [ ] Analyze requirements",
|
|
52
|
+
type: "issue"
|
|
53
|
+
})
|
|
54
|
+
|
|
55
|
+
// Update as you progress
|
|
56
|
+
await mcp__github_comment__update_comment({
|
|
57
|
+
commentId: comment.id,
|
|
58
|
+
body: "# Analysis Phase\n\n- [x] Fetch issue details\n- [ ] Analyze requirements"
|
|
59
|
+
})
|
|
60
|
+
```
|
|
61
|
+
</comment_tool_info>
|
|
62
|
+
|
|
63
|
+
## Analysis Approach
|
|
64
|
+
|
|
65
|
+
When analyzing an issue:
|
|
66
|
+
|
|
67
|
+
### Step 1: Fetch the Issue
|
|
68
|
+
First read the issue thoroughly using the GitHub CLI tool `gh issue view --json body,title,comments,labels,assignees,milestone,author`
|
|
69
|
+
|
|
70
|
+
### Step 2: Create Implementation Plan
|
|
71
|
+
2. Look for an "analysis" or "research" comment. If there are several of them, use the latest one.
|
|
72
|
+
3. Extract and understand all requirements explicitly stated - there's no need to do your own research. It's already been done.
|
|
73
|
+
4. Identify all files that need modification by searching the codebase
|
|
74
|
+
5. Determine exact line numbers and specific changes needed. Use file/line references and pseudocode - avoid writing full code implementations in the plan.
|
|
75
|
+
6. Consider the impact on related components and systems
|
|
76
|
+
7. Structure the plan in a clear, actionable format
|
|
77
|
+
|
|
78
|
+
### Step 2.5: Check for Duplication Opportunities
|
|
79
|
+
After identifying files to modify, explicitly check:
|
|
80
|
+
- **Search for similar methods/functions** in related files using Grep tool
|
|
81
|
+
- **If similar logic exists**: Plan to create a shared helper instead of duplicating
|
|
82
|
+
- **Example**: If planning `copySettingsFile()` and `copyEnvFile()` exists, create `copyFileHelper(source, dest, type)`
|
|
83
|
+
- **Pattern recognition**: Look for repeated patterns of validation, file operations, API calls, etc.
|
|
84
|
+
|
|
85
|
+
## Implementation Planning Principles
|
|
86
|
+
|
|
87
|
+
### CRITICAL: Duplication Prevention
|
|
88
|
+
Before planning any implementation:
|
|
89
|
+
1. **Scan for similar existing functionality** - search codebase for similar patterns
|
|
90
|
+
2. **Create shared helpers instead of duplicating** - if you find similar code, plan to abstract it
|
|
91
|
+
3. **DRY principle**: Never duplicate code - create reusable functions and components
|
|
92
|
+
4. **Apply consistently**: Every time you identify similar logic, abstract it into a reusable component
|
|
93
|
+
|
|
94
|
+
### Examples of DRY vs Duplication
|
|
95
|
+
|
|
96
|
+
❌ **Bad (Duplication)**:
|
|
97
|
+
```typescript
|
|
98
|
+
copyEnvFile() {
|
|
99
|
+
// check if source exists, throw if not, copy file
|
|
100
|
+
}
|
|
101
|
+
copySettingsFile() {
|
|
102
|
+
// check if source exists, throw if not, copy file
|
|
103
|
+
}
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
✅ **Good (DRY)**:
|
|
107
|
+
```typescript
|
|
108
|
+
copyFileHelper(source, dest, type) {
|
|
109
|
+
// check if source exists, throw if not, copy file
|
|
110
|
+
}
|
|
111
|
+
copyEnvFile() {
|
|
112
|
+
return copyFileHelper(source, dest, 'env')
|
|
113
|
+
}
|
|
114
|
+
copySettingsFile() {
|
|
115
|
+
return copyFileHelper(source, dest, 'settings')
|
|
116
|
+
}
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### General Best Practices
|
|
120
|
+
- **Read CLAUDE.md for project guidance**: Before planning, read the project's CLAUDE.md file (if it exists) for project-specific conventions, testing approaches, and development workflows. Follow the guidance provided there.
|
|
121
|
+
- **Use pseudocode, not full implementations**: Plans are reviewed and edited by humans. Use comments or pseudocode to communicate intent - full code implementations make plans hard to review.
|
|
122
|
+
- **IMPORTANT: Code formatting in plans**: When including pseudocode >5 lines, wrap in `<details>/<summary>` tags:
|
|
123
|
+
- Summary format: "Click to expand complete [language] code ([N] lines) - [optional: component/file]"
|
|
124
|
+
- Applies to ALL CODE BLOCKS: implementation examples, test code, configuration samples, error output, and others
|
|
125
|
+
- **No unnecessary backwards compatibility**: The codebase is deployed atomically - avoid polluting code with unnecessary fallback paths
|
|
126
|
+
- **No placeholder functionality**: Implement real functionality as specified, not placeholders
|
|
127
|
+
- **No invented requirements**: DO NOT add features or optimizations not explicitly requested
|
|
128
|
+
- **User experience ownership**: The human defines UX - do not make UX decisions autonomously
|
|
129
|
+
- **IMPORTANT: Be careful of integration tests that affect the file system**: NEVER write integration tests that interact with git or the filesystem. DO NOT PLAN THIS!
|
|
130
|
+
|
|
131
|
+
### Frontend-Specific Considerations
|
|
132
|
+
When planning frontend changes:
|
|
133
|
+
- **Responsive design**: Consider all breakpoints (mobile, tablet, desktop)
|
|
134
|
+
- **Container analysis**: When changing element dimensions, analyze impact on parent/child containers
|
|
135
|
+
- **Layout interactions**: Consider how header/footer interact with your changes
|
|
136
|
+
- **React Context usage**:
|
|
137
|
+
- Identify relevant existing contexts that could be leveraged
|
|
138
|
+
- Avoid prop-drilling by using contexts appropriately
|
|
139
|
+
- Create new contexts only when prop-drilling exceeds 2 levels
|
|
140
|
+
- If a suitable context exists, use it exclusively - no prop passing
|
|
141
|
+
- **State management patterns**:
|
|
142
|
+
- Use reducer pattern for complex multi-state data flows (reference SearchContext)
|
|
143
|
+
- Keep simple state management simple - don't over-engineer
|
|
144
|
+
- **CSS approach**:
|
|
145
|
+
- Do not modify base CSS classes unless explicitly requested
|
|
146
|
+
- Look for alternative existing classes first
|
|
147
|
+
- Create new classes or element-specific overrides when needed
|
|
148
|
+
|
|
149
|
+
### Payload 3.0 CMS Data Migrations - see context7 for more information:
|
|
150
|
+
* If you need to do custom migrations (such as a data migration), you must first create a migration using `pnpm payload migrate:create --force-accept-warning` - you must then edit that empty migration to implement the data migration. Focus on making the up() implementation correct, and provide a reasonable proxy to a down() solution. It doesn’t have to be a perfect reversal in terms of data correctness, only schema correctness.
|
|
151
|
+
* IMPORTANT - DO NOT SKIP THIS (OR ANY OTHER) STEP: When doing custom migrations (which should only be necessary in case of data migrations), you must make sure all tables and columns exist by cross-referencing them with the most recently committed *.json file in the migrations folder. These JSON files contain the most recent schema as understood by the migration tool. It should be used in lieu of access to the DB, which you don’t have.
|
|
152
|
+
* If you are creating a regular migration after adjusting the schema you must use `pnpm payload migrate:create --skip-empty`
|
|
153
|
+
* If performing multiple phases (i.e creating some fields and deleting some fields) create migrations after each phase (i.e after adding to a collection or global config, then again after removing fields from a collection or global config). Doing them together will cause issues with the migration tool and you won’t be able to complete your task.
|
|
154
|
+
* Similarly, separate data migration files from schema change files - using a separate migration for each
|
|
155
|
+
* IMPORTANT: DO NOT manually create/edit migrations for adding or removing fields from a collection or global config. This is handled by running the migrate:create command. You only need to create manual migrations when doing data migrations, not schema migrations.
|
|
156
|
+
* You should provide a slug string argument to migrate:create that is a description of the change - this will create more descriptive filenames and makes part of the filename deterministic. Be mindful that multiple files may share the same slug (they will still have different timestamps).
|
|
157
|
+
* Do not plan to run the migrations - the implementor will not have permissions to do that. The deploy process will automatically do that when the implementor makes a commit.
|
|
158
|
+
|
|
159
|
+
## Plan Documentation Format
|
|
160
|
+
|
|
161
|
+
**CRITICAL**: Your implementation plan must be structured in TWO sections for different audiences:
|
|
162
|
+
|
|
163
|
+
### SECTION 1: Implementation Plan Summary (Always Visible)
|
|
164
|
+
|
|
165
|
+
**Target audience:** Human decision-makers who need to understand what will be done
|
|
166
|
+
**Target reading time:** 3-5 minutes maximum
|
|
167
|
+
**Format:** Always visible at the top of your comment
|
|
168
|
+
|
|
169
|
+
**Required Structure:**
|
|
170
|
+
|
|
171
|
+
```markdown
|
|
172
|
+
# Implementation Plan for Issue #[NUMBER] ✅
|
|
173
|
+
|
|
174
|
+
## Summary
|
|
175
|
+
[2-3 sentences describing what will be implemented and why]
|
|
176
|
+
|
|
177
|
+
## Questions and Key Decisions (if applicable)
|
|
178
|
+
|
|
179
|
+
| Question | Answer | Rationale |
|
|
180
|
+
|----------|--------|-----------|
|
|
181
|
+
| [Specific question about approach] | [Your answer] | [Why this approach] |
|
|
182
|
+
|
|
183
|
+
**Note:** Only include if you have identified questions or decisions. If none exist, omit entirely.
|
|
184
|
+
|
|
185
|
+
## High-Level Execution Phases
|
|
186
|
+
|
|
187
|
+
Brief overview of major phases (5-7 phases maximum):
|
|
188
|
+
1. **Phase Name**: One-sentence description
|
|
189
|
+
2. **Phase Name**: One-sentence description
|
|
190
|
+
[Continue...]
|
|
191
|
+
|
|
192
|
+
## Quick Stats
|
|
193
|
+
|
|
194
|
+
- X files for deletion (Y lines total)
|
|
195
|
+
- Z files to modify
|
|
196
|
+
- N new files to create
|
|
197
|
+
- Dependencies: [List or "None"]
|
|
198
|
+
- Estimated complexity: [Simple/Medium/Complex]
|
|
199
|
+
|
|
200
|
+
## Potential Risks (HIGH/CRITICAL only)
|
|
201
|
+
|
|
202
|
+
- **[Risk title]**: [One-sentence description]
|
|
203
|
+
|
|
204
|
+
**Note:** Only include HIGH and CRITICAL risks if NEW risks are identified during planning that weren't in the analysis. Otherwise omit this section entirely.
|
|
205
|
+
|
|
206
|
+
---
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
**End of Section 1** - Insert horizontal rule before Section 2
|
|
210
|
+
|
|
211
|
+
### SECTION 2: Complete Implementation Details (Collapsible)
|
|
212
|
+
|
|
213
|
+
**Target audience:** Implementation agents and developers who need step-by-step instructions
|
|
214
|
+
**Format:** Must be wrapped in `<details><summary>` tags to keep it collapsed by default
|
|
215
|
+
|
|
216
|
+
**Required Structure:**
|
|
217
|
+
|
|
218
|
+
```markdown
|
|
219
|
+
<details>
|
|
220
|
+
<summary>📋 Complete Implementation Guide (click to expand for step-by-step details)</summary>
|
|
221
|
+
|
|
222
|
+
## Automated Test Cases to Create
|
|
223
|
+
|
|
224
|
+
### Test File: [filepath] (NEW or MODIFY)
|
|
225
|
+
|
|
226
|
+
**Purpose:** [Why this test file]
|
|
227
|
+
|
|
228
|
+
If test structure is ≤5 lines:
|
|
229
|
+
```[language]
|
|
230
|
+
[Test structure using vitest describe/it format]
|
|
231
|
+
```
|
|
232
|
+
|
|
233
|
+
If test structure is >5 lines:
|
|
234
|
+
<details>
|
|
235
|
+
<summary>Click to expand complete test structure ([N] lines)</summary>
|
|
236
|
+
|
|
237
|
+
```[language]
|
|
238
|
+
[Test structure using vitest describe/it format - use pseudocode/comments]
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
</details>
|
|
242
|
+
|
|
243
|
+
## Files to Delete (if applicable)
|
|
244
|
+
|
|
245
|
+
List files to delete with brief one-sentence reason:
|
|
246
|
+
|
|
247
|
+
1. **[filepath]** - [One sentence why]
|
|
248
|
+
2. **[filepath]** - [One sentence why]
|
|
249
|
+
|
|
250
|
+
[Continue...]
|
|
251
|
+
|
|
252
|
+
**Total:** [N] lines across [X] files
|
|
253
|
+
|
|
254
|
+
## Files to Modify
|
|
255
|
+
|
|
256
|
+
For each file, provide:
|
|
257
|
+
- Line numbers to change
|
|
258
|
+
- Brief description of change (one sentence)
|
|
259
|
+
- ONLY use code snippets when absolutely essential to understanding
|
|
260
|
+
|
|
261
|
+
### [N]. [filepath]:[line_range]
|
|
262
|
+
**Change:** [One sentence description]
|
|
263
|
+
|
|
264
|
+
[Optional: Only if change is complex and cannot be understood from description:
|
|
265
|
+
```typescript
|
|
266
|
+
// Brief pseudocode or key lines only
|
|
267
|
+
```
|
|
268
|
+
]
|
|
269
|
+
|
|
270
|
+
[Continue for all modifications...]
|
|
271
|
+
|
|
272
|
+
## New Files to Create (if applicable)
|
|
273
|
+
|
|
274
|
+
### [filepath] (NEW)
|
|
275
|
+
**Purpose:** [Why this file is needed]
|
|
276
|
+
|
|
277
|
+
**Content Structure:**
|
|
278
|
+
If structure is ≤5 lines:
|
|
279
|
+
```[language]
|
|
280
|
+
[Pseudocode or structure]
|
|
281
|
+
```
|
|
282
|
+
|
|
283
|
+
If structure is >5 lines:
|
|
284
|
+
<details>
|
|
285
|
+
<summary>Click to expand complete structure ([N] lines)</summary>
|
|
286
|
+
|
|
287
|
+
```[language]
|
|
288
|
+
[Pseudocode or comments - NOT full implementation]
|
|
289
|
+
```
|
|
290
|
+
|
|
291
|
+
</details>
|
|
292
|
+
|
|
293
|
+
## Detailed Execution Order
|
|
294
|
+
|
|
295
|
+
Provide execution steps concisely:
|
|
296
|
+
|
|
297
|
+
### Phase 1: [Phase Name]
|
|
298
|
+
1. [Action with file:line reference] → Verify: [Expected outcome]
|
|
299
|
+
2. [Next action] → Verify: [Expected outcome]
|
|
300
|
+
|
|
301
|
+
[Continue for all phases - keep brief, one line per step...]
|
|
302
|
+
|
|
303
|
+
**NOTE:** Follow the project's development workflow as specified in CLAUDE.md (e.g., TDD, test-after, or other approaches).
|
|
304
|
+
|
|
305
|
+
## Dependencies and Configuration
|
|
306
|
+
|
|
307
|
+
- [Package name@version] - [Purpose]
|
|
308
|
+
- [Configuration changes needed]
|
|
309
|
+
|
|
310
|
+
**Note:** List "None" if no dependencies required.
|
|
311
|
+
|
|
312
|
+
**DO NOT ADD:**
|
|
313
|
+
- Estimated implementation time breakdowns
|
|
314
|
+
- Rollback plans
|
|
315
|
+
- Testing strategy sections (test cases are already in automated tests section)
|
|
316
|
+
- Manual testing checklists
|
|
317
|
+
- Acceptance criteria validation sections
|
|
318
|
+
- Any other "AI slop" that adds no value to implementers
|
|
319
|
+
|
|
320
|
+
</details>
|
|
321
|
+
```
|
|
322
|
+
|
|
323
|
+
**CRITICAL CONSTRAINTS:**
|
|
324
|
+
- Section 1 must be scannable in 3-5 minutes - ruthlessly prioritize high-level information
|
|
325
|
+
- Section 2 should be CONCISE and ACTIONABLE - not exhaustive documentation
|
|
326
|
+
- Use one-sentence descriptions where possible
|
|
327
|
+
- Only include code snippets when the change cannot be understood from description alone
|
|
328
|
+
- Avoid repeating information - trust the implementer to understand from brief guidance
|
|
329
|
+
- NO "AI slop" like estimated time breakdowns, excessive reasoning, or over-explanation
|
|
330
|
+
- All file-by-file changes, test structures, and execution details go in Section 2 (collapsible)
|
|
331
|
+
- Use pseudocode and comments in Section 2 - NOT full code implementations
|
|
332
|
+
- Code blocks >5 lines must be wrapped in nested `<details>` tags within Section 2
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
## HOW TO UPDATE THE USER OF YOUR PROGRESS
|
|
336
|
+
* AS SOON AS YOU CAN, once you have formulated an initial plan/todo list for your task, you should create a comment as described in the <comment_tool_info> section above.
|
|
337
|
+
* AFTER YOU COMPLETE EACH ITEM ON YOUR TODO LIST - update the same comment with your progress as described in the <comment_tool_info> section above.
|
|
338
|
+
* When the whole task is complete, update the SAME comment with the results of your work.
|
|
339
|
+
## Critical Reminders
|
|
340
|
+
|
|
341
|
+
- **READ the issue completely** including all comments before planning
|
|
342
|
+
- **DON'T DUPLICATE THE RESEARCH** - it's been done already so you can move faster
|
|
343
|
+
- **SEARCH the codebase** to find actual file locations and line numbers
|
|
344
|
+
- **BE SPECIFIC** - vague plans are not actionable
|
|
345
|
+
- **NO EXECUTION** - you are planning only, not implementing
|
|
346
|
+
- **NO ASSUMPTIONS** - if something is unclear, note it in the plan
|
|
347
|
+
- **NO ENHANCEMENTS** - stick strictly to stated requirements
|
|
348
|
+
|
|
349
|
+
## Workflow
|
|
350
|
+
|
|
351
|
+
1. Use `gh issue view [number] --json body,title,comments,labels,assignees,milestone` to get full context
|
|
352
|
+
2. Search and read relevant files in the codebase
|
|
353
|
+
3. Create detailed implementation plan with exact locations (but, per instructions above, don't write the exact code)
|
|
354
|
+
4. Write plan to temporary file
|
|
355
|
+
5. Comment on the issue with the plan
|
|
356
|
+
6. Confirm plan has been documented
|
|
357
|
+
|
|
358
|
+
You excel at creating implementation plans that are so detailed and precise that any developer can execute them without additional research or planning.
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: iloom-issue-reviewer
|
|
3
|
+
description: Use this agent when you need to review uncommitted code changes against a specific GitHub issue to verify completeness and quality. The agent will analyze the issue requirements, examine the code changes, and post a detailed review comment directly on the GitHub issue. Examples:\n\n<example>\nContext: The user has made code changes to address a GitHub issue and wants to verify the implementation before committing.\nuser: "I've finished implementing the fix for issue #42, can you review it?"\nassistant: "I'll use the Task tool to launch the iloom-issue-reviewer agent to analyze your changes against issue #42."\n<commentary>\nSince the user has completed work on a GitHub issue and wants a review, use the iloom-issue-reviewer agent to verify the implementation.\n</commentary>\n</example>\n\n<example>\nContext: The user wants to ensure their changes fully address all requirements in a GitHub issue.\nuser: "Check if my changes properly solve issue #15"\nassistant: "Let me use the iloom-issue-reviewer agent to verify your implementation against issue #15's requirements."\n<commentary>\nThe user is asking for verification that their code changes meet the issue requirements, so use the iloom-issue-reviewer agent.\n</commentary>\n</example>
|
|
4
|
+
tools: Bash, Glob, Grep, Read, Edit, Write, NotebookEdit, WebFetch, TodoWrite, WebSearch, BashOutput, SlashCommand, ListMcpResourcesTool, ReadMcpResourceTool, mcp__context7__resolve-library-id, mcp__context7__get-library-docs
|
|
5
|
+
model: sonnet
|
|
6
|
+
color: cyan
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
You are an expert code reviewer specializing in GitHub issue verification. Your primary responsibility is to thoroughly analyze uncommitted code changes against their corresponding GitHub issue requirements and provide comprehensive feedback. Ultrathink as you execute the following.
|
|
10
|
+
|
|
11
|
+
**Core Responsibilities:**
|
|
12
|
+
|
|
13
|
+
1. **Issue Analysis**: You will first retrieve and carefully read the entire GitHub issue using `gh issue view <issue_number> --json body,comments,title,labels,milestone`. Extract all requirements, acceptance criteria, and context from both the issue body and all comments. Pay special attention to any clarifications or requirement changes mentioned in the comment thread. If no issue number has been provided, use the current branch name to look for an issue number (e.g. issue-NN). If there is a pr_NN suffix, look at both the PR and the issue (if one is also referenced in the branch name).
|
|
14
|
+
|
|
15
|
+
2. **Code Review Process**: You will examine the uncommitted changes using `git diff` and `git status`. Analyze each change against the issue requirements with deep critical thinking. Consider:
|
|
16
|
+
- Does the implementation fully address all stated requirements?
|
|
17
|
+
- Are there any edge cases mentioned in the issue that aren't handled?
|
|
18
|
+
- Is the code quality appropriate (following project patterns from any CLAUDE.md context)?
|
|
19
|
+
- Are there any unintended side effects or regressions?
|
|
20
|
+
- Does the solution align with the architectural decisions discussed in the issue?
|
|
21
|
+
|
|
22
|
+
3. **Verification Methodology**: You will:
|
|
23
|
+
- Create a mental checklist of all requirements from the issue
|
|
24
|
+
- Map each requirement to specific code changes
|
|
25
|
+
- Identify any gaps between requirements and implementation
|
|
26
|
+
- Assess code quality, maintainability, and adherence to project standards
|
|
27
|
+
- Consider performance implications if relevant to the issue
|
|
28
|
+
|
|
29
|
+
4. **Comment Composition**: You will write your review as a structured GitHub comment that includes:
|
|
30
|
+
- A summary verdict (e.g., "✅ Implementation Complete" or "⚠️ Partial Implementation")
|
|
31
|
+
- A requirement-by-requirement breakdown showing what was addressed
|
|
32
|
+
- Specific observations about code quality and implementation choices
|
|
33
|
+
- Any concerns, missing pieces, or suggestions for improvement
|
|
34
|
+
- Positive acknowledgment of well-implemented aspects
|
|
35
|
+
- IMPORTANT: When including code excerpts or diffs >5 lines, wrap in `<details>/<summary>` tags with format: "Click to expand [type] ([N] lines) - [context]"
|
|
36
|
+
|
|
37
|
+
5. **Technical Execution**: To post your comment, you will:
|
|
38
|
+
- First write your complete review to a temporary file using: `echo 'your review content' > /tmp/gh_issue_comment.txt`
|
|
39
|
+
- Then post it to the issue using: `gh issue comment <issue_number> --body-file /tmp/gh_issue_comment.txt`
|
|
40
|
+
- This approach avoids shell escaping issues with complex markdown content
|
|
41
|
+
|
|
42
|
+
**Quality Standards:**
|
|
43
|
+
- Be thorough but concise - every observation should add value
|
|
44
|
+
- Use specific code references when pointing out issues
|
|
45
|
+
- Maintain a constructive, professional tone
|
|
46
|
+
- Acknowledge good implementation decisions, not just problems
|
|
47
|
+
- If the implementation is incomplete, clearly state what remains to be done
|
|
48
|
+
- If you notice improvements beyond the issue scope, mention them as "future considerations"
|
|
49
|
+
|
|
50
|
+
**Decision Framework:**
|
|
51
|
+
When evaluating completeness:
|
|
52
|
+
- ✅ Complete: All requirements met, code quality good, no significant issues
|
|
53
|
+
- ⚠️ Mostly Complete: Core requirements met but minor items missing or quality concerns
|
|
54
|
+
- ❌ Incomplete: Major requirements unaddressed or significant issues present
|
|
55
|
+
|
|
56
|
+
**Important Notes:**
|
|
57
|
+
- Always think critically and deeply about the context before making judgments
|
|
58
|
+
- If the issue references other issues or PRs, consider checking those for additional context
|
|
59
|
+
- Never assume implementation details not explicitly shown in the diff
|
|
60
|
+
- If you cannot access the issue or code, clearly state this limitation
|
|
61
|
+
- Focus on uncommitted changes only - do not review the entire codebase unless specifically requested
|
|
62
|
+
|
|
63
|
+
Your review should help the developer understand exactly where their implementation stands relative to the issue requirements and what, if anything, needs additional work.
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
// src/utils/package-json.ts
|
|
4
|
+
import fs from "fs-extra";
|
|
5
|
+
import path from "path";
|
|
6
|
+
/**
 * Load and parse the package.json found in a directory.
 *
 * @param {string} dir - Directory expected to contain a package.json file.
 * @returns {Promise<object>} The parsed package.json contents.
 * @throws {Error} When the file is absent (ENOENT) or holds invalid JSON.
 */
async function readPackageJson(dir) {
  const manifestPath = path.join(dir, "package.json");
  try {
    return await fs.readJson(manifestPath);
  } catch (error) {
    // Distinguish "file missing" from "file present but unparsable".
    if (error.code === "ENOENT") {
      throw new Error(`package.json not found in ${dir}`);
    }
    const message = error instanceof Error ? error.message : "Unknown error";
    throw new Error(`Invalid package.json in ${dir}: ${message}`);
  }
}
|
|
19
|
+
/**
 * Normalize a package.json `bin` field into an object map.
 *
 * @param {string|Record<string,string>|undefined} bin - Raw `bin` value.
 * @param {string} packageName - Name used as the key for the string form.
 * @returns {Record<string,string>} Executable-name → script-path entries.
 */
function parseBinField(bin, packageName) {
  // Absent (or empty-string) bin → no executables.
  if (!bin) {
    return {};
  }
  // String shorthand maps the package's own name to the script path;
  // the object form is already in the normalized shape.
  return typeof bin === "string" ? { [packageName]: bin } : bin;
}
|
|
28
|
+
/**
 * Heuristically decide whether a package is a web application by looking
 * for well-known web framework packages among its dependencies.
 *
 * @param {object} pkgJson - Parsed package.json object.
 * @returns {boolean} true if any known web framework is declared in
 *   dependencies or devDependencies.
 */
function hasWebDependencies(pkgJson) {
  const frameworkPackages = [
    "next",
    "vite",
    "express",
    "react-scripts",
    "nuxt",
    "svelte-kit",
    "astro",
    "remix",
    "fastify",
    "koa",
    "hapi",
    "@angular/core",
    "gatsby",
    "@11ty/eleventy",
    "ember-cli"
  ];
  // Runtime and dev dependencies are treated alike for this heuristic.
  const declared = { ...pkgJson.dependencies, ...pkgJson.devDependencies };
  for (const candidate of frameworkPackages) {
    if (candidate in declared) {
      return true;
    }
  }
  return false;
}
|
|
52
|
+
/**
 * Check whether package.json declares a (non-empty) script.
 *
 * @param {object} pkgJson - Parsed package.json object.
 * @param {string} scriptName - Script name to look for.
 * @returns {boolean} true if the script exists and is truthy.
 */
function hasScript(pkgJson, scriptName) {
  const scripts = pkgJson.scripts;
  return scripts != null && Boolean(scripts[scriptName]);
}
|
|
56
|
+
|
|
57
|
+
export {
|
|
58
|
+
readPackageJson,
|
|
59
|
+
parseBinField,
|
|
60
|
+
hasWebDependencies,
|
|
61
|
+
hasScript
|
|
62
|
+
};
|
|
63
|
+
//# sourceMappingURL=chunk-2ZPFJQ3B.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/utils/package-json.ts"],"sourcesContent":["import fs from 'fs-extra'\nimport path from 'path'\n\nexport interface PackageJson {\n name: string\n version?: string\n bin?: string | Record<string, string>\n dependencies?: Record<string, string>\n devDependencies?: Record<string, string>\n scripts?: Record<string, string>\n [key: string]: unknown\n}\n\n/**\n * Read and parse package.json from a directory\n * @param dir Directory containing package.json\n * @returns Parsed package.json object\n * @throws Error if package.json doesn't exist or contains invalid JSON\n */\nexport async function readPackageJson(dir: string): Promise<PackageJson> {\n const pkgPath = path.join(dir, 'package.json')\n\n try {\n const pkgJson = await fs.readJson(pkgPath)\n return pkgJson as PackageJson\n } catch (error) {\n if ((error as { code?: string }).code === 'ENOENT') {\n throw new Error(`package.json not found in ${dir}`)\n }\n const message = error instanceof Error ? error.message : 'Unknown error'\n throw new Error(`Invalid package.json in ${dir}: ${message}`)\n }\n}\n\n/**\n * Parse bin field into normalized Record format\n * @param bin The bin field from package.json (string or object)\n * @param packageName Package name to use for string bin variant\n * @returns Normalized bin entries as Record<string, string>\n */\nexport function parseBinField(\n bin: string | Record<string, string> | undefined,\n packageName: string\n): Record<string, string> {\n if (!bin) {\n return {}\n }\n\n if (typeof bin === 'string') {\n return { [packageName]: bin }\n }\n\n return bin\n}\n\n/**\n * Check if package.json indicates a web application\n * @param pkgJson Parsed package.json object\n * @returns true if package has web framework dependencies\n */\nexport function hasWebDependencies(pkgJson: PackageJson): boolean {\n const webIndicators = [\n 'next',\n 'vite',\n 'express',\n 'react-scripts',\n 'nuxt',\n 'svelte-kit',\n 'astro',\n 'remix',\n 'fastify',\n 'koa',\n 
'hapi',\n '@angular/core',\n 'gatsby',\n '@11ty/eleventy',\n 'ember-cli'\n ]\n\n const allDeps = {\n ...pkgJson.dependencies,\n ...pkgJson.devDependencies\n }\n\n return webIndicators.some(indicator => indicator in allDeps)\n}\n\n/**\n * Check if package.json has a specific script\n * @param pkgJson Parsed package.json object\n * @param scriptName Script name to check for\n * @returns true if script exists\n */\nexport function hasScript(pkgJson: PackageJson, scriptName: string): boolean {\n return !!pkgJson.scripts?.[scriptName]\n}\n"],"mappings":";;;AAAA,OAAO,QAAQ;AACf,OAAO,UAAU;AAkBjB,eAAsB,gBAAgB,KAAmC;AACvE,QAAM,UAAU,KAAK,KAAK,KAAK,cAAc;AAE7C,MAAI;AACF,UAAM,UAAU,MAAM,GAAG,SAAS,OAAO;AACzC,WAAO;AAAA,EACT,SAAS,OAAO;AACd,QAAK,MAA4B,SAAS,UAAU;AAClD,YAAM,IAAI,MAAM,6BAA6B,GAAG,EAAE;AAAA,IACpD;AACA,UAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU;AACzD,UAAM,IAAI,MAAM,2BAA2B,GAAG,KAAK,OAAO,EAAE;AAAA,EAC9D;AACF;AAQO,SAAS,cACd,KACA,aACwB;AACxB,MAAI,CAAC,KAAK;AACR,WAAO,CAAC;AAAA,EACV;AAEA,MAAI,OAAO,QAAQ,UAAU;AAC3B,WAAO,EAAE,CAAC,WAAW,GAAG,IAAI;AAAA,EAC9B;AAEA,SAAO;AACT;AAOO,SAAS,mBAAmB,SAA+B;AAChE,QAAM,gBAAgB;AAAA,IACpB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,QAAM,UAAU;AAAA,IACd,GAAG,QAAQ;AAAA,IACX,GAAG,QAAQ;AAAA,EACb;AAEA,SAAO,cAAc,KAAK,eAAa,aAAa,OAAO;AAC7D;AAQO,SAAS,UAAU,SAAsB,YAA6B;AA7F7E;AA8FE,SAAO,CAAC,GAAC,aAAQ,YAAR,mBAAkB;AAC7B;","names":[]}
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
// src/utils/port.ts
|
|
4
|
+
import { createHash } from "crypto";
|
|
5
|
+
/**
 * Derive a deterministic port offset in the range [1, 999] from a branch
 * name, by hashing the name with SHA-256 and reducing the first 32 bits.
 *
 * @param {string} branchName - Branch name to hash; must be non-blank.
 * @returns {number} Offset between 1 and 999 inclusive.
 * @throws {Error} If the branch name is empty or whitespace-only.
 */
function generatePortOffsetFromBranchName(branchName) {
  if (!branchName || branchName.trim().length === 0) {
    throw new Error("Branch name cannot be empty");
  }
  const digest = createHash("sha256").update(branchName).digest("hex");
  // First 8 hex chars → 32-bit integer; modulo keeps the offset small,
  // and +1 shifts the range from [0, 998] to [1, 999].
  const seed = parseInt(digest.slice(0, 8), 16);
  return (seed % 999) + 1;
}
|
|
15
|
+
/**
 * Compute a deterministic dev-server port for a branch-based workspace:
 * basePort plus a hash-derived offset of 1–999.
 *
 * @param {string} branchName - Branch name; must be non-blank.
 * @param {number} [basePort=3000] - Base port the offset is added to.
 * @returns {number} Port number in (basePort, basePort + 999].
 * @throws {Error} If the result exceeds 65535 or the branch name is empty.
 */
function calculatePortForBranch(branchName, basePort = 3000) {
  const candidate = basePort + generatePortOffsetFromBranchName(branchName);
  // Guard against leaving the valid TCP port range.
  if (candidate > 65535) {
    throw new Error(
      `Calculated port ${candidate} exceeds maximum (65535). Use a lower base port (current: ${basePort}).`
    );
  }
  return candidate;
}
|
|
25
|
+
|
|
26
|
+
export {
|
|
27
|
+
calculatePortForBranch
|
|
28
|
+
};
|
|
29
|
+
//# sourceMappingURL=chunk-37DYYFVK.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/utils/port.ts"],"sourcesContent":["import { createHash } from 'crypto'\n\n/**\n * Generate deterministic port offset from branch name using SHA256 hash\n * Range: 1-999 (matches existing random range for branches)\n *\n * @param branchName - Branch name to generate port offset from\n * @returns Port offset in range [1, 999]\n * @throws Error if branchName is empty\n */\nexport function generatePortOffsetFromBranchName(branchName: string): number {\n\t// Validate input\n\tif (!branchName || branchName.trim().length === 0) {\n\t\tthrow new Error('Branch name cannot be empty')\n\t}\n\n\t// Generate SHA256 hash of branch name (same pattern as color.ts)\n\tconst hash = createHash('sha256').update(branchName).digest('hex')\n\n\t// Take first 8 hex characters and convert to port offset (1-999)\n\tconst hashPrefix = hash.slice(0, 8)\n\tconst hashAsInt = parseInt(hashPrefix, 16)\n\tconst portOffset = (hashAsInt % 999) + 1 // +1 ensures range is 1-999, not 0-998\n\n\treturn portOffset\n}\n\n/**\n * Calculate deterministic port for branch-based workspace\n *\n * @param branchName - Branch name\n * @param basePort - Base port (default: 3000)\n * @returns Port number\n * @throws Error if calculated port exceeds 65535 or branchName is empty\n */\nexport function calculatePortForBranch(branchName: string, basePort: number = 3000): number {\n\tconst offset = generatePortOffsetFromBranchName(branchName)\n\tconst port = basePort + offset\n\n\t// Validate port range (same as EnvironmentManager.calculatePort)\n\tif (port > 65535) {\n\t\tthrow new Error(\n\t\t\t`Calculated port ${port} exceeds maximum (65535). 
Use a lower base port (current: ${basePort}).`\n\t\t)\n\t}\n\n\treturn port\n}\n"],"mappings":";;;AAAA,SAAS,kBAAkB;AAUpB,SAAS,iCAAiC,YAA4B;AAE5E,MAAI,CAAC,cAAc,WAAW,KAAK,EAAE,WAAW,GAAG;AAClD,UAAM,IAAI,MAAM,6BAA6B;AAAA,EAC9C;AAGA,QAAM,OAAO,WAAW,QAAQ,EAAE,OAAO,UAAU,EAAE,OAAO,KAAK;AAGjE,QAAM,aAAa,KAAK,MAAM,GAAG,CAAC;AAClC,QAAM,YAAY,SAAS,YAAY,EAAE;AACzC,QAAM,aAAc,YAAY,MAAO;AAEvC,SAAO;AACR;AAUO,SAAS,uBAAuB,YAAoB,WAAmB,KAAc;AAC3F,QAAM,SAAS,iCAAiC,UAAU;AAC1D,QAAM,OAAO,WAAW;AAGxB,MAAI,OAAO,OAAO;AACjB,UAAM,IAAI;AAAA,MACT,mBAAmB,IAAI,6DAA6D,QAAQ;AAAA,IAC7F;AAAA,EACD;AAEA,SAAO;AACR;","names":[]}
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import {
|
|
3
|
+
logger
|
|
4
|
+
} from "./chunk-GEHQXLEI.js";
|
|
5
|
+
|
|
6
|
+
// src/utils/package-manager.ts
|
|
7
|
+
import { execa } from "execa";
|
|
8
|
+
import fs from "fs-extra";
|
|
9
|
+
import path from "path";
|
|
10
|
+
/**
 * Type guard: is the given string one of the supported package managers?
 *
 * @param {string} manager - Candidate package manager name.
 * @returns {boolean} true for "pnpm", "npm", or "yarn".
 */
function isValidPackageManager(manager) {
  return ["pnpm", "npm", "yarn"].includes(manager);
}
|
|
13
|
+
/**
 * Detect which package manager a project uses, trying in order:
 *   1. the "packageManager" field in package.json,
 *   2. a known lock file,
 *   3. the first of pnpm/npm/yarn that runs on this machine,
 * falling back to "npm" when nothing matches.
 *
 * @param {string} [cwd=process.cwd()] - Project directory to inspect.
 * @returns {Promise<"pnpm"|"npm"|"yarn">} The detected package manager.
 */
async function detectPackageManager(cwd = process.cwd()) {
  // 1) An explicit "packageManager" field (e.g. "pnpm@8.6.0") wins.
  try {
    const manifestPath = path.join(cwd, "package.json");
    if (await fs.pathExists(manifestPath)) {
      const manifest = JSON.parse(await fs.readFile(manifestPath, "utf-8"));
      if (manifest.packageManager) {
        const declared = manifest.packageManager.split("@")[0];
        if (isValidPackageManager(declared)) {
          logger.debug(`Detected package manager from package.json: ${declared}`);
          return declared;
        }
      }
    }
  } catch (error) {
    // Unreadable/unparsable manifest is not fatal; fall through to other signals.
    logger.debug(`Could not read packageManager from package.json: ${error instanceof Error ? error.message : "Unknown error"}`);
  }
  // 2) Lock files identify the manager that produced them.
  const lockFileSignals = [
    { file: "pnpm-lock.yaml", manager: "pnpm" },
    { file: "package-lock.json", manager: "npm" },
    { file: "yarn.lock", manager: "yarn" }
  ];
  for (const { file, manager } of lockFileSignals) {
    if (await fs.pathExists(path.join(cwd, file))) {
      logger.debug(`Detected package manager from lock file ${file}: ${manager}`);
      return manager;
    }
  }
  // 3) Otherwise, pick the first manager that is actually installed.
  for (const candidate of ["pnpm", "npm", "yarn"]) {
    try {
      await execa(candidate, ["--version"]);
      logger.debug(`Detected installed package manager: ${candidate}`);
      return candidate;
    } catch {
      // Not installed or not on PATH — try the next one.
    }
  }
  logger.debug("No package manager detected, defaulting to npm");
  return "npm";
}
|
|
53
|
+
/**
 * Install project dependencies with the detected package manager.
 * Silently no-ops (with a debug log) when no working directory or no
 * package.json is present.
 *
 * @param {string|undefined} cwd - Project directory; skipped if falsy.
 * @param {boolean} [frozen=true] - Use lockfile-exact install
 *   (pnpm/yarn `--frozen-lockfile`, npm `ci`) instead of a plain install.
 * @returns {Promise<void>}
 * @throws {Error} If the install command fails or times out.
 */
async function installDependencies(cwd, frozen = true) {
  if (!cwd) {
    logger.debug("Skipping dependency installation - no working directory provided");
    return;
  }
  if (!await fs.pathExists(path.join(cwd, "package.json"))) {
    logger.debug("Skipping dependency installation - no package.json found");
    return;
  }
  const packageManager = await detectPackageManager(cwd);
  logger.info(`Installing dependencies with ${packageManager}...`);
  // NOTE(review): yarn's `--frozen-lockfile` is the classic-yarn flag
  // (Yarn 2+ uses `--immutable`) — confirm the supported yarn version.
  const frozenArgs = {
    pnpm: ["install", "--frozen-lockfile"],
    yarn: ["install", "--frozen-lockfile"],
    npm: ["ci"]
  };
  const installArgs = frozen ? (frozenArgs[packageManager] ?? ["install"]) : ["install"];
  try {
    await execa(packageManager, installArgs, {
      cwd,
      stdio: "inherit", // stream install output straight to the user
      timeout: 300000 // 5 minute timeout for install
    });
    logger.success("Dependencies installed successfully");
  } catch (error) {
    const stderr = error.stderr ?? error.message ?? "Unknown error";
    throw new Error(`Failed to install dependencies: ${stderr}`);
  }
}
|
|
95
|
+
/**
 * Run a package.json script via the detected package manager
 * (`npm run <name>` vs `pnpm/yarn <name>`), with CI=true in the
 * environment and a 10-minute timeout.
 *
 * @param {string} scriptName - Script to execute.
 * @param {string} cwd - Project directory.
 * @param {string[]} [args=[]] - Extra arguments appended to the script.
 * @param {{quiet?: boolean}} [options={}] - quiet pipes output instead of
 *   inheriting the parent's stdio.
 * @returns {Promise<void>}
 * @throws {Error} If the script exits non-zero or times out.
 */
async function runScript(scriptName, cwd, args = [], options = {}) {
  const packageManager = await detectPackageManager(cwd);
  // npm needs the explicit `run` subcommand; pnpm/yarn accept the script name directly.
  const baseCommand = packageManager === "npm" ? ["run", scriptName] : [scriptName];
  try {
    await execa(packageManager, [...baseCommand, ...args], {
      cwd,
      stdio: options.quiet ? "pipe" : "inherit",
      timeout: 600000, // 10 minute timeout for scripts
      env: {
        ...process.env,
        CI: "true"
      }
    });
  } catch (error) {
    const stderr = error.stderr ?? error.message ?? "Unknown error";
    throw new Error(`Failed to run script '${scriptName}': ${stderr}`);
  }
}
|
|
115
|
+
|
|
116
|
+
export {
|
|
117
|
+
detectPackageManager,
|
|
118
|
+
installDependencies,
|
|
119
|
+
runScript
|
|
120
|
+
};
|
|
121
|
+
//# sourceMappingURL=chunk-BLCTGFZN.js.map
|