@link-assistant/hive-mind 1.50.8 → 1.50.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,52 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { codexModels } from './models/index.mjs';
4
+
5
/**
 * Resolve a user-facing model alias to its concrete Codex model ID.
 * Unknown names fall through unchanged so callers can pass raw model IDs.
 * @param {string} model - Alias or raw model ID supplied by the user.
 * @returns {string} The mapped Codex model ID, or the input when no alias matches.
 */
export const mapModelToId = model => {
  const mapped = codexModels[model];
  return mapped || model;
};
6
+
7
// Mapping from hive-mind --think levels to Codex reasoning-effort values.
// 'max' intentionally maps to Codex's extra-high tier ('xhigh').
const THINK_LEVEL_TO_CODEX_REASONING = {
  off: 'none',
  low: 'low',
  medium: 'medium',
  high: 'high',
  max: 'xhigh',
};

/**
 * Derive the Codex reasoning-effort setting from CLI arguments.
 *
 * Precedence: an explicit numeric --thinking-budget wins over --think;
 * with neither present the effort defaults to 'none'.
 *
 * @param {object} [argv] - Parsed CLI arguments (may be undefined).
 * @param {number} [argv.thinkingBudget] - Requested thinking budget in tokens.
 * @param {number} [argv.maxThinkingBudget] - Budget ceiling used to normalize the ratio (defaults to 31999).
 * @param {string} [argv.think] - Named think level ('off' | 'low' | 'medium' | 'high' | 'max').
 * @returns {{reasoningEffort: string, source: string}} Chosen effort plus a label describing which flag decided it.
 */
export const resolveCodexReasoningEffort = argv => {
  const budgetCeiling = Number.isFinite(argv?.maxThinkingBudget) && argv.maxThinkingBudget > 0 ? argv.maxThinkingBudget : 31999;
  const requestedBudget = Number.isFinite(argv?.thinkingBudget) ? argv.thinkingBudget : undefined;

  if (requestedBudget !== undefined) {
    const source = `--thinking-budget ${requestedBudget}`;

    // A zero or negative budget explicitly disables reasoning.
    if (requestedBudget <= 0) {
      return { reasoningEffort: 'none', source };
    }

    // Normalize against the ceiling (capped at 1) and pick the first tier
    // whose upper bound is not exceeded; above 0.8 falls through to 'xhigh'.
    const fraction = Math.min(1, requestedBudget / budgetCeiling);
    const tiers = [
      [0.2, 'minimal'],
      [0.4, 'low'],
      [0.6, 'medium'],
      [0.8, 'high'],
    ];
    const match = tiers.find(([upperBound]) => fraction <= upperBound);
    return { reasoningEffort: match ? match[1] : 'xhigh', source };
  }

  const mappedLevel = argv?.think ? THINK_LEVEL_TO_CODEX_REASONING[argv.think] : undefined;
  if (mappedLevel) {
    return { reasoningEffort: mappedLevel, source: `--think ${argv.think}` };
  }

  return { reasoningEffort: 'none', source: 'default' };
};
48
+
49
// Aggregate default export so consumers can import these helpers either as a
// single object or via the named exports above.
export default {
  mapModelToId,
  resolveCodexReasoningEffort,
};
@@ -87,18 +87,6 @@ export const buildSystemPrompt = params => {
87
87
  // When in fork mode, screenshots are pushed to the fork, not the original repo
88
88
  const screenshotRepoPath = argv?.fork && forkedRepo ? forkedRepo : `${owner}/${repo}`;
89
89
 
90
- // Build thinking instruction based on --think level
91
- let thinkLine = '';
92
- if (argv && argv.think) {
93
- const thinkMessages = {
94
- low: 'You always think on every step.',
95
- medium: 'You always think hard on every step.',
96
- high: 'You always think harder on every step.',
97
- max: 'You always ultrathink on every step.',
98
- };
99
- thinkLine = `\n${thinkMessages[argv.think]}\n`;
100
- }
101
-
102
90
  // Build workspace-specific instructions and examples
103
91
  let workspaceInstructions = '';
104
92
  if (workspaceTmpDir) {
@@ -133,52 +121,73 @@ CI investigation with workspace tmp directory.
133
121
  `;
134
122
  }
135
123
 
136
- return `You are AI issue solver using OpenAI Codex.${thinkLine}
124
+ return `You are an AI issue solver using OpenAI Codex.
137
125
  ${workspaceInstructions}General guidelines.
138
- - When you execute commands, always save their logs to files for easier reading if the output becomes large.
139
- - When running commands, do not set a timeout yourself let them run as long as needed (default timeout - 2 minutes is more than enough), and once they finish, review the logs in the file.
140
- - When running sudo commands (especially package installations like apt-get, yum, npm install, etc.), always run them in the background to avoid timeout issues and permission errors when the process needs to be killed. Use the run_in_background parameter or append & to the command.
126
+ - When you execute commands and the output becomes large, save the logs to files for easier review.
127
+ - When running commands, avoid setting a timeout yourself. Let them run as long as needed. The default timeout of 2 minutes is usually enough, and once commands finish, review the logs in the file.
128
+ - When running sudo commands, especially package installations like apt-get, yum, or npm install, run them in the background to avoid timeout issues and permission errors when the process needs to be killed. Use the run_in_background parameter or append & to the command.
129
+ ${
130
+ argv && argv.promptIssueReporting
131
+ ? `
132
+ - When you spot errors, bugs, or minor issues during the working session that are unrelated to the main task requirements, create issues to track them when they do not already exist. For issues in the current repository, use gh issue create --repo ${owner}/${repo} --title "Issue title" --body "Issue description". For third-party repositories, check for existing issues first, then create or comment with reproducible details and possible fixes.`
133
+ : ''
134
+ }
141
135
  - When CI is failing or user reports failures, consider adding a detailed investigation protocol to your todo list with these steps:
142
136
  Step 1: List recent runs with timestamps using: gh run list --repo ${owner}/${repo} --branch ${branchName} --limit 5 --json databaseId,conclusion,createdAt,headSha
143
137
  Step 2: Verify runs are after the latest commit by checking timestamps and SHA
144
138
  Step 3: For each non-passing run, download logs to preserve them: gh run view {run-id} --repo ${owner}/${repo} --log > ci-logs/{workflow}-{run-id}.log
145
- Step 4: Read each downloaded log file using Read tool to understand the actual failures
139
+ Step 4: Read each downloaded log file with the Read tool to understand the actual failures
146
140
  Step 5: Report findings with specific errors and line numbers from logs
147
141
  This detailed investigation is especially helpful when user mentions CI failures, asks to investigate logs, you see non-passing status, or when finalizing a PR.
148
142
  Note: If user says "failing" but tools show "passing", this might indicate stale data - consider downloading fresh logs and checking timestamps to resolve the discrepancy.
149
143
  - When a code or log file has more than 1500 lines, read it in chunks of 1500 lines.
150
144
  - When facing a complex problem, do as much tracing as possible and turn on all verbose modes.
151
145
  ${getExperimentsExamplesSubPrompt(argv)}
152
- - When you face something extremely hard, use divide and conquer — it always helps.
146
+ - When you face something extremely hard, use divide and conquer.
153
147
 
154
148
  Initial research.
155
- - When you start, make sure you create detailed plan for yourself and follow your todo list step by step, make sure that as many points from these guidelines are added to your todo list to keep track of everything that can help you solve the issue with highest possible quality.
156
- - When user mentions CI failures or asks to investigate logs, consider adding these todos to track the investigation: (1) List recent CI runs with timestamps, (2) Download logs from failed runs to ci-logs/ directory, (3) Analyze error messages and identify root cause, (4) Implement fix, (5) Verify fix resolves the specific errors found in logs.
157
- - When you read issue, read all details and comments thoroughly.
158
- - When you see screenshots or images in issue descriptions, pull request descriptions, comments, or discussions, download the image to a local file first, then use Read tool to view and analyze it. Before reading downloaded images with the Read tool, verify the file is a valid image (not HTML) using a CLI tool like the 'file' command to check the actual file format. When the file command shows "HTML", "text", or "ASCII text", the download failed do not call Read on this file. When images are from GitHub issues/PRs (URLs containing "github.com/user-attachments"), these require authentication — use: curl -L -H "Authorization: token $(gh auth token)" -o <filename> "<url>"
149
+ - When you start, create a detailed plan for yourself and follow your todo list step by step. Add as many relevant points from these guidelines to the todo list as practical so you can track the work clearly.
150
+ - When the user mentions CI failures or asks to investigate logs, consider adding these todos to track the investigation: (1) list recent CI runs with timestamps, (2) download logs from failed runs to the ci-logs/ directory, (3) analyze error messages and identify the root cause, (4) implement a fix, (5) verify that the fix resolves the specific errors found in the logs.
151
+ - When you read the issue, read all details and comments thoroughly.
152
+ - When you see screenshots or images in issue descriptions, pull request descriptions, comments, or discussions, download the image to a local file first, then use the Read tool to view and analyze it. Before reading downloaded images with the Read tool, verify that the file is a valid image rather than HTML by using a CLI tool such as the 'file' command. When the file command shows "HTML", "text", or "ASCII text", the download failed, so do not call Read on that file. When images are from GitHub issues or PRs, such as URLs containing "github.com/user-attachments", use: curl -L -H "Authorization: token $(gh auth token)" -o <filename> "<url>"
159
153
  - When you need issue details, use gh issue view https://github.com/${owner}/${repo}/issues/${issueNumber}.
160
154
  - When you need related code, use gh search code --owner ${owner} [keywords].
161
155
  - When you need repo context, read files in your working directory.${
156
+ argv && argv.promptExploreSubAgent
157
+ ? `
158
+ - When you need to learn something about the codebase structure, patterns, or how things work, use Codex collaboration/sub-agent capabilities to explore the codebase thoroughly before implementation.`
159
+ : ''
160
+ }${
162
161
  argv?.promptCheckSiblingPullRequests !== false
163
162
  ? `
164
163
  - When you study related work, study the most recent related pull requests.`
165
164
  : ''
165
+ }${
166
+ argv && argv.promptGeneralPurposeSubAgent
167
+ ? `
168
+ - When the task is big and requires processing lots of files or folders, use Codex collaboration/sub-agent capabilities to split the work into focused subtasks.`
169
+ : ''
170
+ }${
171
+ argv && argv.promptCaseStudies
172
+ ? `
173
+ - When working on this issue, create a comprehensive case study in the ./docs/case-studies/issue-${issueNumber}/ directory with logs, analysis, timeline, root cause investigation, and proposed solutions.`
174
+ : ''
166
175
  }
167
- - When issue is not defined enough, write a comment to ask clarifying questions.
176
+ - When the issue is not defined clearly enough, write a comment with clarifying questions.
168
177
  - When accessing GitHub Gists (especially private ones), use gh gist view command instead of direct URL fetching to ensure proper authentication.
169
- - When you are fixing a bug, please make sure you first find the actual root cause, do as many experiments as needed.
170
- - When you are fixing a bug and code does not have enough tracing/logs, add them and make sure they stay in the code, but are switched off by default.
178
+ - When you are fixing a bug, find the actual root cause first and run as many experiments as needed.
179
+ - When you are fixing a bug and the code does not have enough tracing or logs, add them and keep them in the code with the default state switched off.
171
180
  - When you need comments on a pull request, note that GitHub has three different comment types with different API endpoints:
172
181
  1. PR review comments (inline code comments): gh api repos/${owner}/${repo}/pulls/${prNumber}/comments --paginate
173
182
  2. PR conversation comments (general discussion): gh api repos/${owner}/${repo}/issues/${prNumber}/comments --paginate
174
183
  3. PR reviews (approve/request changes): gh api repos/${owner}/${repo}/pulls/${prNumber}/reviews --paginate
175
184
  Note: The command "gh pr view --json comments" only returns conversation comments and misses review comments.
176
- - When you need latest comments on issue, use gh api repos/${owner}/${repo}/issues/${issueNumber}/comments --paginate.
185
+ - When you need the latest comments on the issue, use gh api repos/${owner}/${repo}/issues/${issueNumber}/comments --paginate.
177
186
 
178
187
  Solution development and testing.
179
188
  - When issue is solvable, first create a test that reproduces the problem, then implement the fix.
180
189
  - When implementing features, search for similar existing implementations in the codebase and use them as examples instead of implementing everything from scratch.
181
- - When coding, each atomic step that can be useful by itself should be committed to the pull request's branch, meaning if work will be interrupted by any reason parts of solution will still be kept intact and safe in pull request.
190
+ - When coding, commit each atomic step that is useful on its own to the pull request branch so interrupted work remains preserved in the pull request.
182
191
  - When you test:
183
192
  start from testing of small functions using separate scripts;
184
193
  write unit tests with mocks for easy and quick start.
@@ -186,7 +195,7 @@ Solution development and testing.
186
195
  - When you test solution draft, include automated checks in pr.
187
196
  - When you write or modify tests, consider setting reasonable timeouts at test, suite, and CI job levels so failures surface quickly instead of hanging.
188
197
  - When you see repeated test timeout patterns in CI, investigate the root cause rather than increasing timeouts.
189
- - When issue is unclear, write comment on issue asking questions.
198
+ - When the issue is unclear, write a comment on the issue with questions.
190
199
  - When you encounter any problems that you are unable to solve yourself (any human feedback or help), write a comment to the pull request asking for help.
191
200
  - When you need human help, use gh pr comment ${prNumber} --body "your message" to comment on existing PR.
192
201
 
@@ -194,9 +203,9 @@ Reproducible testing.
194
203
  - When fixing a bug, create a test that reproduces the problem before implementing the fix. When you cannot reproduce the problem, you cannot verify the fix.
195
204
  - When encountering logic bugs, write an automated test that fails due to the bug, then implement the fix to make it pass.
196
205
  - When encountering UI bugs, capture a screenshot showing the problem state, then create a visual regression test or manual verification screenshot after the fix.
197
- - When creating tests, prefer minimum reproducible examples - the simplest test case that demonstrates the issue.
206
+ - When creating tests, prefer minimum reproducible examples, meaning the simplest test case that demonstrates the issue.
198
207
  - When submitting a fix, include in the PR description: (1) how to reproduce the issue, (2) the automated test that verifies the fix, (3) before/after screenshots for UI issues.
199
- - When a bug fix doesn't have a reproducing test, the fix is incomplete - regressions can silently occur later.
208
+ - When a bug fix does not have a reproducing test, treat the fix as incomplete because regressions can occur later without notice.
200
209
 
201
210
  Preparing pull request.
202
211
  - When you code, follow contributing guidelines.
@@ -207,14 +216,14 @@ Preparing pull request.
207
216
  - When you update existing pr ${prNumber}, use gh pr edit to modify title and description.
208
217
  - When you are about to commit or push code, run local CI checks first if they are available in contributing guidelines (like ruff check, mypy, eslint, etc.) to catch errors before pushing.
209
218
  - When you finalize the pull request:
210
- check that pull request title and description are updated (the PR may start with a [WIP] prefix and placeholder description that should be replaced with actual title and description of the changes),
219
+ check that the pull request title and description are updated (the PR may start with a [WIP] prefix and a placeholder description that should be replaced with the actual title and description of the changes),
211
220
  follow style from merged prs for code, title, and description,
212
- make sure no uncommitted changes corresponding to the original requirements are left behind,
213
- make sure the default branch is merged to the pull request's branch,
214
- make sure all CI checks passing if they exist before you finish,
215
- double-check that all changes in the pull request answer to original requirements of the issue,
216
- make sure no new bugs are introduced in pull request by carefully reading gh pr diff,
217
- make sure no previously existing features were removed without an explicit request from users via the issue description, issue comments, and/or pull request comments.
221
+ check that no uncommitted changes corresponding to the original requirements are left behind,
222
+ check that the default branch is merged into the pull request branch,
223
+ check that all CI checks are passing if they exist before you finish,
224
+ double-check that all changes in the pull request address the original requirements of the issue,
225
+ check for newly introduced bugs in the pull request by carefully reading gh pr diff,
226
+ check that no previously existing features were removed without an explicit request in the issue description, issue comments, or pull request comments.
218
227
  - When you finish implementation, use gh pr ready ${prNumber}.
219
228
 
220
229
  Workflow and collaboration.
@@ -226,7 +235,7 @@ Workflow and collaboration.
226
235
  - When you contribute, keep repository history forward-moving with regular commits, pushes, and reverts if needed.
227
236
  - When you face conflict that you cannot resolve yourself, ask for help.
228
237
  - When you collaborate, respect branch protections by working only on ${branchName}.
229
- - When you mention result, include pull request url or comment url.
238
+ - When you mention a result, include the pull request URL or comment URL.
230
239
  - When you need to create pr, remember pr ${prNumber} already exists for this branch.
231
240
 
232
241
  Self review.
@@ -236,7 +245,7 @@ Self review.
236
245
  - When you finalize, confirm code, tests, and description are consistent.${
237
246
  argv && argv.promptEnsureAllRequirementsAreMet
238
247
  ? `
239
- - When no explicit feedback or requirements are provided, ensure all changes are correct, consistent, validated, tested, logged and fully meet all discussed requirements (check issue description and all comments in issue and in pull request). Ensure all CI/CD checks pass.`
248
+ - When no explicit feedback or requirements are provided, ensure all changes are correct, consistent, validated, tested, logged, and aligned with all discussed requirements by checking the issue description and all comments on the issue and pull request. Check that all CI or CD checks are passing.`
240
249
  : ''
241
250
  }
242
251
 
@@ -250,6 +259,42 @@ GitHub CLI command patterns.
250
259
  - When adding issue comment, use gh issue comment NUMBER --body "text" --repo OWNER/REPO.
251
260
  - When viewing PR details, use gh pr view NUMBER --repo OWNER/REPO.
252
261
  - When filtering with jq, use gh api repos/\${owner}/\${repo}/pulls/\${prNumber}/comments --paginate --jq 'reverse | .[0:5]'.${
262
+ argv && argv.promptPlaywrightMcp
263
+ ? `
264
+
265
+ Playwright MCP usage (browser automation via MCP tools).
266
+ - When you develop frontend web applications or debug UI issues, use Playwright MCP tools to test the UI in a real browser.
267
+ - When simple fetch-based browsing is insufficient for dynamic pages, use Playwright MCP browser automation as a fallback.
268
+ - When reproducing or verifying UI bugs, take before/after screenshots and close the browser when finished.`
269
+ : ''
270
+ }${
271
+ argv && argv.promptPlanSubAgent
272
+ ? `
273
+
274
+ Planning workflow usage.
275
+ - When you start working on a task, consider using Codex collaboration or sub-agent capabilities to research the codebase and create an implementation plan before you start implementation work.
276
+ - When you delegate planning, make it an explicit first step in your todo list and keep the plan focused on concrete implementation and verification steps.`
277
+ : ''
278
+ }${
279
+ argv && argv.promptSubagentsViaAgentCommander && argv.agentCommanderInstalled
280
+ ? `
281
+
282
+ Agent Commander usage (unified subagent delegation).
283
+ - When you need to delegate tasks to subagents, use the agent-commander CLI tool (start-agent) instead of relying only on native Codex collaboration.
284
+ - Agent Commander provides a unified API for different agent types (claude, opencode, codex, agent) and supports various isolation modes.
285
+ - To delegate a task, use a command like:
286
+ \`\`\`bash
287
+ start-agent --tool codex --working-directory "$(pwd)" --prompt "Your task description here"
288
+ \`\`\`
289
+ - Common start-agent parameters:
290
+ --tool <name>: Agent to use (claude, opencode, codex, agent)
291
+ --working-directory <path>: Execution directory (use the current directory for context)
292
+ --prompt <text>: The task to delegate
293
+ --model <name>: Model to use
294
+ --isolation <mode>: Execution context (none, screen, docker)
295
+ --detached: Run in background mode.`
296
+ : ''
297
+ }${
253
298
  modelSupportsVision
254
299
  ? `
255
300
 
@@ -25,19 +25,19 @@ export const getExperimentsExamplesSubPrompt = argv => {
25
25
 
26
26
  // Both folders are enabled (with their respective paths)
27
27
  if (experimentsFolder && examplesFolder) {
28
- lines.push(` - When you create debug, test, or example/experiment scripts for fixing, always keep them in an ${examplesFolder} and/or ${experimentsFolder} folders so you can reuse them later.`);
29
- lines.push(` - When testing your assumptions, use the experiment scripts, and add it to ${experimentsFolder} folder.`);
30
- lines.push(` - When your experiments can show real world use case of the software, add it to ${examplesFolder} folder.`);
28
+ lines.push(` - When you create debug, test, or example scripts while fixing an issue, keep them in ${examplesFolder} and/or ${experimentsFolder} so you can reuse them later.`);
29
+ lines.push(` - When you test assumptions, keep experiment scripts in ${experimentsFolder}.`);
30
+ lines.push(` - When an experiment demonstrates a real-world use case of the software, add it to ${examplesFolder}.`);
31
31
  }
32
32
  // Only experiments folder is enabled
33
33
  else if (experimentsFolder) {
34
- lines.push(` - When you create debug or test scripts for fixing, always keep them in ${experimentsFolder} folder so you can reuse them later.`);
35
- lines.push(` - When testing your assumptions, use the experiment scripts, and add it to ${experimentsFolder} folder.`);
34
+ lines.push(` - When you create debug or test scripts while fixing an issue, keep them in ${experimentsFolder} so you can reuse them later.`);
35
+ lines.push(` - When you test assumptions, keep experiment scripts in ${experimentsFolder}.`);
36
36
  }
37
37
  // Only examples folder is enabled
38
38
  else if (examplesFolder) {
39
- lines.push(` - When you create example scripts that show real world use cases, keep them in ${examplesFolder} folder.`);
40
- lines.push(` - When your experiments can show real world use case of the software, add it to ${examplesFolder} folder.`);
39
+ lines.push(` - When you create example scripts that show real-world use cases, keep them in ${examplesFolder}.`);
40
+ lines.push(` - When an experiment demonstrates a real-world use case of the software, add it to ${examplesFolder}.`);
41
41
  }
42
42
 
43
43
  return lines.join('\n');
@@ -0,0 +1,32 @@
1
+ #!/usr/bin/env node
2
+
3
+ import { fileURLToPath } from 'url';
4
+
5
/**
 * Guard an async operation with a timeout.
 *
 * Races the given promise against a timer; whichever settles first wins.
 * The timer is always cleared afterwards so it cannot keep the process alive.
 *
 * @template T
 * @param {Promise<T>} promise - Promise to guard
 * @param {number} timeoutMs - Timeout in milliseconds
 * @param {string} operation - Human-readable operation label used in the error message
 * @returns {Promise<T>} Resolves/rejects with the guarded promise, or rejects with a timeout Error
 */
export function withTimeout(promise, timeoutMs, operation) {
  let timer;
  const timeoutPromise = new Promise((_, reject) => {
    timer = setTimeout(
      () => reject(new Error(`Operation '${operation}' timed out after ${timeoutMs}ms. This might be due to slow network or npm configuration issues.`)),
      timeoutMs
    );
  });
  return Promise.race([promise, timeoutPromise]).finally(() => clearTimeout(timer));
}
22
+
23
+ /**
24
+ * Check whether hive.mjs is being run directly rather than imported.
25
+ *
26
+ * @param {string | undefined} argvPath - Executed path from process.argv[1]
27
+ * @param {string} moduleUrl - Current module URL from import.meta.url
28
+ * @returns {boolean}
29
+ */
30
+ export function isDirectExecution(argvPath, moduleUrl) {
31
+ return argvPath === fileURLToPath(moduleUrl) || (argvPath && (argvPath.includes('/hive') || argvPath.endsWith('hive')));
32
+ }
@@ -4,7 +4,7 @@
4
4
  // This module has no heavy dependencies to allow fast loading for --help
5
5
 
6
6
  import { SOLVE_OPTION_DEFINITIONS } from './solve.config.lib.mjs';
7
- import { buildModelOptionDescription } from './models/index.mjs';
7
+ import { buildModelOptionDescription, defaultModels } from './models/index.mjs';
8
8
 
9
9
  // Hive-only options that are NOT solve options (hive-specific functionality).
10
10
  // These are excluded when auto-registering solve-passthrough options.
@@ -22,7 +22,7 @@ const HIVE_CUSTOM_SOLVE_OPTIONS = {
22
22
  type: 'string',
23
23
  description: `${buildModelOptionDescription()}, or any model ID supported by the tool`,
24
24
  alias: ['m', 'worker-model'],
25
- default: 'sonnet',
25
+ default: currentParsedArgs => defaultModels[currentParsedArgs?.tool] || defaultModels.claude,
26
26
  },
27
27
  'dry-run': {
28
28
  type: 'boolean',
@@ -47,7 +47,7 @@ const HIVE_CUSTOM_SOLVE_OPTIONS = {
47
47
  tool: {
48
48
  type: 'string',
49
49
  description: 'AI tool to use for solving issues',
50
- choices: ['claude', 'opencode', 'agent'],
50
+ choices: ['claude', 'opencode', 'codex', 'agent'],
51
51
  default: 'claude',
52
52
  },
53
53
  };
package/src/hive.mjs CHANGED
@@ -39,29 +39,13 @@ if (earlyArgs.includes('--help') || earlyArgs.includes('-h')) {
39
39
  }
40
40
  }
41
41
  export { createYargsConfig } from './hive.config.lib.mjs';
42
- // Only execute main logic if this module is being run directly (not imported)
43
- // This prevents heavy module loading when hive.mjs is imported by other modules
44
- // Check if we're being executed (not imported) by looking at various indicators:
45
- // 1. process.argv[1] is the executed file path
46
- // 2. import.meta.url is this file's URL
47
- // 3. For global installs, argv[1] might be a symlink, so we check if it contains 'hive'
48
- import { fileURLToPath } from 'url';
49
- const isDirectExecution = process.argv[1] === fileURLToPath(import.meta.url) || (process.argv[1] && (process.argv[1].includes('/hive') || process.argv[1].endsWith('hive')));
50
- if (isDirectExecution) {
42
+ import { isDirectExecution, withTimeout } from './hive.bootstrap.lib.mjs';
43
+ const isRunningDirectly = isDirectExecution(process.argv[1], import.meta.url);
44
+ if (isRunningDirectly) {
51
45
  console.log('🐝 Hive Mind - AI-powered issue solver');
52
46
  console.log(' Initializing...');
53
47
  try {
54
48
  console.log(' Loading dependencies (this may take a moment)...');
55
- // Helper function to add timeout to async operations
56
- const withTimeout = (promise, timeoutMs, operation) => {
57
- let timeoutId;
58
- return Promise.race([
59
- promise,
60
- new Promise((_, reject) => {
61
- timeoutId = setTimeout(() => reject(new Error(`Operation '${operation}' timed out after ${timeoutMs}ms. This might be due to slow network or npm configuration issues.`)), timeoutMs);
62
- }),
63
- ]).finally(() => clearTimeout(timeoutId));
64
- };
65
49
  // Use use-m to dynamically import modules for cross-runtime compatibility
66
50
  if (typeof use === 'undefined') {
67
51
  try {
@@ -102,7 +86,7 @@ if (isDirectExecution) {
102
86
  const { validateClaudeConnection } = claudeLib;
103
87
  // Import model validation library
104
88
  const modelValidation = await import('./models/index.mjs');
105
- const { validateAndExitOnInvalidModel } = modelValidation;
89
+ const { validateAndExitOnInvalidModel, defaultModels } = modelValidation;
106
90
  const githubLib = await import('./github.lib.mjs');
107
91
  const { checkGitHubPermissions, fetchAllIssuesWithPagination, fetchProjectIssues, isRateLimitError, batchCheckPullRequestsForIssues, parseGitHubUrl, batchCheckArchivedRepositories } = githubLib;
108
92
  // Import YouTrack-related functions
@@ -472,6 +456,11 @@ if (isDirectExecution) {
472
456
  if (!rawArgs.includes('--model') && !rawArgs.includes('-m') && !rawArgs.includes('--worker-model')) argv.model = 'sonnet';
473
457
  }
474
458
 
459
+ const modelExplicitlyProvided = rawArgs.includes('--model') || rawArgs.includes('-m') || rawArgs.includes('--worker-model');
460
+ if (argv.tool && !modelExplicitlyProvided && defaultModels[argv.tool]) {
461
+ argv.model = defaultModels[argv.tool];
462
+ }
463
+
475
464
  // Validate model names EARLY (simple string check, always runs)
476
465
  const tool = argv.tool || 'claude';
477
466
  await validateAndExitOnInvalidModel(argv.model, tool, safeExit);
@@ -800,6 +789,10 @@ if (isDirectExecution) {
800
789
  } else if (value) {
801
790
  args.push(`--${optionName}`); // Default false: only forward when truthy
802
791
  }
792
+ } else if (def.type === 'array' && Array.isArray(value) && value.length > 0) {
793
+ for (const entry of value) {
794
+ args.push(`--${optionName}`, String(entry));
795
+ }
803
796
  } else if ((def.type === 'string' || def.type === 'number') && value !== undefined) {
804
797
  args.push(`--${optionName}`, String(value));
805
798
  }