@link-assistant/hive-mind 1.46.7 → 1.46.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,18 @@
1
1
  # @link-assistant/hive-mind
2
2
 
3
+ ## 1.46.9
4
+
5
+ ### Patch Changes
6
+
7
+ - 8104fad: Fix wrong context window calculation showing impossible percentages like 250% (Issue #1539). When peakContextUsage is unknown (e.g. sub-agent models from result JSON only), skip the context window input tokens display entirely instead of falling back to cumulative totals across all requests, which are not valid per-request context window metrics.
8
+
9
+ ## 1.46.8
10
+
11
+ ### Patch Changes
12
+
13
+ - Fix wrong context window calculation showing impossible percentages like 250% (Issue #1539). When peakContextUsage is unknown (e.g. sub-agent models from result JSON only), skip the context window input tokens display entirely instead of falling back to cumulative totals across all requests, which are not valid per-request context window metrics.
14
+ - bcf2b9b: Retry on network issues and minimize terminal/log output differences (#1536): add ghRetry/ghCmdRetry utilities with exponential backoff for transient network errors (TCP reset, TLS timeout, connection refused, unexpected EOF). Apply retry to critical gh CLI calls: accept-invite, repository setup, auto-fork permission check, visibility detection, write permission check. Log stderr to log file on command failure for terminal/log parity. Add 'unexpected eof' to transient error detection patterns.
15
+
3
16
  ## 1.46.7
4
17
 
5
18
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@link-assistant/hive-mind",
3
- "version": "1.46.7",
3
+ "version": "1.46.9",
4
4
  "description": "AI-powered issue solver and hive mind for collaborative problem solving",
5
5
  "main": "src/hive.mjs",
6
6
  "type": "module",
@@ -158,16 +158,19 @@ export const displayBudgetStats = async (usage, tokenUsage, log) => {
158
158
  const subSessions = tokenUsage?.subSessions || [];
159
159
  const hasMultipleSubSessions = subSessions.length > 1;
160
160
 
161
+ const peakContext = usage.peakContextUsage || 0;
162
+
161
163
  if (hasMultipleSubSessions) {
162
164
  for (let i = 0; i < subSessions.length; i++) {
163
165
  const sub = subSessions[i];
164
166
  const subPeak = sub.peakContextUsage || 0;
165
- const subCumulative = (sub.inputTokens || 0) + (sub.cacheCreationTokens || 0) + (sub.cacheReadTokens || 0);
166
- const contextValue = subPeak > 0 ? subPeak : subCumulative;
167
+ // Issue #1539: Only use peak per-request context for context window display.
168
+ // Cumulative totals across all requests can exceed the context limit and produce
169
+ // impossible percentages (e.g. 250%). When peak is unknown, skip context display.
167
170
  const parts = [];
168
- if (contextLimit && contextValue > 0) {
169
- const pct = ((contextValue / contextLimit) * 100).toFixed(0);
170
- parts.push(`${formatNumber(contextValue)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
171
+ if (contextLimit && subPeak > 0) {
172
+ const pct = ((subPeak / contextLimit) * 100).toFixed(0);
173
+ parts.push(`${formatNumber(subPeak)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
171
174
  }
172
175
  if (outputLimit) {
173
176
  const outPct = ((sub.outputTokens / outputLimit) * 100).toFixed(0);
@@ -177,15 +180,12 @@ export const displayBudgetStats = async (usage, tokenUsage, log) => {
177
180
  await log(` ${i + 1}. Context window: ${parts.join(', ')}`);
178
181
  }
179
182
  }
180
- } else {
181
- // Single sub-session: single-line format
182
- const peakContext = usage.peakContextUsage || 0;
183
- const cumulativeContext = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
184
- const contextValue = peakContext > 0 ? peakContext : cumulativeContext;
183
+ } else if (peakContext > 0) {
184
+ // Single sub-session with known peak: single-line format
185
185
  const parts = [];
186
- if (contextLimit && contextValue > 0) {
187
- const pct = ((contextValue / contextLimit) * 100).toFixed(0);
188
- parts.push(`${formatNumber(contextValue)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
186
+ if (contextLimit) {
187
+ const pct = ((peakContext / contextLimit) * 100).toFixed(0);
188
+ parts.push(`${formatNumber(peakContext)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
189
189
  }
190
190
  if (outputLimit) {
191
191
  const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
@@ -195,6 +195,8 @@ export const displayBudgetStats = async (usage, tokenUsage, log) => {
195
195
  await log(` Context window: ${parts.join(', ')}`);
196
196
  }
197
197
  }
198
+ // Issue #1539: When peakContextUsage is unknown, skip the context window line entirely.
199
+ // Cumulative totals are shown on the Total line below — no duplication needed.
198
200
 
199
201
  // Cumulative totals — single line
200
202
  const totalInputNonCached = usage.inputTokens + usage.cacheCreationTokens;
@@ -202,6 +204,11 @@ export const displayBudgetStats = async (usage, tokenUsage, log) => {
202
204
  let totalLine = `${formatNumber(totalInputNonCached)}`;
203
205
  if (cachedTokens > 0) totalLine += ` + ${formatNumber(cachedTokens)} cached`;
204
206
  totalLine += ` input tokens, ${formatNumber(usage.outputTokens)} output tokens`;
207
+ // Issue #1539: When peakContextUsage is unknown, embed output percentage in Total line
208
+ if (peakContext === 0 && outputLimit) {
209
+ const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
210
+ totalLine += ` (${outPct}% of ${formatNumber(outputLimit)} output limit)`;
211
+ }
205
212
  await log(` Total: ${totalLine}`);
206
213
  };
207
214
 
@@ -230,6 +237,15 @@ export const mergeResultModelUsage = (modelUsage, resultModelUsage) => {
230
237
  if (resultUsage.costUSD != null) {
231
238
  modelUsage[modelId]._resultCostUSD = resultUsage.costUSD;
232
239
  }
240
+ // Issue #1539: Extract model limits from result JSON for sub-agent models
241
+ // Claude Code's result event includes contextWindow and maxOutputTokens per model,
242
+ // which we use as fallback when modelInfo API is unavailable.
243
+ if (resultUsage.contextWindow) {
244
+ modelUsage[modelId]._resultContextWindow = resultUsage.contextWindow;
245
+ }
246
+ if (resultUsage.maxOutputTokens) {
247
+ modelUsage[modelId]._resultMaxOutputTokens = resultUsage.maxOutputTokens;
248
+ }
233
249
  } else {
234
250
  const jsonlUsage = modelUsage[modelId];
235
251
  const jsonlTotal = jsonlUsage.inputTokens + jsonlUsage.cacheCreationTokens + jsonlUsage.cacheReadTokens + jsonlUsage.outputTokens;
@@ -244,6 +260,13 @@ export const mergeResultModelUsage = (modelUsage, resultModelUsage) => {
244
260
  if (resultUsage.costUSD != null) {
245
261
  jsonlUsage._resultCostUSD = resultUsage.costUSD;
246
262
  }
263
+ // Issue #1539: Also extract model limits from result JSON as fallback
264
+ if (resultUsage.contextWindow) {
265
+ jsonlUsage._resultContextWindow = resultUsage.contextWindow;
266
+ }
267
+ if (resultUsage.maxOutputTokens) {
268
+ jsonlUsage._resultMaxOutputTokens = resultUsage.maxOutputTokens;
269
+ }
247
270
  }
248
271
  }
249
272
  };
@@ -274,36 +297,35 @@ const formatSubSessionsList = (subSessions, contextLimit, outputLimit) => {
274
297
  let result = '';
275
298
  for (let i = 0; i < subSessions.length; i++) {
276
299
  const sub = subSessions[i];
300
+ // Issue #1539: Only use peak per-request context; skip context display when unknown
277
301
  const subPeakContext = sub.peakContextUsage || 0;
278
- // Cumulative fallback: inputTokens + cacheCreationTokens + cacheReadTokens for this sub-session
279
- const subCumulative = (sub.inputTokens || 0) + (sub.cacheCreationTokens || 0) + (sub.cacheReadTokens || 0);
280
- result += formatContextOutputLine(subPeakContext, contextLimit, sub.outputTokens, outputLimit, `${i + 1}. `, subCumulative);
302
+ result += formatContextOutputLine(subPeakContext, contextLimit, sub.outputTokens, outputLimit, `${i + 1}. `);
281
303
  }
282
304
  return result;
283
305
  };
284
306
 
285
307
  /**
286
308
  * Issue #1526: Build a single-line context window + output tokens string.
309
+ * Issue #1539: Only show context window when peakContext > 0 (per-request peak known).
310
+ * When peakContext is 0 (unknown), context part is omitted to avoid misleading percentages.
287
311
  * Format: "- Context window: X / Y input tokens (Z%), A / B output tokens (W%)"
288
- * When only one of context or output limits is available, shows just that part.
289
- * @param {number} peakContext - Peak context usage (0 if unknown)
312
+ * @param {number} peakContext - Peak context usage (0 if unknown; context display is skipped)
290
313
  * @param {number} contextLimit - Context window limit (null if unknown)
291
314
  * @param {number} outputTokens - Output tokens used
292
315
  * @param {number} outputLimit - Output token limit (null if unknown)
293
316
  * @param {string} [prefix='- '] - Line prefix
294
317
  * @returns {string} Formatted line or empty string
295
318
  */
296
- const formatContextOutputLine = (peakContext, contextLimit, outputTokens, outputLimit, prefix = '- ', cumulativeContext = 0) => {
319
+ const formatContextOutputLine = (peakContext, contextLimit, outputTokens, outputLimit, prefix = '- ') => {
297
320
  const parts = [];
298
321
  if (contextLimit) {
299
- // Use peakContextUsage when available (per-request peak from JSONL tracking).
300
- // Fall back to cumulative total (inputTokens + cacheCreationTokens + cacheReadTokens)
301
- // when peak is unknown (e.g., model only from result JSON, not in JSONL).
302
- // Issue #1526: Never skip context display always show what data we have.
303
- const contextValue = peakContext > 0 ? peakContext : cumulativeContext;
304
- if (contextValue > 0) {
305
- const pct = ((contextValue / contextLimit) * 100).toFixed(0);
306
- parts.push(`${formatTokensCompact(contextValue)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`);
322
+ // Issue #1539: Only use peak per-request context for context window display.
323
+ // When peak is unknown (e.g., model only from result JSON, not in JSONL),
324
+ // skip context display. Cumulative totals across all requests are not valid
325
+ // context window metrics and produce impossible percentages (e.g. 250%).
326
+ if (peakContext > 0) {
327
+ const pct = ((peakContext / contextLimit) * 100).toFixed(0);
328
+ parts.push(`${formatTokensCompact(peakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`);
307
329
  }
308
330
  }
309
331
  if (outputLimit) {
@@ -322,7 +344,8 @@ const formatContextOutputLine = (peakContext, contextLimit, outputTokens, output
322
344
  * Sub-sessions are shown as a global section (not duplicated per model) since JSONL
323
345
  * sub-session tracking is global across all models.
324
346
  * Issue #1526: Shorter output format — context window + output tokens on single line.
325
- * Fix: exclude cacheReadTokens from context window fallback calculation (cumulative ≠ per-request).
347
+ * Issue #1539: Only display context window when peak per-request usage is known.
348
+ * Cumulative totals are never used as context window metrics (they can exceed model limits).
326
349
  * @param {Object} tokenUsage - Token usage data from calculateSessionTokens or buildAgentBudgetStats
327
350
  * @returns {string} Formatted markdown string for PR comment
328
351
  */
@@ -358,17 +381,17 @@ export const buildBudgetStatsString = tokenUsage => {
358
381
 
359
382
  if (isMultiModel) stats += `\n\n**${modelName}:**`;
360
383
 
384
+ const peakContext = usage.peakContextUsage || 0;
385
+
361
386
  if (!isMultiModel && hasMultipleSubSessions) {
362
387
  // Single-model + multiple sub-sessions: show numbered sub-sessions under that model
363
388
  stats += formatSubSessionsList(subSessions, contextLimit, outputLimit);
364
- } else {
389
+ } else if (peakContext > 0) {
365
390
  // Issue #1526: Single line format for context window + output tokens
366
- // Use peakContextUsage when available; fall back to cumulative total when peak is unknown
367
- // (e.g., for result-JSON-sourced sub-agent models where only cumulative totals are available)
368
- const peakContext = usage.peakContextUsage || 0;
369
- const cumulativeContext = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
370
- stats += formatContextOutputLine(peakContext, contextLimit, usage.outputTokens, outputLimit, '- ', cumulativeContext);
391
+ stats += formatContextOutputLine(peakContext, contextLimit, usage.outputTokens, outputLimit, '- ');
371
392
  }
393
+ // Issue #1539: When peakContextUsage is unknown, skip the context window line entirely.
394
+ // Cumulative totals are shown on the Total line below — no duplication needed.
372
395
 
373
396
  // Cumulative totals per model: input tokens + cached shown separately
374
397
  // Issue #1526: Shorter format — single "Total:" line
@@ -378,6 +401,13 @@ export const buildBudgetStatsString = tokenUsage => {
378
401
  if (cachedTokens > 0) totalLine += ` + ${formatTokensCompact(cachedTokens)} cached`;
379
402
  totalLine += ` input tokens, ${formatTokensCompact(usage.outputTokens)} output tokens`;
380
403
 
404
+ // Issue #1539: When peakContextUsage is unknown (no per-request data), embed
405
+ // output token percentage in the Total line so no data is lost.
406
+ if (peakContext === 0 && outputLimit) {
407
+ const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
408
+ totalLine += ` (${outPct}% of ${formatTokensCompact(outputLimit)} output limit)`;
409
+ }
410
+
381
411
  // Issue #1508: Show per-model cost when available
382
412
  if (usage.costUSD !== null && usage.costUSD !== undefined) {
383
413
  totalLine += `, $${usage.costUSD.toFixed(6)} cost`;
@@ -498,13 +498,10 @@ export const calculateSessionTokens = async (sessionId, tempDir, resultModelUsag
498
498
  }
499
499
  // Initialize per-model usage tracking
500
500
  const modelUsage = {};
501
- // Issue #1501: Deduplicate JSONL entries by message ID (upstream: anthropics/claude-code#6805)
502
- // Claude Code's stream-json mode splits single API responses with multiple content blocks
503
- // into separate JSONL entries, each with the same message ID and identical usage stats.
501
+ // Issue #1501: Deduplicate JSONL entries by message ID (stream-json splits responses)
504
502
  const seenMessageIds = new Set();
505
503
  let duplicateCount = 0;
506
504
  // Issue #1501: Track peak context usage per request (not cumulative)
507
- // The context window limit is per-request, so we track the max single-request fill.
508
505
  const peakContextByModel = {};
509
506
  let globalPeakContext = 0;
510
507
  // Issue #1491: Track sub-sessions between compactification events
@@ -610,7 +607,10 @@ export const calculateSessionTokens = async (sessionId, tempDir, resultModelUsag
610
607
  usage.costUSD = usage._resultCostUSD ?? null;
611
608
  usage.costBreakdown = null;
612
609
  usage.modelName = modelId;
613
- usage.modelInfo = null;
610
+ // Issue #1539: Use contextWindow/maxOutputTokens from result JSON as fallback model limits
611
+ const ctx = usage._resultContextWindow,
612
+ out = usage._resultMaxOutputTokens;
613
+ usage.modelInfo = ctx || out ? { limit: { context: ctx || null, output: out || null } } : null;
614
614
  }
615
615
  }
616
616
  // Calculate grand totals across all models
@@ -2,7 +2,7 @@
2
2
  // GitHub-related utility functions. Check if use is already defined (when imported from solve.mjs), if not, fetch it (when running standalone)
3
3
  if (typeof globalThis.use === 'undefined') globalThis.use = (await eval(await (await fetch('https://unpkg.com/use-m/use.js')).text())).use;
4
4
  const { $ } = await use('command-stream'); // Use command-stream for consistent $ behavior
5
- import { log, maskToken, cleanErrorMessage, isENOSPC } from './lib.mjs';
5
+ import { log, maskToken, cleanErrorMessage, isENOSPC, ghCmdRetry } from './lib.mjs';
6
6
  import { reportError } from './sentry.lib.mjs';
7
7
  import { githubLimits, timeouts } from './config.lib.mjs';
8
8
  import { batchCheckPullRequestsForIssues as batchCheckPRs, batchCheckArchivedRepositories as batchCheckArchived } from './github.batch.lib.mjs';
@@ -172,8 +172,8 @@ export const checkRepositoryWritePermission = async (owner, repo, options = {})
172
172
  }
173
173
  try {
174
174
  await log('🔍 Checking repository write permissions...');
175
- // Use GitHub API to check repository permissions
176
- const permResult = await $`gh api repos/${owner}/${repo} --jq .permissions`;
175
+ // Use GitHub API to check repository permissions (issue #1536: retry on network errors)
176
+ const permResult = await ghCmdRetry(() => $`gh api repos/${owner}/${repo} --jq .permissions`, { label: `write perms ${owner}/${repo}` });
177
177
  if (permResult.code !== 0) {
178
178
  // API call failed - might be a private repo or network issue
179
179
  const errorOutput = (permResult.stderr ? permResult.stderr.toString() : '') + (permResult.stdout ? permResult.stdout.toString() : '');
@@ -1437,7 +1437,8 @@ export async function handlePRNotFoundError({ prNumber, owner, repo, argv, shoul
1437
1437
  */
1438
1438
  export async function detectRepositoryVisibility(owner, repo) {
1439
1439
  try {
1440
- const visibilityResult = await $`gh api repos/${owner}/${repo} --jq .visibility`;
1440
+ // Issue #1536: retry on transient network errors
1441
+ const visibilityResult = await ghCmdRetry(() => $`gh api repos/${owner}/${repo} --jq .visibility`, { label: `visibility ${owner}/${repo}` });
1441
1442
  if (visibilityResult.code === 0) {
1442
1443
  const visibility = visibilityResult.stdout.toString().trim();
1443
1444
  const isPublic = visibility === 'public';
package/src/lib.mjs CHANGED
@@ -291,11 +291,92 @@ export const isTransientNetworkError = error => {
291
291
  const output = (error?.stderr?.toString() || error?.stdout?.toString() || '').toLowerCase();
292
292
  const combined = msg + ' ' + output;
293
293
 
294
- const transientPatterns = ['i/o timeout', 'dial tcp', 'connection refused', 'connection reset', 'econnreset', 'etimedout', 'enotfound', 'ehostunreach', 'enetunreach', 'network is unreachable', 'temporary failure', 'http 502', 'http 503', 'http 504', 'bad gateway', 'service unavailable', 'gateway timeout', 'tls handshake timeout', 'ssl_error', 'socket hang up'];
294
+ // Issue #1536: added 'unexpected eof' seen in gh CLI when connection drops mid-response
295
+ const transientPatterns = ['i/o timeout', 'dial tcp', 'connection refused', 'connection reset', 'econnreset', 'etimedout', 'enotfound', 'ehostunreach', 'enetunreach', 'network is unreachable', 'temporary failure', 'http 502', 'http 503', 'http 504', 'bad gateway', 'service unavailable', 'gateway timeout', 'tls handshake timeout', 'ssl_error', 'socket hang up', 'unexpected eof'];
295
296
 
296
297
  return transientPatterns.some(pattern => combined.includes(pattern));
297
298
  };
298
299
 
300
+ /**
301
+ * Retry a GitHub CLI / API operation with exponential backoff on transient network errors.
302
+ * Unlike the generic `retry()`, this function:
303
+ * - Only retries on transient network errors (TCP reset, TLS timeout, etc.)
304
+ * - Immediately rethrows non-transient errors (404, 403, auth failures)
305
+ * - Logs stderr to the log file when a command fails (fixing terminal/log parity)
306
+ *
307
+ * Issue #1536: Most gh commands had no retry logic, causing solve to abort on
308
+ * intermittent network issues.
309
+ *
310
+ * @param {Function} fn - Async function to execute (should call gh CLI or GitHub API)
311
+ * @param {Object} [options] - Options
312
+ * @param {number} [options.maxAttempts=3] - Maximum number of attempts
313
+ * @param {number} [options.delay=1000] - Initial delay between retries in ms
314
+ * @param {number} [options.backoff=2] - Backoff multiplier
315
+ * @param {string} [options.label='gh command'] - Label for log messages
316
+ * @returns {Promise<*>} Result of successful function execution
317
+ * @throws {Error} Last error if all attempts fail or error is non-transient
318
+ */
319
+ export const ghRetry = async (fn, options = {}) => {
320
+ const { maxAttempts = 3, delay = 1000, backoff = 2, label = 'gh command' } = options;
321
+
322
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
323
+ try {
324
+ return await fn();
325
+ } catch (error) {
326
+ if (isTransientNetworkError(error) && attempt < maxAttempts) {
327
+ const waitTime = delay * Math.pow(backoff, attempt - 1);
328
+ await log(`⚠️ ${label}: Network error (attempt ${attempt}/${maxAttempts}), retrying in ${waitTime / 1000}s...`, { level: 'warn' });
329
+ await sleep(waitTime);
330
+ continue;
331
+ }
332
+ throw error;
333
+ }
334
+ }
335
+ };
336
+
337
+ /**
338
+ * Execute a command-stream `$` call with retry on transient network errors.
339
+ * This wraps the pattern: call $`gh ...`, check exit code, handle errors.
340
+ * On failure, stderr is logged to the log file (fixing terminal/log parity from issue #1536).
341
+ *
342
+ * @param {Function} cmdFn - Function that returns a command-stream result (e.g., () => $`gh api ...`)
343
+ * @param {Object} [options] - Options
344
+ * @param {number} [options.maxAttempts=3] - Maximum number of attempts
345
+ * @param {number} [options.delay=1000] - Initial delay between retries in ms
346
+ * @param {number} [options.backoff=2] - Backoff multiplier
347
+ * @param {string} [options.label='gh command'] - Label for log messages
348
+ * @returns {Promise<{stdout: string, stderr: string, code: number}>} Command result
349
+ */
350
+ export const ghCmdRetry = async (cmdFn, options = {}) => {
351
+ const { maxAttempts = 3, delay = 1000, backoff = 2, label = 'gh command' } = options;
352
+
353
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
354
+ const result = await cmdFn();
355
+
356
+ // Log stderr to log file for parity (issue #1536)
357
+ const stderr = result.stderr?.toString().trim();
358
+ if (stderr && result.code !== 0) {
359
+ await log(` [stderr] ${stderr}`, { level: 'warn' });
360
+ }
361
+
362
+ if (result.code === 0) {
363
+ return result;
364
+ }
365
+
366
+ // Check if this is a transient network error worth retrying
367
+ const combinedOutput = (result.stdout?.toString() || '') + ' ' + (result.stderr?.toString() || '');
368
+ if (isTransientNetworkError({ message: combinedOutput }) && attempt < maxAttempts) {
369
+ const waitTime = delay * Math.pow(backoff, attempt - 1);
370
+ await log(`⚠️ ${label}: Network error (attempt ${attempt}/${maxAttempts}), retrying in ${waitTime / 1000}s...`, { level: 'warn' });
371
+ await sleep(waitTime);
372
+ continue;
373
+ }
374
+
375
+ // Non-transient error or last attempt — return the result as-is
376
+ return result;
377
+ }
378
+ };
379
+
299
380
  /**
300
381
  * Format bytes to human readable string
301
382
  * @param {number} bytes - Number of bytes
@@ -5,9 +5,13 @@
5
5
  * this module only accepts the invitation for the specific repository or organization
6
6
  * that is being solved. This is safer and more targeted.
7
7
  *
8
+ * Issue #1536: Added retry with exponential backoff for transient network errors
9
+ * (TLS handshake timeout, unexpected EOF, connection reset, etc.)
10
+ *
8
11
  * @see https://docs.github.com/en/rest/collaborators/invitations
9
12
  * @see https://docs.github.com/en/rest/orgs/members
10
13
  * @see https://github.com/link-assistant/hive-mind/issues/1373
14
+ * @see https://github.com/link-assistant/hive-mind/issues/1536
11
15
  */
12
16
 
13
17
  import { promisify } from 'util';
@@ -15,6 +19,10 @@ import { exec as execCallback } from 'child_process';
15
19
 
16
20
  const exec = promisify(execCallback);
17
21
 
22
+ // Import retry utility (issue #1536)
23
+ const lib = await import('./lib.mjs');
24
+ const { ghRetry } = lib;
25
+
18
26
  /**
19
27
  * Accepts pending GitHub repository or organization invitation for a specific target.
20
28
  *
@@ -35,7 +43,7 @@ export async function autoAcceptInviteForRepo(owner, repo, log, verbose) {
35
43
 
36
44
  // Check for pending repository invitation
37
45
  try {
38
- const { stdout: repoInvJson } = await exec('gh api /user/repository_invitations 2>/dev/null || echo "[]"');
46
+ const { stdout: repoInvJson } = await ghRetry(() => exec('gh api /user/repository_invitations 2>/dev/null || echo "[]"'), { label: 'fetch repo invitations' });
39
47
  const repoInvitations = JSON.parse(repoInvJson.trim() || '[]');
40
48
  verbose && (await log(` Found ${repoInvitations.length} total pending repo invitation(s)`, { verbose: true }));
41
49
 
@@ -43,7 +51,7 @@ export async function autoAcceptInviteForRepo(owner, repo, log, verbose) {
43
51
 
44
52
  if (matchingInv) {
45
53
  try {
46
- await exec(`gh api -X PATCH /user/repository_invitations/${matchingInv.id}`);
54
+ await ghRetry(() => exec(`gh api -X PATCH /user/repository_invitations/${matchingInv.id}`), { label: `accept repo invitation for ${fullName}` });
47
55
  await log(`✅ --auto-accept-invite: Accepted repository invitation for ${fullName}`);
48
56
  result.acceptedRepo = true;
49
57
  } catch (e) {
@@ -58,7 +66,7 @@ export async function autoAcceptInviteForRepo(owner, repo, log, verbose) {
58
66
 
59
67
  // Check for pending organization membership
60
68
  try {
61
- const { stdout: orgMemJson } = await exec('gh api /user/memberships/orgs 2>/dev/null || echo "[]"');
69
+ const { stdout: orgMemJson } = await ghRetry(() => exec('gh api /user/memberships/orgs 2>/dev/null || echo "[]"'), { label: 'fetch org memberships' });
62
70
  const orgMemberships = JSON.parse(orgMemJson.trim() || '[]');
63
71
  const pendingOrgs = orgMemberships.filter(m => m.state === 'pending');
64
72
  verbose && (await log(` Found ${pendingOrgs.length} total pending org invitation(s)`, { verbose: true }));
@@ -68,7 +76,7 @@ export async function autoAcceptInviteForRepo(owner, repo, log, verbose) {
68
76
  if (matchingOrg) {
69
77
  const orgName = matchingOrg.organization.login;
70
78
  try {
71
- await exec(`gh api -X PATCH /user/memberships/orgs/${orgName} -f state=active`);
79
+ await ghRetry(() => exec(`gh api -X PATCH /user/memberships/orgs/${orgName} -f state=active`), { label: `accept org invitation for ${orgName}` });
72
80
  await log(`✅ --auto-accept-invite: Accepted organization invitation for ${orgName}`);
73
81
  result.acceptedOrg = true;
74
82
  } catch (e) {
package/src/solve.mjs CHANGED
@@ -239,9 +239,9 @@ const claudePath = argv.executeToolWithBun ? 'bunx claude' : process.env.CLAUDE_
239
239
  if (argv.autoFork && !argv.fork) {
240
240
  const { detectRepositoryVisibility } = githubLib;
241
241
 
242
- // Check if we have write access first
242
+ // Check if we have write access first (issue #1536: retry on transient network errors)
243
243
  await log('🔍 Checking repository access for auto-fork...');
244
- const permResult = await $`gh api repos/${owner}/${repo} --jq .permissions`;
244
+ const permResult = await lib.ghCmdRetry(() => $`gh api repos/${owner}/${repo} --jq .permissions`, { label: 'auto-fork perms' });
245
245
 
246
246
  if (permResult.code === 0) {
247
247
  const permissions = JSON.parse(permResult.stdout.toString().trim());
@@ -36,7 +36,7 @@ const { checkRepositoryWritePermission } = githubLib;
36
36
  // Get root repository (fork source or self), or null if inaccessible
37
37
  export const getRootRepository = async (owner, repo) => {
38
38
  try {
39
- const result = await $`gh api repos/${owner}/${repo} --jq '{fork: .fork, source: .source.full_name}' 2>&1`;
39
+ const result = await lib.ghCmdRetry(() => $`gh api repos/${owner}/${repo} --jq '{fork: .fork, source: .source.full_name}' 2>&1`, { label: `get root repo ${owner}/${repo}` });
40
40
  if (result.code !== 0) return null;
41
41
 
42
42
  const repoInfo = JSON.parse(result.stdout.toString().trim());
@@ -50,11 +50,10 @@ export const getRootRepository = async (owner, repo) => {
50
50
  // Check if current user has a fork of the given root repository
51
51
  export const checkExistingForkOfRoot = async rootRepo => {
52
52
  try {
53
- const userResult = await $`gh api user --jq .login`;
53
+ const userResult = await lib.ghCmdRetry(() => $`gh api user --jq .login`, { label: 'get user (fork check)' });
54
54
  if (userResult.code !== 0) return null;
55
55
  const currentUser = userResult.stdout.toString().trim();
56
-
57
- const forksResult = await $`gh api repos/${rootRepo}/forks --paginate --jq '.[] | select(.owner.login == "${currentUser}") | .full_name'`;
56
+ const forksResult = await lib.ghCmdRetry(() => $`gh api repos/${rootRepo}/forks --paginate --jq '.[] | select(.owner.login == "${currentUser}") | .full_name'`, { label: `check forks of ${rootRepo}` });
58
57
  if (forksResult.code !== 0) return null;
59
58
 
60
59
  const forks = forksResult.stdout
@@ -363,8 +362,8 @@ export const setupRepository = async (argv, owner, repo, forkOwner = null, issue
363
362
  await log(`\n${formatAligned('🍴', 'Fork mode:', 'ENABLED')}`);
364
363
  await log(`${formatAligned('', 'Checking fork status...', '')}\n`);
365
364
 
366
- // Get current user
367
- const userResult = await $`gh api user --jq .login`;
365
+ // Get current user (issue #1536: retry on transient network errors)
366
+ const userResult = await lib.ghCmdRetry(() => $`gh api user --jq .login`, { label: 'get current user' });
368
367
  if (userResult.code !== 0) {
369
368
  await log(`${formatAligned('❌', 'Error:', 'Failed to get current user')}`);
370
369
  await safeExit(1, 'Repository setup failed');