@link-assistant/hive-mind 1.50.8 → 1.50.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -30,11 +30,87 @@ const execAsync = promisify(exec);
30
30
  * Default path to Claude credentials file
31
31
  */
32
32
  const DEFAULT_CREDENTIALS_PATH = join(homedir(), '.claude', '.credentials.json');
33
+ const DEFAULT_CODEX_AUTH_PATH = join(homedir(), '.codex', 'auth.json');
34
+ const DEFAULT_CODEX_CONFIG_PATH = join(homedir(), '.codex', 'config.toml');
33
35
 
34
36
  /**
35
37
  * Anthropic OAuth usage API endpoint
36
38
  */
37
39
  const USAGE_API_ENDPOINT = 'https://api.anthropic.com/api/oauth/usage';
40
+ const CODEX_USAGE_API_DEFAULT_BASE_URL = 'https://chatgpt.com/backend-api';
41
+
42
/**
 * Decode the payload segment of a JWT without verifying the signature.
 * @param {string} token - Raw JWT (header.payload.signature)
 * @returns {Object|null} Parsed payload claims, or null when the token is
 *   missing, not a string, has no payload segment, or is not valid JSON.
 */
function decodeJwtPayload(token) {
  if (typeof token !== 'string' || token.length === 0) return null;

  const segment = token.split('.')[1];
  if (!segment) return null;

  try {
    // JWTs use base64url; convert to standard base64 and restore padding.
    const base64 = segment.replace(/-/g, '+').replace(/_/g, '/');
    const padLength = (4 - (base64.length % 4)) % 4;
    const json = Buffer.from(base64 + '='.repeat(padLength), 'base64').toString('utf8');
    return JSON.parse(json);
  } catch {
    // Malformed base64 or non-JSON payload — treat as undecodable.
    return null;
  }
}
55
+
56
/**
 * Convert a Unix timestamp in seconds to an ISO-8601 string.
 * @param {number|string|null|undefined} seconds - Epoch seconds
 * @returns {string|null} ISO timestamp, or null for nullish, non-numeric,
 *   non-finite, zero, or negative input.
 */
function unixSecondsToIsoDate(seconds) {
  if (seconds == null) return null;
  const value = Number(seconds);
  return Number.isFinite(value) && value > 0 ? new Date(value * 1000).toISOString() : null;
}
62
+
63
/**
 * Normalize one Codex rate-limit window into the shape used by /limits output.
 * Absent fields become null so the formatter can render "N/A".
 * @param {Object|null|undefined} window - Raw window from the usage API
 * @returns {{percentage: number|null, resetTime: *, resetsAt: string|null,
 *   windowSeconds: number|null, resetAfterSeconds: number|null}}
 */
function mapCodexWindow(window) {
  const resetsAt = unixSecondsToIsoDate(window?.reset_at);
  const percentage = window?.used_percent ?? null;
  const windowSeconds = window?.limit_window_seconds ?? null;
  const resetAfterSeconds = window?.reset_after_seconds ?? null;

  return {
    percentage,
    resetTime: formatResetTime(resetsAt),
    resetsAt,
    windowSeconds,
    resetAfterSeconds,
  };
}
73
+
74
/**
 * Load and parse the Codex auth.json file.
 * @param {string} authPath - Path to auth.json (defaults to ~/.codex/auth.json)
 * @param {boolean} verbose - Whether to log diagnostic output
 * @returns {Promise<Object|null>} Parsed auth object, or null when the file is
 *   missing, unreadable, or not valid JSON.
 */
async function readCodexAuth(authPath = DEFAULT_CODEX_AUTH_PATH, verbose = false) {
  let auth;
  try {
    auth = JSON.parse(await readFile(authPath, 'utf-8'));
  } catch (error) {
    // Missing or corrupt auth file is an expected state (Codex not set up).
    if (verbose) {
      console.error('[VERBOSE] /limits failed to read Codex auth:', error.message);
    }
    return null;
  }

  if (verbose) {
    console.log('[VERBOSE] /limits Codex auth loaded from:', authPath);
  }
  return auth;
}
91
+
92
/**
 * Resolve the Codex backend base URL from config.toml, falling back to the
 * default ChatGPT backend when the config is missing or has no override.
 * A configured chatgpt_base_url is trimmed of trailing slashes and normalized
 * to end in /backend-api.
 * @param {string} configPath - Path to config.toml (defaults to ~/.codex/config.toml)
 * @param {boolean} verbose - Whether to log diagnostic output
 * @returns {Promise<string>} Base URL without a trailing slash
 */
async function getCodexUsageBaseUrl(configPath = DEFAULT_CODEX_CONFIG_PATH, verbose = false) {
  let content;
  try {
    content = await readFile(configPath, 'utf-8');
  } catch (error) {
    // No config file is a normal state — use the default backend.
    if (verbose) {
      console.log('[VERBOSE] /limits using default Codex base URL:', CODEX_USAGE_API_DEFAULT_BASE_URL);
      console.log('[VERBOSE] /limits failed to read Codex config:', error.message);
    }
    return CODEX_USAGE_API_DEFAULT_BASE_URL;
  }

  // Minimal TOML probe: first `chatgpt_base_url = "..."` assignment wins.
  const match = content.match(/^\s*chatgpt_base_url\s*=\s*["']([^"']+)["']/m);
  if (!match?.[1]) return CODEX_USAGE_API_DEFAULT_BASE_URL;

  const trimmed = match[1].trim().replace(/\/+$/, '');
  const normalized = trimmed.endsWith('/backend-api') ? trimmed : `${trimmed}/backend-api`;

  if (verbose) {
    console.log('[VERBOSE] /limits Codex base URL loaded from config:', normalized);
  }
  return normalized;
}
38
114
 
39
115
  /**
40
116
  * Read Claude credentials from the credentials file
@@ -702,6 +778,162 @@ export async function getClaudeUsageLimits(verbose = false, credentialsPath = DE
702
778
  }
703
779
  }
704
780
 
781
/**
 * Get Codex usage limits through the ChatGPT-authenticated usage endpoint.
 * Mirrors the supported upstream Codex account/rate-limits path.
 *
 * Returns usage data for:
 * - Current session (5-hour) usage percentage and reset time
 * - Current week usage percentage and reset date
 * - Additional metered Codex limits when available
 *
 * @param {boolean} verbose - Whether to log verbose output
 * @param {string} authPath - Optional path to Codex auth.json
 * @param {string|null} baseUrl - Optional backend base URL override
 * @returns {Object} Object with success boolean, and either usage data or error message
 */
export async function getCodexUsageLimits(verbose = false, authPath = DEFAULT_CODEX_AUTH_PATH, baseUrl = null) {
  try {
    const auth = await readCodexAuth(authPath, verbose);
    if (!auth) {
      return {
        success: false,
        error: 'Could not read Codex authentication. Make sure Codex is properly installed and authenticated.',
      };
    }

    // Usage windows are only exposed for ChatGPT-mode authentication.
    if (auth.auth_mode && auth.auth_mode !== 'chatgpt') {
      return {
        success: false,
        error: 'Codex rate limits require ChatGPT authentication. API key auth does not expose account usage windows.',
      };
    }

    const accessToken = auth?.tokens?.access_token;
    if (!accessToken) {
      return {
        success: false,
        error: 'No Codex access token found. Please authenticate Codex with your ChatGPT account.',
      };
    }

    // Explicit override wins; otherwise read config.toml (default path).
    const resolvedBaseUrl = (baseUrl || (await getCodexUsageBaseUrl(undefined, verbose))).replace(/\/+$/, '');
    const usageEndpoint = `${resolvedBaseUrl}/wham/usage`;
    const tokenPayload = decodeJwtPayload(accessToken);
    const authClaims = tokenPayload?.['https://api.openai.com/auth'];
    const requestHeaders = {
      Accept: 'application/json',
      Authorization: `Bearer ${accessToken}`,
      'User-Agent': 'hive-mind-codex-limits/1.0',
    };

    if (verbose) {
      // Redact the bearer token down to its last 8 characters for logging.
      const redactedHeaders = {
        Accept: requestHeaders.Accept,
        Authorization: `Bearer ...${accessToken.slice(-8)}`,
        'User-Agent': requestHeaders['User-Agent'],
      };
      console.log('[VERBOSE] /limits fetching Codex usage from API...');
      console.log(`[VERBOSE] /limits Codex API request: GET ${usageEndpoint}`);
      console.log('[VERBOSE] /limits Codex auth mode:', auth.auth_mode || 'unknown');
      console.log('[VERBOSE] /limits Codex account id:', auth?.tokens?.account_id || authClaims?.chatgpt_account_id || 'unknown');
      console.log('[VERBOSE] /limits Codex plan type:', authClaims?.chatgpt_plan_type || 'unknown');
      console.log('[VERBOSE] /limits Codex API request headers:', JSON.stringify(redactedHeaders, null, 2));
    }

    const response = await fetch(usageEndpoint, {
      method: 'GET',
      headers: requestHeaders,
    });

    if (verbose) {
      console.log(`[VERBOSE] /limits Codex API HTTP status: ${response.status} ${response.statusText}`);
      console.log('[VERBOSE] /limits Codex API response headers:', JSON.stringify(Object.fromEntries(response.headers), null, 2));
    }

    if (!response.ok) {
      const errorText = await response.text();
      if (verbose) {
        console.error('[VERBOSE] /limits Codex API error body:', errorText);
      }

      if (response.status === 401) {
        return {
          success: false,
          error: 'Codex authentication expired. Please re-authenticate Codex with your ChatGPT account.',
        };
      }

      if (response.status === 429) {
        const retryAfter = response.headers.get('retry-after');
        return {
          success: false,
          error: `Codex usage API access has reached rate limit.${formatRetryAfterMessage(retryAfter)}`,
        };
      }

      return {
        success: false,
        error: `Failed to fetch Codex usage from API: ${response.status} ${response.statusText}`,
      };
    }

    const data = await response.json();

    if (verbose) {
      console.log('[VERBOSE] /limits Codex API response body:', JSON.stringify(data, null, 2));
    }

    // primary_window = rolling 5-hour session; secondary_window = weekly cap.
    const usage = {
      currentSession: mapCodexWindow(data?.rate_limit?.primary_window),
      allModels: mapCodexWindow(data?.rate_limit?.secondary_window),
      // Placeholder so the object shape matches the Claude usage structure.
      sonnetOnly: {
        percentage: null,
        resetTime: null,
        resetsAt: null,
      },
    };

    const rawAdditional = Array.isArray(data?.additional_rate_limits) ? data.additional_rate_limits : [];
    const additionalRateLimits = rawAdditional.map(limit => ({
      limitId: limit?.metered_feature || null,
      limitName: limit?.limit_name || limit?.metered_feature || 'additional',
      currentSession: mapCodexWindow(limit?.rate_limit?.primary_window),
      allModels: mapCodexWindow(limit?.rate_limit?.secondary_window),
      allowed: limit?.rate_limit?.allowed ?? null,
      limitReached: limit?.rate_limit?.limit_reached ?? null,
    }));

    return {
      success: true,
      usage,
      planType: data?.plan_type || authClaims?.chatgpt_plan_type || null,
      credits: data?.credits || null,
      additionalRateLimits,
      raw: data,
    };
  } catch (error) {
    if (verbose) {
      console.error('[VERBOSE] /limits Codex error:', error);
    }
    return {
      success: false,
      error: `Failed to get Codex usage limits: ${error.message}`,
    };
  }
}
936
+
705
937
  /**
706
938
  * Generate a text-based progress bar for usage percentage
707
939
  * @param {number} percentage - Usage percentage (0-100)
@@ -953,6 +1185,85 @@ export function formatUsageMessage(usage, diskSpace = null, githubRateLimit = nu
953
1185
  return '```\n' + sections.join('\n') + '```';
954
1186
  }
955
1187
 
1188
/**
 * Format Codex usage data into a section suitable for appending to /limits output.
 *
 * @param {Object|null} codexLimits - Result object from getCodexUsageLimits, or null
 * @param {string|null} codexError - Optional error message
 * @returns {string} Formatted section text
 */
export function formatCodexLimitsSection(codexLimits, codexError = null) {
  if (codexError) {
    return `Codex limits\n${codexError}\n`;
  }

  const usage = codexLimits?.usage || null;
  const additionalRateLimits = codexLimits?.additionalRateLimits || [];
  const credits = codexLimits?.credits || null;
  const planType = codexLimits?.planType || null;

  let section = 'Codex limits\n';
  if (planType) {
    section += `Plan: ${planType}\n`;
  }

  // Bug fix: compare with != null rather than !== null. Optional chaining on a
  // missing usage object/window yields undefined, which passed the old
  // `!== null` guard and then crashed on usage.currentSession.percentage
  // (TypeError when usage is null) or rendered "NaN%". `!= null` covers both
  // null and undefined, so missing data renders as "N/A".
  let sessionSection = 'Codex 5 hour session\n';
  const sessionWindow = usage?.currentSession;
  if (sessionWindow?.percentage != null) {
    const timePassed = calculateTimePassedPercentage(sessionWindow.resetsAt, 5);
    if (timePassed !== null) {
      sessionSection += `${getProgressBar(timePassed)} ${timePassed}% passed\n`;
    }
    const pct = Math.floor(sessionWindow.percentage);
    const bar = getProgressBar(pct, DISPLAY_THRESHOLDS.CODEX_5_HOUR_SESSION);
    const suffix = pct >= DISPLAY_THRESHOLDS.CODEX_5_HOUR_SESSION ? ' ⚠️' : ' used';
    sessionSection += `${bar} ${pct}%${suffix}\n`;
    if (sessionWindow.resetTime) {
      const relativeTime = formatRelativeTime(sessionWindow.resetsAt);
      sessionSection += relativeTime ? `Resets in ${relativeTime} (${sessionWindow.resetTime})\n` : `Resets ${sessionWindow.resetTime}\n`;
    }
  } else {
    sessionSection += 'N/A\n';
  }

  let weeklySection = 'Current week (all models)\n';
  const weeklyWindow = usage?.allModels;
  if (weeklyWindow?.percentage != null) {
    // 168 hours = one week for the time-passed progress bar.
    const timePassed = calculateTimePassedPercentage(weeklyWindow.resetsAt, 168);
    if (timePassed !== null) {
      weeklySection += `${getProgressBar(timePassed)} ${timePassed}% passed\n`;
    }
    const pct = Math.floor(weeklyWindow.percentage);
    const bar = getProgressBar(pct, DISPLAY_THRESHOLDS.CODEX_WEEKLY);
    const suffix = pct >= DISPLAY_THRESHOLDS.CODEX_WEEKLY ? ' ⚠️' : ' used';
    weeklySection += `${bar} ${pct}%${suffix}\n`;
    if (weeklyWindow.resetTime) {
      const relativeTime = formatRelativeTime(weeklyWindow.resetsAt);
      weeklySection += relativeTime ? `Resets in ${relativeTime} (${weeklyWindow.resetTime})\n` : `Resets ${weeklyWindow.resetTime}\n`;
    }
  } else {
    weeklySection += 'N/A\n';
  }

  section += `${sessionSection}\n${weeklySection}`;

  if (additionalRateLimits.length > 0) {
    section += '\nAdditional Codex limits\n';
    for (const limit of additionalRateLimits) {
      const sessionPct = limit.currentSession?.percentage;
      const weeklyPct = limit.allModels?.percentage;
      // Bug fix: == null also catches undefined (missing window), which the old
      // `=== null` check let through to Math.floor, printing "session NaN%".
      const sessionText = sessionPct == null ? 'session N/A' : `session ${Math.floor(sessionPct)}%`;
      const weeklyText = weeklyPct == null ? 'week N/A' : `week ${Math.floor(weeklyPct)}%`;
      section += `${limit.limitName}: ${sessionText}, ${weeklyText}\n`;
    }
  }

  if (credits) {
    const creditSummary = credits.unlimited ? 'unlimited' : `${credits.balance ?? '0'} balance`;
    section += `\nCodex credits\n${creditSummary}\n`;
  }

  return section;
}
1266
+
956
1267
  // ============================================================================
957
1268
  // Caching Layer
958
1269
  // ============================================================================
@@ -1064,6 +1375,29 @@ export async function getCachedClaudeLimits(verbose = false) {
1064
1375
  return result;
1065
1376
  }
1066
1377
 
1378
/**
 * Cached wrapper around getCodexUsageLimits.
 * Successful results are cached under 'codex'; rate-limit errors are cached
 * separately under 'codex-rate-limited' so repeated calls don't hammer the API.
 * @param {boolean} verbose - Whether to log cache diagnostics
 * @returns {Promise<Object>} Result object from getCodexUsageLimits
 */
export async function getCachedCodexLimits(verbose = false) {
  const cache = getLimitCache();
  const ttlMinutes = Math.round(CACHE_TTL.USAGE_API / 60000);

  const fresh = cache.get('codex', CACHE_TTL.USAGE_API);
  if (fresh) {
    if (verbose) console.log('[VERBOSE] /limits-cache: Using cached Codex limits (TTL: ' + ttlMinutes + ' minutes)');
    return fresh;
  }

  const rateLimited = cache.get('codex-rate-limited', CACHE_TTL.USAGE_API);
  if (rateLimited) {
    if (verbose) console.log('[VERBOSE] /limits-cache: Using cached Codex rate-limit error');
    return rateLimited;
  }

  if (verbose) console.log('[VERBOSE] /limits-cache: Cache miss for Codex limits, fetching from API...');
  const result = await getCodexUsageLimits(verbose);

  if (result.success) {
    cache.set('codex', result, CACHE_TTL.USAGE_API);
  } else if (result.error?.includes('rate limit')) {
    // Negative-cache only rate-limit failures; other errors retry next call.
    cache.set('codex-rate-limited', result, CACHE_TTL.USAGE_API);
    if (verbose) console.log('[VERBOSE] /limits-cache: Cached Codex rate-limit error for ' + ttlMinutes + ' minutes');
  }
  return result;
}
1400
+
1067
1401
  export async function getCachedGitHubLimits(verbose = false) {
1068
1402
  const cache = getLimitCache();
1069
1403
  const cached = cache.get('github', CACHE_TTL.API);
@@ -1113,13 +1447,14 @@ export async function getCachedDiskInfo(verbose = false) {
1113
1447
  }
1114
1448
 
1115
1449
/**
 * Fetch all cached limit/resource readings in parallel.
 * @param {boolean} verbose - Whether to log cache diagnostics
 * @returns {Promise<Object>} { claude, codex, github, memory, cpu, disk }
 */
export async function getAllCachedLimits(verbose = false) {
  const [claude, codex, github, memory, cpu, disk] = await Promise.all([
    getCachedClaudeLimits(verbose),
    getCachedCodexLimits(verbose),
    getCachedGitHubLimits(verbose),
    getCachedMemoryInfo(verbose),
    getCachedCpuInfo(verbose),
    getCachedDiskInfo(verbose),
  ]);
  return { claude, codex, github, memory, cpu, disk };
}
1119
1453
 
1120
1454
  export default {
1121
1455
  // Raw functions (no caching)
1122
1456
  getClaudeUsageLimits,
1457
+ getCodexUsageLimits,
1123
1458
  getCpuLoadInfo,
1124
1459
  getMemoryInfo,
1125
1460
  getDiskSpaceInfo,
@@ -1127,6 +1462,7 @@ export default {
1127
1462
  getProgressBar,
1128
1463
  calculateTimePassedPercentage,
1129
1464
  formatUsageMessage,
1465
+ formatCodexLimitsSection,
1130
1466
  formatRetryAfterMessage,
1131
1467
  // Threshold constants for progress bar visualization
1132
1468
  DISPLAY_THRESHOLDS,
@@ -1136,6 +1472,7 @@ export default {
1136
1472
  resetLimitCache,
1137
1473
  // Cached functions
1138
1474
  getCachedClaudeLimits,
1475
+ getCachedCodexLimits,
1139
1476
  getCachedGitHubLimits,
1140
1477
  getCachedMemoryInfo,
1141
1478
  getCachedCpuInfo,
@@ -103,14 +103,19 @@ export const opencodeModels = {
103
103
  // Codex models (OpenAI API)
104
104
/**
 * Codex model aliases (OpenAI API).
 * Maps both short aliases (gpt5, gpt4, gpt4o) and full model IDs to the
 * canonical model identifier passed to the tool.
 */
export const codexModels = {
  'gpt5': 'gpt-5',
  'gpt-5': 'gpt-5',
  'gpt-5.4': 'gpt-5.4',
  'gpt-5.4-mini': 'gpt-5.4-mini',
  'gpt-5.4-nano': 'gpt-5.4-nano',
  'gpt-5.2-codex': 'gpt-5.2-codex',
  'gpt-5.3-codex': 'gpt-5.3-codex',
  'gpt-5.3-codex-spark': 'gpt-5.3-codex-spark',
  'gpt-5.1-codex-max': 'gpt-5.1-codex-max',
  'o3-mini': 'o3-mini',
  'gpt4': 'gpt-4',
  'gpt-4': 'gpt-4',
  'gpt4o': 'gpt-4o',
  'gpt-4o': 'gpt-4o',
};
115
120
 
116
121
  // Default model for each tool (Issue #1473: centralized to avoid scattered hardcoded defaults)
@@ -118,7 +123,7 @@ export const defaultModels = {
118
123
  claude: 'sonnet',
119
124
  agent: 'nemotron-3-super-free', // Issue #1563: changed from qwen3.6-plus-free (free promotion ended) per agent PR #243
120
125
  opencode: 'grok-code-fast-1',
121
- codex: 'gpt-5',
126
+ codex: 'gpt-5.4',
122
127
  };
123
128
 
124
129
  // Models that support 1M token context window via [1m] suffix (Issue #1221, Issue #1238, Issue #1329)
@@ -180,11 +185,15 @@ export const OPENCODE_MODELS = {
180
185
// Full Codex model map: every alias from codexModels plus the canonical
// full model IDs restated explicitly (duplicate keys with identical values,
// so the spread and the explicit entries resolve the same way).
export const CODEX_MODELS = {
  ...codexModels,
  'gpt-5': 'gpt-5',
  'gpt-5.4': 'gpt-5.4',
  'gpt-5.4-mini': 'gpt-5.4-mini',
  'gpt-5.4-nano': 'gpt-5.4-nano',
  'gpt-5.2-codex': 'gpt-5.2-codex',
  'gpt-5.3-codex': 'gpt-5.3-codex',
  'gpt-5.3-codex-spark': 'gpt-5.3-codex-spark',
  'gpt-5.1-codex-max': 'gpt-5.1-codex-max',
  'gpt-4': 'gpt-4',
  'gpt-4o': 'gpt-4o',
};
189
198
 
190
199
  export const AGENT_MODELS = {
@@ -273,7 +282,7 @@ export const isModelCompatibleWithTool = (tool, model) => {
273
282
  case 'opencode':
274
283
  return mappedModel.includes('/') || Object.keys(opencodeModels).includes(model);
275
284
  case 'codex':
276
- return Object.keys(codexModels).includes(model) || mappedModel.startsWith('gpt-') || mappedModel.startsWith('o3') || mappedModel.startsWith('claude-');
285
+ return Object.keys(codexModels).includes(model) || mappedModel.startsWith('gpt-');
277
286
  default:
278
287
  return true;
279
288
  }
@@ -304,7 +313,7 @@ export const getValidModelsForTool = tool => {
304
313
  export const primaryModelNames = {
305
314
  claude: ['opus', 'sonnet', 'haiku', 'opusplan'],
306
315
  opencode: ['grok', 'gpt4o'],
307
- codex: ['gpt5', 'gpt5-codex', 'o3'],
316
+ codex: ['gpt-5.4', 'gpt-5.4-mini', 'gpt-5.3-codex', 'gpt-5.3-codex-spark', 'gpt-5.2-codex'],
308
317
  agent: ['nemotron-3-super-free', 'minimax-m2.5-free', 'big-pickle', 'gpt-5-nano', 'glm-5-free', 'deepseek-r1-free'],
309
318
  };
310
319
 
@@ -375,7 +384,7 @@ export const getAvailableModelNames = tool => {
375
384
  // - Full model IDs with slashes (e.g., 'openai/gpt-4')
376
385
  // - Long claude-prefixed model IDs (e.g., 'claude-sonnet-4-5-20250929')
377
386
  // - Full gpt- prefixed IDs that are ONLY version numbers (e.g., 'gpt-4', 'gpt-4o', 'gpt-5')
378
- // But keep descriptive aliases like 'gpt-5-nano', 'gpt-5-codex', 'o3', 'o3-mini', 'gpt5', etc.
387
+ // But keep descriptive aliases like 'gpt-5-nano', 'gpt-5.3-codex', 'o3-mini', 'gpt5', etc.
379
388
  // Issue #1185: Updated regex to not filter out gpt-5-nano (a valid short alias)
380
389
  if (key.includes('/')) return false;
381
390
  if (key.match(/^claude-.*-\d{8}$/)) return false; // Full claude model IDs with date
@@ -87,18 +87,6 @@ export const buildSystemPrompt = params => {
87
87
  // When in fork mode, screenshots are pushed to the fork, not the original repo
88
88
  const screenshotRepoPath = argv?.fork && forkedRepo ? forkedRepo : `${owner}/${repo}`;
89
89
 
90
- // Build thinking instruction based on --think level
91
- let thinkLine = '';
92
- if (argv && argv.think) {
93
- const thinkMessages = {
94
- low: 'You always think on every step.',
95
- medium: 'You always think hard on every step.',
96
- high: 'You always think harder on every step.',
97
- max: 'You always ultrathink on every step.',
98
- };
99
- thinkLine = `\n${thinkMessages[argv.think]}\n`;
100
- }
101
-
102
90
  // Build workspace-specific instructions and examples
103
91
  let workspaceInstructions = '';
104
92
  if (workspaceTmpDir) {
@@ -134,22 +122,22 @@ CI investigation with workspace tmp directory.
134
122
  `;
135
123
  }
136
124
 
137
- return `You are AI issue solver using OpenCode.${thinkLine}
125
+ return `You are an AI issue solver using OpenCode.
138
126
 
139
127
  General guidelines.
140
- - When you execute commands, always save their logs to files for easier reading if the output becomes large.
141
- - When running commands, do not set a timeout yourself let them run as long as needed.
142
- - When running sudo commands (especially package installations), always run them in the background to avoid timeout issues.
143
- - When CI is failing, make sure you download the logs locally and carefully investigate them.
128
+ - When you execute commands and the output becomes large, save the logs to files for easier review.
129
+ - When running commands, avoid setting a timeout yourself. Let them run as long as needed.
130
+ - When running sudo commands, especially package installations, run them in the background to avoid timeout issues.
131
+ - When CI is failing, download the logs locally and investigate them carefully.
144
132
  - When a code or log file has more than 1500 lines, read it in chunks of 1500 lines.
145
133
  - When facing a complex problem, do as much tracing as possible and turn on all verbose modes.
146
134
  ${getExperimentsExamplesSubPrompt(argv)}
147
- - When you face something extremely hard, use divide and conquer — it always helps.
135
+ - When you face something extremely hard, use divide and conquer.
148
136
  ${workspaceInstructions}
149
137
  Initial research.
150
- - When you start, make sure you create detailed plan for yourself and follow your todo list step by step, make sure that as many points from these guidelines are added to your todo list to keep track of everything that can help you solve the issue with highest possible quality.
151
- - When you read issue, read all details and comments thoroughly.
152
- - When you see screenshots or images in issue descriptions, pull request descriptions, comments, or discussions, download the image to a local file first, then use Read tool to view and analyze it. Before reading downloaded images with the Read tool, verify the file is a valid image (not HTML) using a CLI tool like the 'file' command to check the actual file format. When corrupted or non-image files (like GitHub's "Not Found" pages saved as .png) are read, they can cause "Could not process image" errors and crash the AI solver process. When the file command shows "HTML", "text", or "ASCII text", the download failed do not call Read on this file. Instead: (1) When images are from GitHub issues/PRs (URLs containing "github.com/user-attachments"), these require authentication — retry with: curl -L -H "Authorization: token $(gh auth token)" -o <filename> "<url>" (2) When the retry still fails, skip the image and note it was unavailable.
138
+ - When you start, create a detailed plan for yourself and follow your todo list step by step. Add as many relevant points from these guidelines to the todo list as practical so you can track the work clearly.
139
+ - When you read the issue, read all details and comments thoroughly.
140
+ - When you see screenshots or images in issue descriptions, pull request descriptions, comments, or discussions, download the image to a local file first, then use the Read tool to view and analyze it. Before reading downloaded images with the Read tool, verify that the file is a valid image rather than HTML by using a CLI tool such as the 'file' command. When corrupted or non-image files, such as GitHub "Not Found" pages saved as `.png`, are read, they can cause "Could not process image" errors and crash the AI solver process. When the file command shows "HTML", "text", or "ASCII text", the download failed, so do not call Read on that file. Instead: (1) when images are from GitHub issues or PRs, such as URLs containing "github.com/user-attachments", retry with: curl -L -H "Authorization: token $(gh auth token)" -o <filename> "<url>" (2) when the retry still fails, skip the image and note that it was unavailable.
153
141
  - When you need issue details, use gh issue view https://github.com/${owner}/${repo}/issues/${issueNumber}.
154
142
  - When you need related code, use gh search code --owner ${owner} [keywords].
155
143
  - When you need repo context, read files in your working directory.${
@@ -158,21 +146,21 @@ Initial research.
158
146
  - When you study related work, study the most recent related pull requests.`
159
147
  : ''
160
148
  }
161
- - When issue is not defined enough, write a comment to ask clarifying questions.
149
+ - When the issue is not defined clearly enough, write a comment with clarifying questions.
162
150
  - When accessing GitHub Gists, use gh gist view command instead of direct URL fetching.
163
- - When you are fixing a bug, please make sure you first find the actual root cause, do as many experiments as needed.
164
- - When you are fixing a bug and code does not have enough tracing/logs, add them and make sure they stay in the code, but are switched off by default.
151
+ - When you are fixing a bug, find the actual root cause first and run as many experiments as needed.
152
+ - When you are fixing a bug and the code does not have enough tracing or logs, add them and keep them in the code with the default state switched off.
165
153
  - When you need comments on a pull request, note that GitHub has three different comment types with different API endpoints:
166
154
  1. PR review comments (inline code comments): gh api repos/${owner}/${repo}/pulls/${prNumber}/comments --paginate
167
155
  2. PR conversation comments (general discussion): gh api repos/${owner}/${repo}/issues/${prNumber}/comments --paginate
168
156
  3. PR reviews (approve/request changes): gh api repos/${owner}/${repo}/pulls/${prNumber}/reviews --paginate
169
157
  Note: The command "gh pr view --json comments" only returns conversation comments and misses review comments.
170
- - When you need latest comments on issue, use gh api repos/${owner}/${repo}/issues/${issueNumber}/comments --paginate.
158
+ - When you need the latest comments on the issue, use gh api repos/${owner}/${repo}/issues/${issueNumber}/comments --paginate.
171
159
 
172
160
  Solution development and testing.
173
161
  - When issue is solvable, first create a test that reproduces the problem, then implement the fix.
174
162
  - When implementing features, search for similar existing implementations in the codebase and use them as examples instead of implementing everything from scratch.
175
- - When coding, each atomic step that can be useful by itself should be committed to the pull request's branch, meaning if work will be interrupted by any reason parts of solution will still be kept intact and safe in pull request.
163
+ - When coding, commit each atomic step that is useful on its own to the pull request branch so interrupted work remains preserved in the pull request.
176
164
  - When you test:
177
165
  start from testing of small functions using separate scripts;
178
166
  write unit tests with mocks for easy and quick start.
@@ -180,7 +168,7 @@ Solution development and testing.
180
168
  - When you test solution draft, include automated checks in pr.
181
169
  - When you write or modify tests, consider setting reasonable timeouts at test, suite, and CI job levels so failures surface quickly instead of hanging.
182
170
  - When you see repeated test timeout patterns in CI, investigate the root cause rather than increasing timeouts.
183
- - When issue is unclear, write comment on issue asking questions.
171
+ - When the issue is unclear, write a comment on the issue with questions.
184
172
  - When you encounter any problems that you are unable to solve yourself, write a comment to the pull request asking for help.
185
173
  - When you need human help, use gh pr comment ${prNumber} --body "your message" to comment on existing PR.
186
174
 
@@ -188,9 +176,9 @@ Reproducible testing.
188
176
  - When fixing a bug, create a test that reproduces the problem before implementing the fix. When you cannot reproduce the problem, you cannot verify the fix.
189
177
  - When encountering logic bugs, write an automated test that fails due to the bug, then implement the fix to make it pass.
190
178
  - When encountering UI bugs, capture a screenshot showing the problem state, then create a visual regression test or manual verification screenshot after the fix.
191
- - When creating tests, prefer minimum reproducible examples - the simplest test case that demonstrates the issue.
179
+ - When creating tests, prefer minimum reproducible examples, meaning the simplest test case that demonstrates the issue.
192
180
  - When submitting a fix, include in the PR description: (1) how to reproduce the issue, (2) the automated test that verifies the fix, (3) before/after screenshots for UI issues.
193
- - When a bug fix doesn't have a reproducing test, the fix is incomplete - regressions can silently occur later.
181
+ - When a bug fix does not have a reproducing test, treat the fix as incomplete because regressions can occur later without notice.
194
182
 
195
183
  Preparing pull request.
196
184
  - When you code, follow contributing guidelines.
@@ -200,14 +188,14 @@ Preparing pull request.
200
188
  - When there is a package with version and GitHub Actions workflows for automatic release, update the version in your pull request to prepare for next release.
201
189
  - When you update existing pr ${prNumber}, use gh pr edit to modify title and description.
202
190
  - When you finalize the pull request:
203
- check that pull request title and description are updated (the PR may start with a [WIP] prefix and placeholder description that should be replaced with actual title and description of the changes),
191
+ check that the pull request title and description are updated (the PR may start with a [WIP] prefix and a placeholder description that should be replaced with the actual title and description of the changes),
204
192
  follow style from merged prs for code, title, and description,
205
- make sure no uncommitted changes corresponding to the original requirements are left behind,
206
- make sure the default branch is merged to the pull request's branch,
207
- make sure all CI checks passing if they exist before you finish,
208
- double-check that all changes in the pull request answer to original requirements of the issue,
209
- make sure no new bugs are introduced in pull request by carefully reading gh pr diff,
210
- make sure no previously existing features were removed without an explicit request from users via the issue description, issue comments, and/or pull request comments.
193
+ check that no uncommitted changes corresponding to the original requirements are left behind,
194
+ check that the default branch is merged into the pull request branch,
195
+ check that all CI checks are passing if they exist before you finish,
196
+ double-check that all changes in the pull request address the original requirements of the issue,
197
+ check for newly introduced bugs in the pull request by carefully reading gh pr diff,
198
+ check that no previously existing features were removed without an explicit request in the issue description, issue comments, or pull request comments.
211
199
  - When you finish implementation, use gh pr ready ${prNumber}.
212
200
 
213
201
  Workflow and collaboration.
@@ -220,7 +208,7 @@ Workflow and collaboration.
220
208
  - When you contribute, keep repository history forward-moving with regular commits, pushes, and reverts if needed.
221
209
  - When you face conflict that you cannot resolve yourself, ask for help.
222
210
  - When you collaborate, respect branch protections by working only on ${branchName}.
223
- - When you mention result, include pull request url or comment url.
211
+ - When you mention a result, include the pull request URL or comment URL.
224
212
  - When you need to create pr, remember pr ${prNumber} already exists for this branch.
225
213
 
226
214
  Self review.
@@ -229,7 +217,7 @@ Self review.
229
217
  - When you finalize, confirm code, tests, and description are consistent.${
230
218
  argv && argv.promptEnsureAllRequirementsAreMet
231
219
  ? `
232
- - When no explicit feedback or requirements are provided, ensure all changes are correct, consistent, validated, tested, logged and fully meet all discussed requirements (check issue description and all comments in issue and in pull request). Ensure all CI/CD checks pass.`
220
+ - When no explicit feedback or requirements are provided, ensure all changes are correct, consistent, validated, tested, logged, and aligned with all discussed requirements by checking the issue description and all comments on the issue and pull request. Check that all CI or CD checks are passing.`
233
221
  : ''
234
222
  }
235
223
 
@@ -246,6 +246,8 @@ export const QUEUE_CONFIG = {
246
246
  disk: getThresholdConfig('disk', 'HIVE_MIND_DISK_THRESHOLD', 'HIVE_MIND_DISK_STRATEGY', 0.9, 'reject'),
247
247
  claude5Hour: getThresholdConfig('claude5Hour', 'HIVE_MIND_CLAUDE_5_HOUR_SESSION_THRESHOLD', 'HIVE_MIND_CLAUDE_5_HOUR_SESSION_STRATEGY', 0.65, 'dequeue-one-at-a-time'),
248
248
  claudeWeekly: getThresholdConfig('claudeWeekly', 'HIVE_MIND_CLAUDE_WEEKLY_THRESHOLD', 'HIVE_MIND_CLAUDE_WEEKLY_STRATEGY', 0.97, 'dequeue-one-at-a-time'),
249
+ codex5Hour: getThresholdConfig('codex5Hour', 'HIVE_MIND_CODEX_5_HOUR_SESSION_THRESHOLD', 'HIVE_MIND_CODEX_5_HOUR_SESSION_STRATEGY', 0.65, 'dequeue-one-at-a-time'),
250
+ codexWeekly: getThresholdConfig('codexWeekly', 'HIVE_MIND_CODEX_WEEKLY_THRESHOLD', 'HIVE_MIND_CODEX_WEEKLY_STRATEGY', 0.97, 'dequeue-one-at-a-time'),
249
251
  githubApi: getThresholdConfig('githubApi', 'HIVE_MIND_GITHUB_API_THRESHOLD', 'HIVE_MIND_GITHUB_API_STRATEGY', 0.75, 'enqueue'),
250
252
  },
251
253
 
@@ -256,6 +258,8 @@ export const QUEUE_CONFIG = {
256
258
  DISK_THRESHOLD: getThresholdConfig('disk', 'HIVE_MIND_DISK_THRESHOLD', 'HIVE_MIND_DISK_STRATEGY', 0.9, 'reject').value,
257
259
  CLAUDE_5_HOUR_SESSION_THRESHOLD: getThresholdConfig('claude5Hour', 'HIVE_MIND_CLAUDE_5_HOUR_SESSION_THRESHOLD', 'HIVE_MIND_CLAUDE_5_HOUR_SESSION_STRATEGY', 0.65, 'dequeue-one-at-a-time').value,
258
260
  CLAUDE_WEEKLY_THRESHOLD: getThresholdConfig('claudeWeekly', 'HIVE_MIND_CLAUDE_WEEKLY_THRESHOLD', 'HIVE_MIND_CLAUDE_WEEKLY_STRATEGY', 0.97, 'dequeue-one-at-a-time').value,
261
+ CODEX_5_HOUR_SESSION_THRESHOLD: getThresholdConfig('codex5Hour', 'HIVE_MIND_CODEX_5_HOUR_SESSION_THRESHOLD', 'HIVE_MIND_CODEX_5_HOUR_SESSION_STRATEGY', 0.65, 'dequeue-one-at-a-time').value,
262
+ CODEX_WEEKLY_THRESHOLD: getThresholdConfig('codexWeekly', 'HIVE_MIND_CODEX_WEEKLY_THRESHOLD', 'HIVE_MIND_CODEX_WEEKLY_STRATEGY', 0.97, 'dequeue-one-at-a-time').value,
259
263
  GITHUB_API_THRESHOLD: getThresholdConfig('githubApi', 'HIVE_MIND_GITHUB_API_THRESHOLD', 'HIVE_MIND_GITHUB_API_STRATEGY', 0.75, 'enqueue').value,
260
264
 
261
265
  // Timing
@@ -290,6 +294,8 @@ export const DISPLAY_THRESHOLDS = {
290
294
  DISK: thresholdToPercent(QUEUE_CONFIG.DISK_THRESHOLD),
291
295
  CLAUDE_5_HOUR_SESSION: thresholdToPercent(QUEUE_CONFIG.CLAUDE_5_HOUR_SESSION_THRESHOLD),
292
296
  CLAUDE_WEEKLY: thresholdToPercent(QUEUE_CONFIG.CLAUDE_WEEKLY_THRESHOLD),
297
+ CODEX_5_HOUR_SESSION: thresholdToPercent(QUEUE_CONFIG.CODEX_5_HOUR_SESSION_THRESHOLD),
298
+ CODEX_WEEKLY: thresholdToPercent(QUEUE_CONFIG.CODEX_WEEKLY_THRESHOLD),
293
299
  GITHUB_API: thresholdToPercent(QUEUE_CONFIG.GITHUB_API_THRESHOLD),
294
300
  };
295
301
 
@@ -160,6 +160,7 @@ export const autoContinueWhenLimitResets = async (issueUrl, sessionId, argv, sho
160
160
  resumeArgs.push('--session-type', sessionType);
161
161
 
162
162
  // Preserve other flags from original invocation
163
+ if (argv.tool && argv.tool !== 'claude') resumeArgs.push('--tool', argv.tool);
163
164
  if (argv.model !== 'sonnet') resumeArgs.push('--model', argv.model);
164
165
  if (argv.verbose) resumeArgs.push('--verbose');
165
166
  if (argv.fork) resumeArgs.push('--fork');