@link-assistant/hive-mind 1.38.1 → 1.38.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +16 -0
- package/package.json +1 -1
- package/src/claude.budget-stats.lib.mjs +113 -123
- package/src/claude.lib.mjs +51 -14
- package/src/github-merge-repo-actions.lib.mjs +103 -0
- package/src/github-merge.lib.mjs +19 -15
- package/src/github.lib.mjs +1 -1
- package/src/solve.auto-merge.lib.mjs +143 -19
- package/src/solve.config.lib.mjs +5 -0
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,21 @@
  # @link-assistant/hive-mind
 
+ ## 1.38.3
+
+ ### Patch Changes
+
+ - deb31bf: fix: add multi-mechanism CI consensus, repo-wide action monitoring, and 5-min minimum CI check interval to prevent false positive "Ready to merge"
+
+ ## 1.38.2
+
+ ### Patch Changes
+
+ - 290139f: fix: correct cost and token/context budget calculations (#1501)
+   - Deduplicate JSONL session entries by message ID to fix inflated token counts caused by upstream anthropics/claude-code#6805
+   - Show peak context window usage (max single-request fill) instead of cumulative sum which produced nonsensical percentages like 7516%
+   - Add "Total tokens processed" as a separate cumulative metric for session throughput visibility
+   - Add verbose logging for JSONL deduplication stats and peak context values
+
  ## 1.38.1
 
  ### Patch Changes
package/src/claude.budget-stats.lib.mjs
CHANGED
@@ -14,6 +14,8 @@ export const createEmptySubSessionUsage = () => ({
    cacheReadTokens: 0,
    outputTokens: 0,
    messageCount: 0,
+   peakContextUsage: 0,
+   peakOutputUsage: 0,
  });
 
  /**
@@ -136,173 +138,161 @@ export const displayCostComparison = async (publicCost, anthropicCost, log) => {
  /**
   * Display token budget statistics (context window usage and ratios)
   * @param {Object} usage - Usage data for a model
+  * @param {Object} tokenUsage - Full token usage data (with subSessions)
   * @param {Function} log - Logging function
   */
- export const displayBudgetStats = async (usage, log) => {
+ export const displayBudgetStats = async (usage, tokenUsage, log) => {
    const modelInfo = usage.modelInfo;
    if (!modelInfo?.limit) {
      await log('\n ⚠️ Budget stats not available (no model limits found)');
      return;
    }
 
-   await log('\n 📊
+   await log('\n 📊 Context and tokens usage:');
 
-
-
-
-
-   const totalInputUsed = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
-   const contextUsageRatio = totalInputUsed / contextLimit;
-   const contextUsagePercent = (contextUsageRatio * 100).toFixed(2);
+   const contextLimit = modelInfo.limit.context;
+   const outputLimit = modelInfo.limit.output;
+   const subSessions = tokenUsage?.subSessions || [];
+   const hasMultipleSubSessions = subSessions.length > 1;
 
-
-   await log(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-   }
-
-
-  * Display sub-session breakdown when compactification events occurred (Issue #1491)
-  * @param {Object} tokenUsage - Token usage data with subSessions and compactifications
-  * @param {Object} modelInfo - Model info with context/output limits
-  * @param {Function} log - Logging function
-  */
- export const displaySubSessionStats = async (tokenUsage, modelInfo, log) => {
-   if (!tokenUsage.subSessions || !tokenUsage.compactifications) return;
-
-   const contextLimit = modelInfo?.limit?.context;
-   await log(`\n 🔄 Compactification events: ${tokenUsage.compactifications.length}`);
-
-   for (let i = 0; i < tokenUsage.subSessions.length; i++) {
-     const sub = tokenUsage.subSessions[i];
-     const totalInput = sub.inputTokens + sub.cacheCreationTokens + sub.cacheReadTokens;
-     const label = i === 0 ? 'Initial session' : `After compactification #${i}`;
-
-     await log(`  Sub-session ${i + 1} (${label}):`);
-     await log(`    Messages: ${sub.messageCount}`);
-     await log(`    Context used: ${formatNumber(totalInput)} tokens`);
+   if (hasMultipleSubSessions) {
+     await log('    Sub sessions (between compact events):');
+     for (let i = 0; i < subSessions.length; i++) {
+       const sub = subSessions[i];
+       const subPeak = sub.peakContextUsage || 0;
+       let line = `      ${i + 1}. `;
+       if (contextLimit && subPeak > 0) {
+         const pct = ((subPeak / contextLimit) * 100).toFixed(0);
+         line += `${formatNumber(subPeak)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`;
+       } else {
+         const subTotal = sub.inputTokens + sub.cacheCreationTokens + sub.cacheReadTokens;
+         line += `${formatNumber(subTotal)} input tokens`;
+       }
+       if (outputLimit) {
+         const outPct = ((sub.outputTokens / outputLimit) * 100).toFixed(0);
+         line += `; ${formatNumber(sub.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`;
+       } else {
+         line += `; ${formatNumber(sub.outputTokens)} output tokens`;
+       }
+       await log(line);
+     }
+   } else {
+     // Single sub-session: simplified format
+     const peakContext = usage.peakContextUsage || 0;
      if (contextLimit) {
-
-
+       if (peakContext > 0) {
+         const pct = ((peakContext / contextLimit) * 100).toFixed(0);
+         await log(`    Max context window: ${formatNumber(peakContext)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
+       }
+     }
+     if (outputLimit) {
+       const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
+       await log(`    Max output tokens: ${formatNumber(usage.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`);
      }
-     await log(`    Output: ${formatNumber(sub.outputTokens)} tokens`);
    }
 
-   //
-
-
-
-
-
-   }
+   // Cumulative totals
+   const totalInputNonCached = usage.inputTokens + usage.cacheCreationTokens;
+   const cachedTokens = usage.cacheReadTokens;
+   let totalLine = `    Total input tokens: ${formatNumber(totalInputNonCached)}`;
+   if (cachedTokens > 0) totalLine += ` + ${formatNumber(cachedTokens)} cached`;
+   await log(totalLine);
+   await log(`    Total output tokens: ${formatNumber(usage.outputTokens)}`);
  };
 
  /**
-  *
-  *
-  * @
-  * @param {Object} jsonlTokenUsage - Token usage calculated from JSONL session file
-  * @param {Function} log - Logging function
+  * Format a token count with K/M suffix for compact display
+  * @param {number} tokens - Token count
+  * @returns {string} Formatted string like "850K" or "1.5M"
   */
-
-   if (
-
-
-   const jsonlTotal = jsonlTokenUsage.inputTokens + jsonlTokenUsage.cacheCreationTokens + jsonlTokenUsage.outputTokens;
-
-   await log('\n 🔍 Token calculation comparison:');
-   await log(`    Stream JSON events: ${formatNumber(streamTotal)} tokens (${streamTokenUsage.eventCount} events)`);
-   await log(`    JSONL session file: ${formatNumber(jsonlTotal)} tokens`);
-
-   if (streamTotal !== jsonlTotal) {
-     const diff = jsonlTotal - streamTotal;
-     const pct = streamTotal > 0 ? ((diff / streamTotal) * 100).toFixed(2) : 'N/A';
-     await log(`    Difference: ${formatNumber(Math.abs(diff))} tokens (${diff > 0 ? '+' : ''}${pct}%)`);
-   } else {
-     await log('    Match: calculations are consistent');
-   }
+ const formatTokensCompact = tokens => {
+   if (tokens >= 1000000) return `${(tokens / 1000000).toFixed(tokens % 1000000 === 0 ? 0 : 1)}M`;
+   if (tokens >= 1000) return `${(tokens / 1000).toFixed(tokens % 1000 === 0 ? 0 : 1)}K`;
+   return tokens.toLocaleString();
  };
 
  /**
-  * Build budget stats string for GitHub PR comments (Issue #1491)
-  *
+  * Build budget stats string for GitHub PR comments (Issue #1491, #1501)
+  * Format requested by user: sub-sessions between compactification events,
+  * per-model breakdown, cumulative totals with cached tokens shown separately.
   * @param {Object} tokenUsage - Token usage data from calculateSessionTokens
-  * @param {Object|null} streamTokenUsage - Token usage from stream JSON events
+  * @param {Object|null} streamTokenUsage - Token usage from stream JSON events (used for comparison, not displayed)
   * @returns {string} Formatted markdown string for PR comment
   */
- export const buildBudgetStatsString =
+ export const buildBudgetStatsString = tokenUsage => {
    if (!tokenUsage) return '';
 
-   let stats = '\n\n### 📊 **
+   let stats = '\n\n### 📊 **Context and tokens usage:**';
 
    // Per-model breakdown
    if (tokenUsage.modelUsage) {
      const modelIds = Object.keys(tokenUsage.modelUsage);
+     const isMultiModel = modelIds.length > 1;
+
      for (const modelId of modelIds) {
        const usage = tokenUsage.modelUsage[modelId];
        const modelName = usage.modelName || modelId;
        const contextLimit = usage.modelInfo?.limit?.context;
        const outputLimit = usage.modelInfo?.limit?.output;
-       const totalInput = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
 
-       if (
+       if (isMultiModel) stats += `\n\n**${modelName}:**`;
 
-
-
-
-       } else {
-         stats += `\n- Context tokens used: ${totalInput.toLocaleString()}`;
-       }
+       // Sub-session display (Issue #1501: show per sub-session stats)
+       const subSessions = tokenUsage.subSessions || [];
+       const hasMultipleSubSessions = subSessions.length > 1;
 
-       if (
-
-       stats +=
+       if (hasMultipleSubSessions) {
+         // Multiple sub-sessions: show numbered list
+         stats += '\n\nSub sessions (between compact events):';
+         for (let i = 0; i < subSessions.length; i++) {
+           const sub = subSessions[i];
+           const subPeakContext = sub.peakContextUsage || 0;
+           const subTotalInput = sub.inputTokens + sub.cacheCreationTokens + sub.cacheReadTokens;
+           let line = `\n${i + 1}. `;
+           if (contextLimit && subPeakContext > 0) {
+             const pct = ((subPeakContext / contextLimit) * 100).toFixed(0);
+             line += `${formatTokensCompact(subPeakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`;
+           } else {
+             line += `${formatTokensCompact(subTotalInput)} input tokens`;
+           }
+           if (outputLimit) {
+             const outPct = ((sub.outputTokens / outputLimit) * 100).toFixed(0);
+             line += `; ${formatTokensCompact(sub.outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`;
+           } else {
+             line += `; ${formatTokensCompact(sub.outputTokens)} output tokens`;
+           }
+           stats += line;
+         }
        } else {
-
+         // Single sub-session (or no sub-sessions): simplified format
+         const peakContext = usage.peakContextUsage || 0;
+         if (contextLimit) {
+           if (peakContext > 0) {
+             const pct = ((peakContext / contextLimit) * 100).toFixed(0);
+             stats += `\n- Max context window: ${formatTokensCompact(peakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`;
+           } else {
+             const totalInput = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
+             const pct = ((totalInput / contextLimit) * 100).toFixed(0);
+             stats += `\n- Context window: ${formatTokensCompact(totalInput)} / ${formatTokensCompact(contextLimit)} tokens (${pct}%)`;
+           }
+         }
+         if (outputLimit) {
+           const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
+           stats += `\n- Max output tokens: ${formatTokensCompact(usage.outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`;
+         }
        }
-     }
-     }
 
-
-
-
-
-
-
-       const label = i === 0 ? 'initial' : `after compactification #${i}`;
-       stats += `\n  - Sub-session ${i + 1} (${label}): ${totalInput.toLocaleString()} context, ${sub.outputTokens.toLocaleString()} output, ${sub.messageCount} messages`;
+       // Cumulative totals: input tokens + cached shown separately
+       const totalInputNonCached = usage.inputTokens + usage.cacheCreationTokens;
+       const cachedTokens = usage.cacheReadTokens;
+       stats += `\n\nTotal input tokens: ${formatTokensCompact(totalInputNonCached)}`;
+       if (cachedTokens > 0) stats += ` + ${formatTokensCompact(cachedTokens)} cached`;
+       stats += `\nTotal output tokens: ${formatTokensCompact(usage.outputTokens)} output`;
      }
    }
 
-   // Stream vs JSONL comparison
-
-   const streamTotal = streamTokenUsage.inputTokens + streamTokenUsage.cacheCreationTokens + streamTokenUsage.outputTokens;
-   const jsonlTotal = tokenUsage.inputTokens + tokenUsage.cacheCreationTokens + tokenUsage.outputTokens;
-   stats += `\n- Own calculation (stream): ${streamTotal.toLocaleString()} tokens (${streamTokenUsage.eventCount} events)`;
-   stats += `\n- JSONL calculation: ${jsonlTotal.toLocaleString()} tokens`;
-   if (streamTotal !== jsonlTotal) {
-     const diff = jsonlTotal - streamTotal;
-     const pct = streamTotal > 0 ? ((diff / streamTotal) * 100).toFixed(2) : 'N/A';
-     stats += ` (diff: ${diff > 0 ? '+' : ''}${pct}%)`;
-   }
- }
+   // Stream vs JSONL comparison — kept for internal diagnostics only in verbose/debug mode
+   // Not shown to users per feedback (Issue #1501 PR comment)
 
    return stats;
  };
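For reference, the compact formatter introduced above produces output like the following. This is a small illustration of its behavior, not part of the package; the function body is copied from the diff:

```js
// Illustration of formatTokensCompact's output for a few representative counts.
const formatTokensCompact = tokens => {
  if (tokens >= 1000000) return `${(tokens / 1000000).toFixed(tokens % 1000000 === 0 ? 0 : 1)}M`;
  if (tokens >= 1000) return `${(tokens / 1000).toFixed(tokens % 1000 === 0 ? 0 : 1)}K`;
  return tokens.toLocaleString();
};

console.log(formatTokensCompact(850000));  // "850K"
console.log(formatTokensCompact(1500000)); // "1.5M"
console.log(formatTokensCompact(200000));  // "200K"
console.log(formatTokensCompact(999));     // "999"
```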
package/src/claude.lib.mjs
CHANGED
@@ -12,7 +12,7 @@ import { timeouts, retryLimits, claudeCode, getClaudeEnv, getThinkingLevelToToke
  import { detectUsageLimit, formatUsageLimitMessage } from './usage-limit.lib.mjs';
  import { createInteractiveHandler } from './interactive-mode.lib.mjs';
  import { sanitizeObjectStrings } from './unicode-sanitization.lib.mjs';
- import { displayBudgetStats,
+ import { displayBudgetStats, createEmptySubSessionUsage, accumulateModelUsage, displayModelUsage, displayCostComparison } from './claude.budget-stats.lib.mjs';
  import { buildClaudeResumeCommand } from './claude.command-builder.lib.mjs';
  import { handleClaudeRuntimeSwitch } from './claude.runtime-switch.lib.mjs'; // see issue #1141
  import { CLAUDE_MODELS as availableModels } from './models/index.mjs'; // Issue #1221
@@ -497,6 +497,15 @@ export const calculateSessionTokens = async (sessionId, tempDir) => {
    }
    // Initialize per-model usage tracking
    const modelUsage = {};
+   // Issue #1501: Deduplicate JSONL entries by message ID (upstream: anthropics/claude-code#6805)
+   // Claude Code's stream-json mode splits single API responses with multiple content blocks
+   // into separate JSONL entries, each with the same message ID and identical usage stats.
+   const seenMessageIds = new Set();
+   let duplicateCount = 0;
+   // Issue #1501: Track peak context usage per request (not cumulative)
+   // The context window limit is per-request, so we track the max single-request fill.
+   const peakContextByModel = {};
+   let globalPeakContext = 0;
    // Issue #1491: Track sub-sessions between compactification events
    const subSessions = [];
    let currentSubSession = createEmptySubSessionUsage();
@@ -524,14 +533,39 @@ export const calculateSessionTokens = async (sessionId, tempDir) => {
        continue;
      }
      if (entry.message && entry.message.usage && entry.message.model) {
+       // Issue #1501: Skip duplicate JSONL entries (same message ID = same API response)
+       const msgId = entry.message.id;
+       if (msgId) {
+         if (seenMessageIds.has(msgId)) {
+           duplicateCount++;
+           continue; // Skip — already counted this message's usage
+         }
+         seenMessageIds.add(msgId);
+       }
        accumulateModelUsage(modelUsage, entry);
-       // Issue #
+       // Issue #1501: Track peak context usage per single API request
        const usage = entry.message.usage;
+       const requestContext = (usage.input_tokens || 0) + (usage.cache_creation_input_tokens || 0) + (usage.cache_read_input_tokens || 0);
+       const model = entry.message.model;
+       if (requestContext > (peakContextByModel[model] || 0)) {
+         peakContextByModel[model] = requestContext;
+       }
+       if (requestContext > globalPeakContext) {
+         globalPeakContext = requestContext;
+       }
+       // Issue #1491: Also track per-sub-session usage
       if (usage.input_tokens) currentSubSession.inputTokens += usage.input_tokens;
       if (usage.cache_creation_input_tokens) currentSubSession.cacheCreationTokens += usage.cache_creation_input_tokens;
       if (usage.cache_read_input_tokens) currentSubSession.cacheReadTokens += usage.cache_read_input_tokens;
       if (usage.output_tokens) currentSubSession.outputTokens += usage.output_tokens;
       currentSubSession.messageCount++;
+       // Issue #1501: Track peak context and output per sub-session
+       if (requestContext > currentSubSession.peakContextUsage) {
+         currentSubSession.peakContextUsage = requestContext;
+       }
+       if ((usage.output_tokens || 0) > currentSubSession.peakOutputUsage) {
+         currentSubSession.peakOutputUsage = usage.output_tokens || 0;
+       }
      }
    } catch {
      // Skip lines that aren't valid JSON
@@ -561,6 +595,8 @@ export const calculateSessionTokens = async (sessionId, tempDir) => {
    // Calculate cost for each model and store all characteristics
    for (const [modelId, usage] of Object.entries(modelUsage)) {
      const modelInfo = modelInfoMap[modelId];
+     // Issue #1501: Attach peak context usage per model
+     usage.peakContextUsage = peakContextByModel[modelId] || 0;
      // Calculate cost using pricing API
      if (modelInfo) {
        const costData = calculateModelCost(usage, modelInfo, true);
@@ -604,8 +640,11 @@ export const calculateSessionTokens = async (sessionId, tempDir) => {
      outputTokens: totalOutputTokens,
      totalTokens,
      totalCostUSD: hasCostData ? totalCostUSD : null,
-     // Issue #
-
+     // Issue #1501: Peak context usage (max single-request fill) and dedup stats
+     peakContextUsage: globalPeakContext,
+     duplicateEntriesSkipped: duplicateCount,
+     // Issue #1491/#1501: Sub-session and compactification data (always include for display)
+     subSessions,
      compactifications: compactifications.length > 0 ? compactifications : null,
    };
  } catch (readError) {
@@ -1248,6 +1287,13 @@ export const executeClaudeCommand = async params => {
    try {
      const tokenUsage = await calculateSessionTokens(sessionId, tempDir);
      if (tokenUsage) {
+       // Issue #1501: Log deduplication stats in verbose mode
+       if (tokenUsage.duplicateEntriesSkipped > 0) {
+         await log(`\n⚠️ JSONL deduplication: skipped ${tokenUsage.duplicateEntriesSkipped} duplicate entries (upstream: anthropics/claude-code#6805)`, { verbose: true });
+       }
+       if (tokenUsage.peakContextUsage > 0) {
+         await log(`📊 Peak single-request context: ${formatNumber(tokenUsage.peakContextUsage)} tokens`, { verbose: true });
+       }
        await log('\n💰 Token Usage Summary:');
        // Display per-model breakdown
        if (tokenUsage.modelUsage) {
@@ -1258,18 +1304,9 @@ export const executeClaudeCommand = async params => {
          await displayModelUsage(usage, log);
          // Display budget stats if flag is enabled
          if (argv.tokensBudgetStats && usage.modelInfo?.limit) {
-           await displayBudgetStats(usage, log);
+           await displayBudgetStats(usage, tokenUsage, log);
          }
        }
-       // Issue #1491: Display sub-session breakdown if compactification occurred
-       if (argv.tokensBudgetStats && tokenUsage.subSessions) {
-         const primaryModelInfo = Object.values(tokenUsage.modelUsage).find(u => u.modelInfo?.limit)?.modelInfo;
-         await displaySubSessionStats(tokenUsage, primaryModelInfo, log);
-       }
-       // Issue #1491: Display stream vs JSONL token comparison
-       if (argv.tokensBudgetStats && streamTokenUsage.eventCount > 0) {
-         await displayTokenComparison(streamTokenUsage, tokenUsage, log);
-       }
        // Show totals if multiple models were used
        if (modelIds.length > 1) {
          await log('\n 📈 Total across all models:');
package/src/github-merge-repo-actions.lib.mjs
ADDED
@@ -0,0 +1,103 @@
+ #!/usr/bin/env node
+ /**
+  * GitHub Repository-Wide Actions Monitoring
+  *
+  * Issue #1503: Functions to check and wait for ALL active GitHub Actions
+  * workflow runs across the entire repository. This is the "absolute safety
+  * mechanism" modeled after the /merge command's waitForBranchCI pattern.
+  *
+  * @see https://github.com/link-assistant/hive-mind/issues/1503
+  */
+
+ import { promisify } from 'util';
+ import { exec as execCallback } from 'child_process';
+ const exec = promisify(execCallback);
+
+ /**
+  * Get ALL active workflow runs across the entire repository (no branch filter).
+  * @param {string} owner - Repository owner
+  * @param {string} repo - Repository name
+  * @param {boolean} verbose - Whether to log verbose output
+  * @returns {Promise<{runs: Array, hasActiveRuns: boolean, count: number}>}
+  */
+ export async function getAllActiveRepoRuns(owner, repo, verbose = false) {
+   try {
+     const activeFilter = '.workflow_runs[] | select(.status=="in_progress" or .status=="queued" or .status=="waiting" or .status=="requested" or .status=="pending")';
+     const fields = '{id: .id, name: .name, status: .status, head_branch: .head_branch, head_sha: (.head_sha[:7])}';
+     const { stdout } = await exec(`gh api "repos/${owner}/${repo}/actions/runs?per_page=100" --jq '[${activeFilter}] | map(${fields})'`);
+     const runs = JSON.parse(stdout.trim() || '[]');
+     if (verbose && runs.length > 0) {
+       console.log(`[VERBOSE] repo-actions: ${runs.length} active run(s) in ${owner}/${repo}`);
+       for (const r of runs) console.log(`[VERBOSE] repo-actions: ${r.name} (${r.status}) on ${r.head_branch}`);
+     }
+     return { runs, hasActiveRuns: runs.length > 0, count: runs.length };
+   } catch {
+     return { runs: [], hasActiveRuns: false, count: 0 };
+   }
+ }
+
+ /**
+  * Wait for ALL active workflow runs in the repository to complete.
+  * Blocks until every in-progress/queued run across ALL branches finishes.
+  * @param {string} owner - Repository owner
+  * @param {string} repo - Repository name
+  * @param {Object} options - Wait options (timeout, pollInterval, onStatusUpdate)
+  * @param {boolean} verbose - Whether to log verbose output
+  * @returns {Promise<{success: boolean, waitedForRuns: boolean, timedOut: boolean, remainingRuns: Array}>}
+  */
+ export async function waitForAllRepoActions(owner, repo, options = {}, verbose = false) {
+   const { timeout = 45 * 60 * 1000, pollInterval = 5 * 60 * 1000, onStatusUpdate = null } = options;
+   const startTime = Date.now();
+   let peakRunCount = 0;
+
+   while (Date.now() - startTime < timeout) {
+     const active = await getAllActiveRepoRuns(owner, repo, verbose);
+     if (onStatusUpdate) {
+       try {
+         await onStatusUpdate({ ...active, elapsedMs: Date.now() - startTime });
+       } catch {
+         // Ignore callback errors — continue monitoring
+       }
+     }
+     if (!active.hasActiveRuns) {
+       return { success: true, waitedForRuns: peakRunCount > 0, timedOut: false, remainingRuns: [] };
+     }
+     peakRunCount = Math.max(peakRunCount, active.count);
+     await new Promise(resolve => setTimeout(resolve, pollInterval));
+   }
+   const finalRuns = await getAllActiveRepoRuns(owner, repo, verbose);
+   return { success: false, waitedForRuns: true, timedOut: true, remainingRuns: finalRuns.runs };
+ }
+
+ /**
+  * Multi-mechanism CI consensus check. Requires Check Runs API, Workflow Runs API,
+  * and optionally repo-wide active runs to ALL agree before concluding CI is complete.
+  * @param {Object} params
+  * @returns {Promise<{allAgree: boolean, mechanisms: Object, ciStatus: Object, workflowRuns: Array}>}
+  */
+ export async function checkCIConsensus({ owner, repo, prNumber, sha, waitForAllRepoActionsFlag, verbose, getDetailedCIStatus, getWorkflowRunsForSha }) {
+   const ciStatus = await getDetailedCIStatus(owner, repo, prNumber, verbose);
+   const checkRunsOK = ciStatus.status === 'success' || ciStatus.status === 'no_checks';
+
+   const workflowRuns = await getWorkflowRunsForSha(owner, repo, sha, verbose);
+   const workflowsOK = workflowRuns.length === 0 || workflowRuns.every(r => r.status === 'completed');
+
+   let repoOK = true;
+   let repoInfo = null;
+   if (waitForAllRepoActionsFlag) {
+     repoInfo = await getAllActiveRepoRuns(owner, repo, verbose);
+     repoOK = !repoInfo.hasActiveRuns;
+   }
+
+   const allAgree = checkRunsOK && workflowsOK && repoOK;
+   const mechanisms = {
+     checkRunsAPI: { complete: checkRunsOK, status: ciStatus.status },
+     workflowRunsAPI: { complete: workflowsOK, total: workflowRuns.length, inProgress: workflowRuns.filter(r => r.status !== 'completed').length },
+     repoActions: waitForAllRepoActionsFlag ? { complete: repoOK, count: repoInfo?.count ?? 0 } : { skipped: true },
+   };
+
+   if (verbose) {
+     console.log(`[VERBOSE] consensus: CheckRuns=${checkRunsOK}(${ciStatus.status}), WorkflowRuns=${workflowsOK}(${workflowRuns.length}), RepoActions=${waitForAllRepoActionsFlag ? repoOK : 'skip'} → ${allAgree ? 'AGREE' : 'DISAGREE'}`);
+   }
+   return { allAgree, mechanisms, ciStatus, workflowRuns };
+ }
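The new module's exports are consumed by solve.auto-merge.lib.mjs further down. As a minimal standalone usage sketch (not from the package: the owner/repo, PR number, and SHA values are placeholders, and the two status helpers are assumed to be importable as named exports from github-merge.lib.mjs, as their destructuring in the auto-merge diff suggests):

```js
import { getAllActiveRepoRuns, waitForAllRepoActions, checkCIConsensus } from './github-merge-repo-actions.lib.mjs';
import { getDetailedCIStatus, getWorkflowRunsForSha } from './github-merge.lib.mjs';

const owner = 'link-assistant';   // placeholder
const repo = 'hive-mind';         // placeholder

// 1. One-shot snapshot of every queued/in-progress run in the repository.
const snapshot = await getAllActiveRepoRuns(owner, repo, true);
console.log(`${snapshot.count} active run(s)`);

// 2. Block (45 min timeout and 5 min polling by default) until the repo is quiet.
const waited = await waitForAllRepoActions(owner, repo, { timeout: 10 * 60 * 1000 }, true);
if (waited.timedOut) console.log('Still running:', waited.remainingRuns.map(r => r.name));

// 3. Require Check Runs API, Workflow Runs API, and repo-wide actions to agree.
const consensus = await checkCIConsensus({
  owner,
  repo,
  prNumber: 123,        // placeholder PR number
  sha: 'abc1234',       // placeholder head SHA
  waitForAllRepoActionsFlag: true,
  verbose: true,
  getDetailedCIStatus,
  getWorkflowRunsForSha,
});
console.log(consensus.allAgree ? 'CI complete' : 'CI mechanisms disagree');
```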
package/src/github-merge.lib.mjs
CHANGED
@@ -1385,31 +1385,30 @@ export async function checkPreviousPRCommitsHadCI(owner, repo, prNumber, headSha
   * @param {boolean} verbose - Whether to log verbose output
   * @returns {Promise<{hasPRTriggers: boolean, hasWorkflowFiles: boolean, workflows: Array<{name: string, triggers: string[]}>}>}
   */
- export async function checkWorkflowsHavePRTriggers(owner, repo, verbose = false) {
+ export async function checkWorkflowsHavePRTriggers(owner, repo, verbose = false, ref = null) {
    try {
-     //
-     const
+     // Issue #1503: Support querying workflow files from a specific branch (ref)
+     const refParam = ref ? `?ref=${encodeURIComponent(ref)}` : '';
+     // List workflow files in .github/workflows/ (uses ref if provided, otherwise default branch)
+     const { stdout: listJson } = await exec(`gh api "repos/${owner}/${repo}/contents/.github/workflows${refParam}" --jq '[.[] | select(.name | test("\\\\.(yml|yaml)$")) | {name: .name, download_url: .download_url, path: .path}]' 2>/dev/null`);
      const files = JSON.parse(listJson.trim() || '[]');
 
      if (files.length === 0) {
-       if (verbose) {
-         console.log(`[VERBOSE] /merge: No workflow files found in ${owner}/${repo}/.github/workflows/ — no CI/CD will execute`);
-       }
-       // Issue #1480: hasWorkflowFiles=false is a strong signal that no CI/CD is configured at the file level
+       if (verbose) console.log(`[VERBOSE] /merge: No workflow files in ${owner}/${repo}/.github/workflows/`);
        return { hasPRTriggers: false, hasWorkflowFiles: false, workflows: [] };
      }
 
      const prTriggerPatterns = [/\bon:\s*\n\s+pull_request/m, /\bon:\s*\[.*pull_request.*\]/m, /\bon:\s*pull_request\b/m, /\bpull_request_target\b/m];
-
-     // Also check for push triggers (push to PR branches triggers CI)
      const pushTriggerPatterns = [/\bon:\s*\n\s+push/m, /\bon:\s*\[.*push.*\]/m, /\bon:\s*push\b/m];
+     // Issue #1503: Non-PR triggers for diagnostics (won't produce check-runs on PRs)
+     const nonPROnlyTriggerPatterns = [/\bworkflow_dispatch\b/m, /\bschedule\b/m, /\brepository_dispatch\b/m, /\bworkflow_call\b/m];
 
      const results = [];
 
      for (const file of files) {
        try {
-         // Fetch file content
-         const { stdout: contentJson } = await exec(`gh api "repos/${owner}/${repo}/contents/${file.path}" --jq '.content'`);
+         // Issue #1503: Fetch file content using same ref parameter for branch-specific workflows
+         const { stdout: contentJson } = await exec(`gh api "repos/${owner}/${repo}/contents/${file.path}${refParam}" --jq '.content'`);
          const content = Buffer.from(contentJson.trim().replace(/"/g, ''), 'base64').toString('utf-8');
 
          const triggers = [];
@@ -1419,13 +1418,15 @@ export async function checkWorkflowsHavePRTriggers(owner, repo, verbose = false)
          if (pushTriggerPatterns.some(p => p.test(content))) {
            triggers.push('push');
          }
+         // Issue #1503: Track non-PR triggers for diagnostics
+         const nonPRTriggers = nonPROnlyTriggerPatterns.filter(p => p.test(content)).map(p => p.source.replace(/\\b/g, ''));
 
          if (triggers.length > 0) {
            results.push({ name: file.name, triggers });
          }
 
          if (verbose) {
-           console.log(`[VERBOSE] /merge: Workflow ${file.name}:
+           console.log(`[VERBOSE] /merge: Workflow ${file.name}: pr_triggers=[${triggers.join(', ')}], non_pr_triggers=[${nonPRTriggers.join(', ')}]`);
          }
        } catch (fileError) {
          if (verbose) {
@@ -1454,6 +1455,9 @@ export async function checkWorkflowsHavePRTriggers(owner, repo, verbose = false)
  import { waitForCommitCI, checkBranchCIHealth, getMergeCommitSha } from './github-merge-ci.lib.mjs';
  export { waitForCommitCI, checkBranchCIHealth, getMergeCommitSha };
 
+ import { getAllActiveRepoRuns, waitForAllRepoActions, checkCIConsensus } from './github-merge-repo-actions.lib.mjs'; // Issue #1503
+ export { getAllActiveRepoRuns, waitForAllRepoActions, checkCIConsensus };
+
  export default {
    READY_LABEL,
    checkReadyLabelExists,
@@ -1482,15 +1486,15 @@ export default {
    rerunWorkflowRun,
    rerunFailedJobs,
    getWorkflowRunsForSha,
-   // Issue #1341: Post-merge CI waiting; Issue #1363: Detect active workflows
    waitForCommitCI,
    checkBranchCIHealth,
    getMergeCommitSha,
    getActiveRepoWorkflows,
-   // Issue #1480: Commit date, workflow PR triggers, and previous commit CI history for race condition detection
    getCommitDate,
    checkPreviousPRCommitsHadCI,
    checkWorkflowsHavePRTriggers,
-   // Issue #1413: Use issue timeline to find genuinely linked PRs (avoids false positives from text search)
    getLinkedPRsFromTimeline,
+   getAllActiveRepoRuns,
+   waitForAllRepoActions,
+   checkCIConsensus, // Issue #1503
  };
package/src/github.lib.mjs
CHANGED
@@ -368,7 +368,7 @@ export async function attachLogToGitHub(options) {
    resultModelUsage = null, // Issue #1454
    budgetStatsData = null, // Issue #1491: budget stats for comment
  } = options;
- const budgetStats = budgetStatsData ? buildBudgetStatsString(budgetStatsData.tokenUsage
+ const budgetStats = budgetStatsData ? buildBudgetStatsString(budgetStatsData.tokenUsage) : '';
  const targetName = targetType === 'pr' ? 'Pull Request' : 'Issue';
  const ghCommand = targetType === 'pr' ? 'pr' : 'issue';
  try {
package/src/solve.auto-merge.lib.mjs
CHANGED
@@ -33,7 +33,7 @@ const { reportError } = sentryLib;
 
  // Import GitHub merge functions
  const githubMergeLib = await import('./github-merge.lib.mjs');
- const { checkPRMergeable, checkMergePermissions, mergePullRequest, waitForCI, checkForBillingLimitError, getRepoVisibility, BILLING_LIMIT_ERROR_PATTERN, getDetailedCIStatus, rerunWorkflowRun, getWorkflowRunsForSha, getActiveRepoWorkflows, getCommitDate, checkWorkflowsHavePRTriggers } = githubMergeLib;
+ const { checkPRMergeable, checkMergePermissions, mergePullRequest, waitForCI, checkForBillingLimitError, getRepoVisibility, BILLING_LIMIT_ERROR_PATTERN, getDetailedCIStatus, rerunWorkflowRun, getWorkflowRunsForSha, getActiveRepoWorkflows, getCommitDate, checkWorkflowsHavePRTriggers, checkPreviousPRCommitsHadCI, getAllActiveRepoRuns, checkCIConsensus } = githubMergeLib;
 
  // Import GitHub functions for log attachment
  const githubLib = await import('./github.lib.mjs');
@@ -187,7 +187,7 @@ const checkForNonBotComments = async (owner, repo, prNumber, issueNumber, lastCh
   * - billing_limit: Billing/spending limit reached → stop (private) or wait (public)
   * - no_checks: No CI checks yet (race condition) → wait
   */
- const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCount = 1) => {
+ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCount = 1, prBranchRef = null) => {
    const blockers = [];
 
    // Use detailed CI status to distinguish between all possible states
@@ -239,7 +239,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    // Treat the same as "CI not triggered" to avoid infinite waiting.
    const conclusions = [...new Set(workflowRuns.map(r => r.conclusion))].join(', ');
    if (verbose) {
-
+     await log(`[VERBOSE] /merge: PR #${prNumber} has ${workflowRuns.length} workflow run(s) for SHA ${ciStatus.sha.substring(0, 7)}, but all completed without executing (conclusions: ${conclusions}) — check-runs will never appear`);
    }
    await log(formatAligned('ℹ️', 'CI workflows completed without executing:', `${conclusions} (${workflowRuns.map(r => r.name).join(', ')})`, 2));
    return { blockers, ciStatus, noCiConfigured: false, noCiTriggered: true, workflowRunConclusions: conclusions };
@@ -247,7 +247,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
 
    // Some workflow runs are still in progress or produced results — genuine race condition
    if (verbose) {
-
+     await log(`[VERBOSE] /merge: PR #${prNumber} has no CI check-runs yet, but ${workflowRuns.length} workflow run(s) were triggered for SHA ${ciStatus.sha.substring(0, 7)} - genuine race condition (waiting for check-runs to appear)`);
    }
    blockers.push({
      type: 'ci_pending',
@@ -269,13 +269,13 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    const commitInfo = await getCommitDate(owner, repo, ciStatus.sha, verbose);
 
    // Issue #1480: Parse workflow files for PR triggers (used in both grace period and post-grace checks)
-   const prTriggers = await checkWorkflowsHavePRTriggers(owner, repo, verbose);
+   const prTriggers = await checkWorkflowsHavePRTriggers(owner, repo, verbose, prBranchRef);
 
    // Issue #1480: If .github/workflows folder doesn't exist or has no workflow files,
    // that's a definitive signal — no CI/CD will execute, skip grace period entirely
    if (!prTriggers.hasWorkflowFiles) {
      if (verbose) {
-
+       await log(`[VERBOSE] /merge: PR #${prNumber} repo has no workflow files in .github/workflows/ — CI definitively not configured at file level`);
      }
      return { blockers, ciStatus, noCiConfigured: false, noCiTriggered: true };
    }
@@ -295,15 +295,32 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    // changed files, conditional workflows that don't match, etc.
    const MAX_NO_RUNS_CHECKS = 5;
    if (checkCount >= MAX_NO_RUNS_CHECKS) {
-     //
+     // Issue #1503 (enhanced): Before concluding CI was not triggered, check if
+     // previous commits in this PR had CI runs. If they did, CI should be expected
+     // for the current commit too — extend waiting with a higher threshold.
+     const MAX_NO_RUNS_CHECKS_WITH_CI_HISTORY = 10;
+     if (checkCount < MAX_NO_RUNS_CHECKS_WITH_CI_HISTORY) {
+       const previousCI = await checkPreviousPRCommitsHadCI(owner, repo, prNumber, ciStatus.sha, verbose);
+       if (previousCI.hadPreviousCI) {
+         // Previous commits had CI — this commit should too, keep waiting
+         await log(formatAligned('⚠️', 'CI history signal:', `${previousCI.previousCommitsWithCI} previous commit(s) had CI runs — extending wait (check ${checkCount}/${MAX_NO_RUNS_CHECKS_WITH_CI_HISTORY})`, 2));
+         blockers.push({
+           type: 'ci_pending',
+           message: `CI/CD workflow runs have not appeared yet — previous commits had CI runs, extending wait (check ${checkCount}/${MAX_NO_RUNS_CHECKS_WITH_CI_HISTORY})`,
+           details: prTriggers.workflows.map(w => w.name),
+         });
+         return { blockers, ciStatus, noCiConfigured: false, noCiTriggered: false };
+       }
+     }
+     // We've waited long enough (and no CI history signal) — CI was genuinely not triggered
      if (verbose) {
-
+       await log(formatAligned('ℹ️', 'CI not triggered:', `No workflow runs after ${checkCount} consecutive checks — concluding CI was not triggered`, 2));
      }
      return { blockers, ciStatus, noCiConfigured: false, noCiTriggered: true };
    }
 
    if (verbose) {
-
+     await log(formatAligned('⏳', 'Waiting for CI:', `No workflow runs for SHA ${ciStatus.sha.substring(0, 7)}, but workflows have PR/push triggers (${prTriggers.workflows.map(w => w.name).join(', ')}) — check ${checkCount}/${MAX_NO_RUNS_CHECKS}, commit age: ${commitInfo.ageSeconds ?? 'unknown'}s`, 2));
    }
    blockers.push({
      type: 'ci_pending',
@@ -313,7 +330,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
  } else if (commitInfo.ageSeconds !== null && commitInfo.ageSeconds < WORKFLOW_RUN_GRACE_PERIOD_SECONDS) {
    // No PR triggers found in workflow files, but commit is still recent — be safe and wait
    if (verbose) {
-
+     await log(`[VERBOSE] /merge: No PR/push triggers found in workflow files, but commit is only ${commitInfo.ageSeconds}s old — waiting to be safe`);
    }
    blockers.push({
      type: 'ci_pending',
@@ -325,7 +342,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    // Issue #1442: Fork PRs needing maintainer approval, paths-ignore filtering,
    // workflow conditions not matching, etc. all result in zero workflow runs.
    if (verbose) {
-
+     await log(`[VERBOSE] /merge: PR #${prNumber} has no CI checks and no workflow runs for SHA ${ciStatus.sha.substring(0, 7)} (commit age: ${commitInfo.ageSeconds ?? 'unknown'}s, no PR/push triggers in workflow files) — CI was not triggered`);
    }
    return { blockers, ciStatus, noCiConfigured: false, noCiTriggered: true };
  }
@@ -336,7 +353,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    // Do NOT add a ci_pending blocker. The mergeability check below will also
    // confirm this is mergeable, so blockers will be empty → PR IS MERGEABLE path.
    if (verbose) {
-
+     await log(`[VERBOSE] /merge: PR #${prNumber} has no CI checks and repo has no active workflows - no CI/CD configured`);
    }
    // Return early with no CI blocker, mergeability already confirmed
    return { blockers, ciStatus, noCiConfigured: true };
@@ -362,7 +379,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    if (incompleteRuns.length > 0) {
      // Some workflow runs are still in progress — more check-runs may appear
      if (verbose) {
-
+       await log(`[VERBOSE] /merge: PR #${prNumber} CI status is 'success' (${ciStatus.passedChecks.length} checks passed), but ${incompleteRuns.length} workflow run(s) still in progress — waiting for completion`);
      }
      blockers.push({
        type: 'ci_pending',
@@ -376,7 +393,7 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
    // (e.g., CodeFactor, Codecov). Check if the repo has workflows that should produce runs.
    const repoWorkflows = await getActiveRepoWorkflows(owner, repo, verbose);
    if (repoWorkflows.hasWorkflows) {
-     const prTriggers = await checkWorkflowsHavePRTriggers(owner, repo, verbose);
+     const prTriggers = await checkWorkflowsHavePRTriggers(owner, repo, verbose, prBranchRef);
      if (prTriggers.hasPRTriggers) {
        // Repo has workflows with PR triggers but no runs yet — CI hasn't started
        // This is the exact scenario from Case 2 of Issue #1480
@@ -385,12 +402,12 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
        const MAX_NO_RUNS_CHECKS = 5;
        if (checkCount >= MAX_NO_RUNS_CHECKS) {
          if (verbose) {
-
+           await log(`[VERBOSE] /merge: PR #${prNumber} CI 'success' with ${ciStatus.passedChecks.length} external checks, no workflow runs after ${checkCount} checks — trusting external checks`);
          }
          // Fall through — trust the success status from external checks
        } else {
          if (verbose) {
-
+           await log(`[VERBOSE] /merge: PR #${prNumber} CI status is 'success' (${ciStatus.passedChecks.length} external checks), but repo has PR-triggered workflows with 0 workflow runs — likely race condition (check ${checkCount}/${MAX_NO_RUNS_CHECKS})`);
          }
          // Wait for GitHub Actions to register workflow runs
          blockers.push({
@@ -492,8 +509,14 @@ const getMergeBlockers = async (owner, repo, prNumber, verbose = false, checkCou
  export const watchUntilMergeable = async params => {
    const { issueUrl, owner, repo, issueNumber, prNumber, prBranch, branchName, tempDir, argv } = params;
 
-   const
+   const rawWatchInterval = argv.watchInterval || 60; // seconds
+   // Issue #1503: Enforce minimum 5-minute (300s) CI check interval to conserve GitHub API rate limits.
+   // This prevents excessive API calls during long-running CI pipelines.
+   const MIN_CI_CHECK_INTERVAL_SECONDS = 300;
+   const watchInterval = Math.max(rawWatchInterval, MIN_CI_CHECK_INTERVAL_SECONDS);
    const isAutoMerge = argv.autoMerge || false;
+   // Issue #1503: --wait-for-all-actions-in-repository-before-mergable (default: true)
+   const waitForAllRepoActionsFlag = argv.waitForAllActionsInRepositoryBeforeMergable ?? argv['wait-for-all-actions-in-repository-before-mergable'] ?? true;
 
    // Track latest session data across all iterations for accurate pricing
    let latestSessionId = null;
@@ -513,11 +536,25 @@ export const watchUntilMergeable = async params => {
 
    let currentBackoffSeconds = watchInterval;
 
+   // Issue #1503: Track consecutive "no workflow runs" checks per-SHA separately from iteration count.
+   // The `checkCount` parameter in getMergeBlockers is a safety valve that triggers after
+   // MAX_NO_RUNS_CHECKS (5) consecutive checks with zero workflow runs, concluding CI was
+   // genuinely not triggered (paths-ignore, fork PRs, etc.). Previously, `iteration` (total
+   // loop count) was passed as `checkCount`, which meant after 5 iterations (regardless of
+   // CI state), any new push would immediately trigger the safety valve because checkCount
+   // was already >= 5. This caused false positive "Ready to merge" when a new commit was
+   // pushed and CI hadn't registered yet.
+   //
+   // Fix: Track the HEAD SHA and reset the counter when it changes (new push detected).
+   let consecutiveNoRunsChecks = 0;
+   let lastKnownHeadSha = null;
+
    await log('');
    await log(formatAligned('🔄', 'AUTO-RESTART-UNTIL-MERGEABLE MODE ACTIVE', ''));
    await log(formatAligned('', 'Monitoring PR:', `#${prNumber}`, 2));
    await log(formatAligned('', 'Mode:', isAutoMerge ? 'Auto-merge (will merge when ready)' : 'Auto-restart-until-mergeable (will NOT auto-merge)', 2));
-   await log(formatAligned('', 'Checking interval:', `${watchInterval} seconds`, 2));
+   await log(formatAligned('', 'Checking interval:', `${watchInterval} seconds (minimum: ${MIN_CI_CHECK_INTERVAL_SECONDS}s)`, 2));
+   await log(formatAligned('', 'Wait for all repo actions:', waitForAllRepoActionsFlag ? 'Yes (absolute safety)' : 'No', 2));
    await log(formatAligned('', 'Stop conditions:', 'PR merged, PR closed, or becomes mergeable', 2));
    await log(formatAligned('', 'Restart triggers:', 'New non-bot comments, CI failures, merge conflicts', 2));
    await log('');
@@ -554,8 +591,47 @@ export const watchUntilMergeable = async params => {
    await log(formatAligned('🔍', `Check #${iteration}:`, currentTime.toLocaleTimeString()));
 
    try {
+     // Issue #1503: Get the current HEAD SHA to detect new pushes and reset the
+     // consecutive no-runs counter. This prevents false positives where the counter
+     // from a previous commit's checks carries over to a new commit.
+     let currentHeadSha = null;
+     try {
+       const shaResult = await $`gh pr view ${prNumber} --repo ${owner}/${repo} --json headRefOid --jq .headRefOid`;
+       if (shaResult.code === 0) {
+         currentHeadSha = shaResult.stdout.toString().trim();
+       }
+     } catch {
+       // If SHA check fails, proceed with current counter (safe: doesn't reset)
+     }
+     if (currentHeadSha && currentHeadSha !== lastKnownHeadSha) {
+       if (lastKnownHeadSha !== null) {
+         await log(formatAligned('🔄', 'New commit detected:', `${lastKnownHeadSha.substring(0, 7)} → ${currentHeadSha.substring(0, 7)} (resetting CI check counter)`, 2));
+       }
+       lastKnownHeadSha = currentHeadSha;
+       consecutiveNoRunsChecks = 0;
+       // Issue #1503: Also reset the readyToMergeCommentPosted flag when SHA changes,
+       // so a new "Ready to merge" comment can be posted for the new commit's CI results.
+       readyToMergeCommentPosted = false;
+     }
+
+     // Issue #1503: Increment counter; getMergeBlockers will use it as a safety valve.
+     // If getMergeBlockers sees no workflow runs on this check, the counter stays incremented.
+     // If it sees workflow runs or checks, the counter is irrelevant (different code paths).
+     consecutiveNoRunsChecks++;
+
      // Get merge blockers
-     const { blockers, noCiConfigured, noCiTriggered, workflowRunConclusions } = await getMergeBlockers(owner, repo, prNumber, argv.verbose,
+     const { blockers, noCiConfigured, noCiTriggered, workflowRunConclusions, ciStatus } = await getMergeBlockers(owner, repo, prNumber, argv.verbose, consecutiveNoRunsChecks, prBranch);
+
+     // Issue #1503: Reset consecutive counter when CI checks or workflow runs were found.
+     // This ensures the safety valve only fires after truly consecutive "no runs" checks,
+     // not after interleaved pending/success/failure states that happened to reach the count.
+     if (ciStatus && ciStatus.status !== 'no_checks') {
+       // CI checks exist (pending, success, failure, etc.) — the "no runs" counter is irrelevant
+       consecutiveNoRunsChecks = 0;
+     } else if (noCiConfigured || noCiTriggered) {
+       // CI was definitively determined: either not configured or not triggered.
+       // Keep the counter as-is (it reached the safety valve or wasn't needed).
+     }
 
      // Check for new comments from non-bot users
      const { hasNewComments, comments } = await checkForNonBotComments(owner, repo, prNumber, issueNumber, lastCheckTime, argv.verbose);
@@ -576,6 +652,54 @@ export const watchUntilMergeable = async params => {
 
      // If PR is mergeable, no blockers, no new comments, and no uncommitted changes
      if (blockers.length === 0 && !hasNewComments && !hasUncommittedChanges) {
+       // Issue #1503 (enhanced): Multi-mechanism consensus + repo-wide action check.
+       // Before declaring PR mergeable, run multiple independent CI detection mechanisms
+       // and require all to agree. This catches race conditions where CI starts between
+       // checks or where interacting CI/CD pipelines affect mergeability.
+       if (!noCiConfigured) {
+         const DOUBLE_CHECK_DELAY_MS = 10000; // 10 seconds
+         await log(formatAligned('🔍', 'Multi-mechanism CI consensus check:', `Waiting ${DOUBLE_CHECK_DELAY_MS / 1000}s then verifying...`, 2));
+         await new Promise(resolve => setTimeout(resolve, DOUBLE_CHECK_DELAY_MS));
+
+         // Run multi-mechanism consensus: Check Runs API + Workflow Runs API + Repo-wide actions
+         const consensus = await checkCIConsensus({
+           owner,
+           repo,
+           prNumber,
+           sha: currentHeadSha || ciStatus?.sha,
+           waitForAllRepoActionsFlag,
+           verbose: argv.verbose,
+           getDetailedCIStatus,
+           getWorkflowRunsForSha,
+         });
+
+         if (!consensus.allAgree) {
+           const m = consensus.mechanisms;
+           await log(formatAligned('🔄', 'CI mechanisms DISAGREE:', `CheckRuns=${m.checkRunsAPI.status}, WorkflowRuns=${m.workflowRunsAPI.inProgress} in-progress, RepoActions=${m.repoActions.skipped ? 'skipped' : m.repoActions.count + ' active'}`, 2));
+           await log(formatAligned('⏳', 'Continuing to monitor...', 'Mechanisms must agree before declaring mergeable', 2));
+           consecutiveNoRunsChecks = 0;
+           lastCheckTime = currentTime;
+           const actualWaitSeconds = currentBackoffSeconds;
+           await log(formatAligned('⏱️', 'Next check in:', `${actualWaitSeconds} seconds...`, 2));
+           await log('');
+           await new Promise(resolve => setTimeout(resolve, actualWaitSeconds * 1000));
+           continue;
+         }
+         await log(formatAligned('✅', 'All CI mechanisms agree:', `CheckRuns=${consensus.mechanisms.checkRunsAPI.status}, WorkflowRuns=complete(${consensus.mechanisms.workflowRunsAPI.total}), RepoActions=${consensus.mechanisms.repoActions.skipped ? 'skipped' : 'clear'}`, 2));
+       } else if (waitForAllRepoActionsFlag) {
+         // Even with no CI configured, check repo-wide actions for absolute safety
+         const repoRuns = await getAllActiveRepoRuns(owner, repo, argv.verbose);
+         if (repoRuns.hasActiveRuns) {
+           await log(formatAligned('⏳', 'Waiting for repo-wide actions:', `${repoRuns.count} active run(s) in repository`, 2));
+           lastCheckTime = currentTime;
+           const actualWaitSeconds = currentBackoffSeconds;
+           await log(formatAligned('⏱️', 'Next check in:', `${actualWaitSeconds} seconds...`, 2));
+           await log('');
+           await new Promise(resolve => setTimeout(resolve, actualWaitSeconds * 1000));
+           continue;
+         }
+       }
+
        await log(formatAligned('✅', 'PR IS MERGEABLE!', ''));
 
        if (isAutoMerge) {
package/src/solve.config.lib.mjs
CHANGED
@@ -186,6 +186,11 @@ export const SOLVE_OPTION_DEFINITIONS = {
    description: 'Auto-restart until PR becomes mergeable (no iteration limit). Restarts on new comments from non-bot users, CI failures, merge conflicts, or other issues. Does NOT auto-merge.',
    default: true,
  },
+ 'wait-for-all-actions-in-repository-before-mergable': {
+   type: 'boolean',
+   description: 'Wait for ALL active GitHub Actions workflow runs in the entire repository to complete before declaring PR mergeable. Provides absolute safety against interacting CI/CD pipelines. Enabled by default.',
+   default: true,
+ },
  'auto-restart-on-non-updated-pull-request-description': {
    type: 'boolean',
    description: 'Automatically restart if PR title or description still contains auto-generated placeholder text after agent execution. Restarts with a hint about what was not updated.',