@link-assistant/hive-mind 1.46.3 → 1.46.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,24 @@
1
1
  # @link-assistant/hive-mind
2
2
 
3
+ ## 1.46.5
4
+
5
+ ### Patch Changes
6
+
7
+ - c900fb8: Usage stats improvements for Agent CLI and Claude Code CLI (Issue #1526)
8
+ - Fix context window 288% bug by skipping display when peakContextUsage is 0
9
+ - Add Agent CLI "Context and tokens usage" section with model/context parsing
10
+ - Shorter output format combining context window and output tokens on a single line
11
+ - Consolidated Total line with cost information
12
+ - Sub-sessions use numbered Context window lines directly
13
+
14
+ ## 1.46.4
15
+
16
+ ### Patch Changes
17
+
18
+ - a3bdea6: Fix CI/CD false positive for .gitkeep files using positive matching (Issue #1528).
19
+
20
+ Use consistent positive matching in detect-code-changes.mjs: "Files considered as code changes" now only shows files matching codePattern, so unknown file types like .gitkeep are naturally excluded without explicit exclusion rules. Add 40 unit tests covering the full detection pipeline.
21
+
3
22
  ## 1.46.3
4
23
 
5
24
  ### Patch Changes
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@link-assistant/hive-mind",
3
- "version": "1.46.3",
3
+ "version": "1.46.5",
4
4
  "description": "AI-powered issue solver and hive mind for collaborative problem solving",
5
5
  "main": "src/hive.mjs",
6
6
  "type": "module",
package/src/agent.lib.mjs CHANGED
@@ -40,6 +40,12 @@ export const parseAgentTokenUsage = output => {
40
40
  cacheWriteTokens: 0,
41
41
  totalCost: 0,
42
42
  stepCount: 0,
43
+ // Issue #1526: Track model and context info from step_finish events
44
+ requestedModelId: null,
45
+ respondedModelId: null,
46
+ contextLimit: null,
47
+ outputLimit: null,
48
+ peakContextUsage: 0, // Track peak context usage across steps
43
49
  };
44
50
 
45
51
  // Try to parse each line as JSON (agent outputs NDJSON format)
@@ -71,6 +77,24 @@ export const parseAgentTokenUsage = output => {
71
77
  if (parsed.part.cost !== undefined) {
72
78
  usage.totalCost += parsed.part.cost;
73
79
  }
80
+
81
+ // Issue #1526: Extract model info from step_finish events
82
+ if (parsed.part.model) {
83
+ if (parsed.part.model.requestedModelID) usage.requestedModelId = parsed.part.model.requestedModelID;
84
+ if (parsed.part.model.respondedModelID) usage.respondedModelId = parsed.part.model.respondedModelID;
85
+ }
86
+
87
+ // Issue #1526: Extract context limits and track peak context usage
88
+ if (parsed.part.context) {
89
+ if (parsed.part.context.contextLimit) usage.contextLimit = parsed.part.context.contextLimit;
90
+ if (parsed.part.context.outputLimit) usage.outputLimit = parsed.part.context.outputLimit;
91
+ // Track peak context usage: input_tokens (current request) is the context usage for this step
92
+ // The actual context used per request = input tokens + cache_read tokens for that request
93
+ const stepContextUsage = (tokens.input || 0) + (tokens.cache?.read || 0);
94
+ if (stepContextUsage > usage.peakContextUsage) {
95
+ usage.peakContextUsage = stepContextUsage;
96
+ }
97
+ }
74
98
  }
75
99
  } catch {
76
100
  // Skip lines that aren't valid JSON
@@ -560,6 +584,12 @@ export const executeAgentCommand = async params => {
560
584
  cacheWriteTokens: 0,
561
585
  totalCost: 0,
562
586
  stepCount: 0,
587
+ // Issue #1526: Track model and context info from step_finish events
588
+ requestedModelId: null,
589
+ respondedModelId: null,
590
+ contextLimit: null,
591
+ outputLimit: null,
592
+ peakContextUsage: 0,
563
593
  };
564
594
  // Helper to accumulate tokens from step_finish events during streaming
565
595
  const accumulateTokenUsage = data => {
@@ -576,6 +606,20 @@ export const executeAgentCommand = async params => {
576
606
  if (data.part.cost !== undefined) {
577
607
  streamingTokenUsage.totalCost += data.part.cost;
578
608
  }
609
+ // Issue #1526: Extract model info from step_finish events
610
+ if (data.part.model) {
611
+ if (data.part.model.requestedModelID) streamingTokenUsage.requestedModelId = data.part.model.requestedModelID;
612
+ if (data.part.model.respondedModelID) streamingTokenUsage.respondedModelId = data.part.model.respondedModelID;
613
+ }
614
+ // Issue #1526: Extract context limits and track peak context usage
615
+ if (data.part.context) {
616
+ if (data.part.context.contextLimit) streamingTokenUsage.contextLimit = data.part.context.contextLimit;
617
+ if (data.part.context.outputLimit) streamingTokenUsage.outputLimit = data.part.context.outputLimit;
618
+ const stepContextUsage = (tokens.input || 0) + (tokens.cache?.read || 0);
619
+ if (stepContextUsage > streamingTokenUsage.peakContextUsage) {
620
+ streamingTokenUsage.peakContextUsage = stepContextUsage;
621
+ }
622
+ }
579
623
  }
580
624
  };
581
625
 
@@ -141,6 +141,9 @@ export const displayCostComparison = async (publicCost, anthropicCost, log) => {
141
141
  * @param {Object} tokenUsage - Full token usage data (with subSessions)
142
142
  * @param {Function} log - Logging function
143
143
  */
144
+ /**
145
+ * Issue #1526: Updated to use single-line context+output format.
146
+ */
144
147
  export const displayBudgetStats = async (usage, tokenUsage, log) => {
145
148
  const modelInfo = usage.modelInfo;
146
149
  if (!modelInfo?.limit) {
@@ -156,48 +159,50 @@ export const displayBudgetStats = async (usage, tokenUsage, log) => {
156
159
  const hasMultipleSubSessions = subSessions.length > 1;
157
160
 
158
161
  if (hasMultipleSubSessions) {
159
- await log(' Sub sessions (between compact events):');
160
162
  for (let i = 0; i < subSessions.length; i++) {
161
163
  const sub = subSessions[i];
162
164
  const subPeak = sub.peakContextUsage || 0;
163
- let line = ` ${i + 1}. `;
164
- if (contextLimit && subPeak > 0) {
165
- const pct = ((subPeak / contextLimit) * 100).toFixed(0);
166
- line += `${formatNumber(subPeak)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`;
167
- } else {
168
- const subTotal = sub.inputTokens + sub.cacheCreationTokens + sub.cacheReadTokens;
169
- line += `${formatNumber(subTotal)} input tokens`;
165
+ const subCumulative = (sub.inputTokens || 0) + (sub.cacheCreationTokens || 0) + (sub.cacheReadTokens || 0);
166
+ const contextValue = subPeak > 0 ? subPeak : subCumulative;
167
+ const parts = [];
168
+ if (contextLimit && contextValue > 0) {
169
+ const pct = ((contextValue / contextLimit) * 100).toFixed(0);
170
+ parts.push(`${formatNumber(contextValue)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
170
171
  }
171
172
  if (outputLimit) {
172
173
  const outPct = ((sub.outputTokens / outputLimit) * 100).toFixed(0);
173
- line += `; ${formatNumber(sub.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`;
174
- } else {
175
- line += `; ${formatNumber(sub.outputTokens)} output tokens`;
174
+ parts.push(`${formatNumber(sub.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`);
175
+ }
176
+ if (parts.length > 0) {
177
+ await log(` ${i + 1}. Context window: ${parts.join(', ')}`);
176
178
  }
177
- await log(line);
178
179
  }
179
180
  } else {
180
- // Single sub-session: simplified format
181
+ // Single sub-session: single-line format
181
182
  const peakContext = usage.peakContextUsage || 0;
182
- if (contextLimit) {
183
- if (peakContext > 0) {
184
- const pct = ((peakContext / contextLimit) * 100).toFixed(0);
185
- await log(` Max context window: ${formatNumber(peakContext)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
186
- }
183
+ const cumulativeContext = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
184
+ const contextValue = peakContext > 0 ? peakContext : cumulativeContext;
185
+ const parts = [];
186
+ if (contextLimit && contextValue > 0) {
187
+ const pct = ((contextValue / contextLimit) * 100).toFixed(0);
188
+ parts.push(`${formatNumber(contextValue)} / ${formatNumber(contextLimit)} input tokens (${pct}%)`);
187
189
  }
188
190
  if (outputLimit) {
189
191
  const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
190
- await log(` Max output tokens: ${formatNumber(usage.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`);
192
+ parts.push(`${formatNumber(usage.outputTokens)} / ${formatNumber(outputLimit)} output tokens (${outPct}%)`);
193
+ }
194
+ if (parts.length > 0) {
195
+ await log(` Context window: ${parts.join(', ')}`);
191
196
  }
192
197
  }
193
198
 
194
- // Cumulative totals
199
+ // Cumulative totals — single line
195
200
  const totalInputNonCached = usage.inputTokens + usage.cacheCreationTokens;
196
201
  const cachedTokens = usage.cacheReadTokens;
197
- let totalLine = ` Total input tokens: ${formatNumber(totalInputNonCached)}`;
202
+ let totalLine = `${formatNumber(totalInputNonCached)}`;
198
203
  if (cachedTokens > 0) totalLine += ` + ${formatNumber(cachedTokens)} cached`;
199
- await log(totalLine);
200
- await log(` Total output tokens: ${formatNumber(usage.outputTokens)}`);
204
+ totalLine += ` input tokens, ${formatNumber(usage.outputTokens)} output tokens`;
205
+ await log(` Total: ${totalLine}`);
201
206
  };
202
207
 
203
208
  /**
@@ -261,39 +266,64 @@ const formatTokensCompact = tokens => {
261
266
  * @param {number|null} outputLimit - Output token limit for the model
262
267
  * @returns {string} Formatted sub-sessions string
263
268
  */
269
+ /**
270
+ * Issue #1526: Format sub-sessions list using numbered single-line format.
271
+ * Each sub-session gets: "N. Context window: X / Y input tokens (Z%), A / B output tokens (W%)"
272
+ */
264
273
  const formatSubSessionsList = (subSessions, contextLimit, outputLimit) => {
265
- let result = '\n\nSub sessions (between compact events):';
274
+ let result = '';
266
275
  for (let i = 0; i < subSessions.length; i++) {
267
276
  const sub = subSessions[i];
268
277
  const subPeakContext = sub.peakContextUsage || 0;
269
- const subTotalInput = sub.inputTokens + sub.cacheCreationTokens + sub.cacheReadTokens;
270
- let line = `\n${i + 1}. `;
271
- if (contextLimit && subPeakContext > 0) {
272
- const pct = ((subPeakContext / contextLimit) * 100).toFixed(0);
273
- line += `${formatTokensCompact(subPeakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`;
274
- } else {
275
- line += `${formatTokensCompact(subTotalInput)} input tokens`;
276
- }
277
- if (outputLimit) {
278
- const outPct = ((sub.outputTokens / outputLimit) * 100).toFixed(0);
279
- line += `; ${formatTokensCompact(sub.outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`;
280
- } else {
281
- line += `; ${formatTokensCompact(sub.outputTokens)} output tokens`;
282
- }
283
- result += line;
278
+ // Cumulative fallback: inputTokens + cacheCreationTokens + cacheReadTokens for this sub-session
279
+ const subCumulative = (sub.inputTokens || 0) + (sub.cacheCreationTokens || 0) + (sub.cacheReadTokens || 0);
280
+ result += formatContextOutputLine(subPeakContext, contextLimit, sub.outputTokens, outputLimit, `${i + 1}. `, subCumulative);
284
281
  }
285
282
  return result;
286
283
  };
287
284
 
288
285
  /**
289
- * Build budget stats string for GitHub PR comments (Issue #1491, #1501, #1508)
286
+ * Issue #1526: Build a single-line context window + output tokens string.
287
+ * Format: "- Context window: X / Y input tokens (Z%), A / B output tokens (W%)"
288
+ * When only one of context or output limits is available, shows just that part.
289
+ * @param {number} peakContext - Peak context usage (0 if unknown)
290
+ * @param {number} contextLimit - Context window limit (null if unknown)
291
+ * @param {number} outputTokens - Output tokens used
292
+ * @param {number} outputLimit - Output token limit (null if unknown)
293
+ * @param {string} [prefix='- '] - Line prefix
294
+ * @returns {string} Formatted line or empty string
295
+ */
296
+ const formatContextOutputLine = (peakContext, contextLimit, outputTokens, outputLimit, prefix = '- ', cumulativeContext = 0) => {
297
+ const parts = [];
298
+ if (contextLimit) {
299
+ // Use peakContextUsage when available (per-request peak from JSONL tracking).
300
+ // Fall back to cumulative total (inputTokens + cacheCreationTokens + cacheReadTokens)
301
+ // when peak is unknown (e.g., model only from result JSON, not in JSONL).
302
+ // Issue #1526: Never skip context display — always show what data we have.
303
+ const contextValue = peakContext > 0 ? peakContext : cumulativeContext;
304
+ if (contextValue > 0) {
305
+ const pct = ((contextValue / contextLimit) * 100).toFixed(0);
306
+ parts.push(`${formatTokensCompact(contextValue)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`);
307
+ }
308
+ }
309
+ if (outputLimit) {
310
+ const outPct = ((outputTokens / outputLimit) * 100).toFixed(0);
311
+ parts.push(`${formatTokensCompact(outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`);
312
+ }
313
+ if (parts.length === 0) return '';
314
+ return `\n${prefix}Context window: ${parts.join(', ')}`;
315
+ };
316
+
317
+ /**
318
+ * Build budget stats string for GitHub PR comments (Issue #1491, #1501, #1508, #1526)
290
319
  * Format requested by user: sub-sessions between compactification events,
291
320
  * per-model breakdown, cumulative totals with cached tokens shown separately.
292
321
  * Issue #1508: When multiple models are used, token and context usage is now split by model.
293
322
  * Sub-sessions are shown as a global section (not duplicated per model) since JSONL
294
323
  * sub-session tracking is global across all models.
295
- * @param {Object} tokenUsage - Token usage data from calculateSessionTokens
296
- * @param {Object|null} streamTokenUsage - Token usage from stream JSON events (used for comparison, not displayed)
324
+ * Issue #1526: Shorter output format: context window + output tokens on a single line.
325
+ * Fix: context window fallback uses the cumulative per-request total (inputTokens + cacheCreationTokens + cacheReadTokens) when peak usage is unknown.
326
+ * @param {Object} tokenUsage - Token usage data from calculateSessionTokens or buildAgentBudgetStats
297
327
  * @returns {string} Formatted markdown string for PR comment
298
328
  */
299
329
  export const buildBudgetStatsString = tokenUsage => {
@@ -329,61 +359,73 @@ export const buildBudgetStatsString = tokenUsage => {
329
359
  if (isMultiModel) stats += `\n\n**${modelName}:**`;
330
360
 
331
361
  if (!isMultiModel && hasMultipleSubSessions) {
332
- // Single-model + multiple sub-sessions: show sub-sessions under that model
362
+ // Single-model + multiple sub-sessions: show numbered sub-sessions under that model
333
363
  stats += formatSubSessionsList(subSessions, contextLimit, outputLimit);
334
- } else if (!isMultiModel && !hasMultipleSubSessions) {
335
- // Single-model + single sub-session: simplified format with context/output limits
336
- const peakContext = usage.peakContextUsage || 0;
337
- if (contextLimit) {
338
- if (peakContext > 0) {
339
- const pct = ((peakContext / contextLimit) * 100).toFixed(0);
340
- stats += `\n- Max context window: ${formatTokensCompact(peakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`;
341
- } else {
342
- const totalInput = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
343
- const pct = ((totalInput / contextLimit) * 100).toFixed(0);
344
- stats += `\n- Context window: ${formatTokensCompact(totalInput)} / ${formatTokensCompact(contextLimit)} tokens (${pct}%)`;
345
- }
346
- }
347
- if (outputLimit) {
348
- const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
349
- stats += `\n- Max output tokens: ${formatTokensCompact(usage.outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`;
350
- }
351
364
  } else {
352
- // Multi-model (single or multiple sub-sessions): show per-model context/output limits
353
- // Issue #1508: Context window and max output tokens should be split by model
365
+ // Issue #1526: Single line format for context window + output tokens
366
+ // Use peakContextUsage when available; fall back to cumulative total when peak is unknown
367
+ // (e.g., for result-JSON-sourced sub-agent models where only cumulative totals are available)
354
368
  const peakContext = usage.peakContextUsage || 0;
355
- if (contextLimit) {
356
- if (peakContext > 0) {
357
- const pct = ((peakContext / contextLimit) * 100).toFixed(0);
358
- stats += `\n- Max context window: ${formatTokensCompact(peakContext)} / ${formatTokensCompact(contextLimit)} input tokens (${pct}%)`;
359
- } else {
360
- const totalInput = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
361
- const pct = ((totalInput / contextLimit) * 100).toFixed(0);
362
- stats += `\n- Context window: ${formatTokensCompact(totalInput)} / ${formatTokensCompact(contextLimit)} tokens (${pct}%)`;
363
- }
364
- }
365
- if (outputLimit) {
366
- const outPct = ((usage.outputTokens / outputLimit) * 100).toFixed(0);
367
- stats += `\n- Max output tokens: ${formatTokensCompact(usage.outputTokens)} / ${formatTokensCompact(outputLimit)} output tokens (${outPct}%)`;
368
- }
369
+ const cumulativeContext = usage.inputTokens + usage.cacheCreationTokens + usage.cacheReadTokens;
370
+ stats += formatContextOutputLine(peakContext, contextLimit, usage.outputTokens, outputLimit, '- ', cumulativeContext);
369
371
  }
370
372
 
371
373
  // Cumulative totals per model: input tokens + cached shown separately
374
+ // Issue #1526: Shorter format — single "Total:" line
372
375
  const totalInputNonCached = usage.inputTokens + usage.cacheCreationTokens;
373
376
  const cachedTokens = usage.cacheReadTokens;
374
- stats += `\n\nTotal input tokens: ${formatTokensCompact(totalInputNonCached)}`;
375
- if (cachedTokens > 0) stats += ` + ${formatTokensCompact(cachedTokens)} cached`;
376
- stats += `\nTotal output tokens: ${formatTokensCompact(usage.outputTokens)} output`;
377
+ let totalLine = `${formatTokensCompact(totalInputNonCached)}`;
378
+ if (cachedTokens > 0) totalLine += ` + ${formatTokensCompact(cachedTokens)} cached`;
379
+ totalLine += ` input tokens, ${formatTokensCompact(usage.outputTokens)} output tokens`;
377
380
 
378
381
  // Issue #1508: Show per-model cost when available
379
382
  if (usage.costUSD !== null && usage.costUSD !== undefined) {
380
- stats += `\nCost: $${usage.costUSD.toFixed(6)}`;
383
+ totalLine += `, $${usage.costUSD.toFixed(6)} cost`;
381
384
  }
385
+
386
+ stats += `\n\nTotal: ${totalLine}`;
382
387
  }
383
388
  }
384
389
 
385
- // Stream vs JSONL comparison — kept for internal diagnostics only in verbose/debug mode
386
- // Not shown to users per feedback (Issue #1501 PR comment)
387
-
388
390
  return stats;
389
391
  };
392
+
393
+ /**
394
+ * Issue #1526: Build budget stats data from Agent CLI token/context information.
395
+ * Converts Agent CLI parsed data into the same format used by calculateSessionTokens
396
+ * so that buildBudgetStatsString can render it uniformly.
397
+ * @param {Object} tokenUsage - Token usage from parseAgentTokenUsage (with context/model info)
398
+ * @param {Object|null} pricingInfo - Pricing info from calculateAgentPricing
399
+ * @returns {Object|null} Budget stats data compatible with buildBudgetStatsString, or null if no data
400
+ */
401
+ export const buildAgentBudgetStats = (tokenUsage, pricingInfo) => {
402
+ if (!tokenUsage || tokenUsage.stepCount === 0) return null;
403
+
404
+ const modelName = pricingInfo?.modelName || tokenUsage.respondedModelId || tokenUsage.requestedModelId || 'Unknown';
405
+ const modelId = tokenUsage.respondedModelId || tokenUsage.requestedModelId || pricingInfo?.modelId || 'unknown';
406
+
407
+ // Use context limits from step_finish events if available, otherwise from pricing model info
408
+ const contextLimit = tokenUsage.contextLimit || pricingInfo?.modelInfo?.limit?.context || null;
409
+ const outputLimit = tokenUsage.outputLimit || pricingInfo?.modelInfo?.limit?.output || null;
410
+
411
+ const modelUsageEntry = {
412
+ inputTokens: tokenUsage.inputTokens,
413
+ cacheCreationTokens: tokenUsage.cacheWriteTokens || 0,
414
+ cacheReadTokens: tokenUsage.cacheReadTokens || 0,
415
+ outputTokens: tokenUsage.outputTokens,
416
+ modelName,
417
+ modelInfo: contextLimit || outputLimit ? { limit: { context: contextLimit, output: outputLimit } } : null,
418
+ peakContextUsage: tokenUsage.peakContextUsage || 0,
419
+ costUSD: pricingInfo?.totalCostUSD ?? null,
420
+ };
421
+
422
+ return {
423
+ modelUsage: { [modelId]: modelUsageEntry },
424
+ subSessions: [],
425
+ inputTokens: tokenUsage.inputTokens,
426
+ cacheCreationTokens: tokenUsage.cacheWriteTokens || 0,
427
+ cacheReadTokens: tokenUsage.cacheReadTokens || 0,
428
+ outputTokens: tokenUsage.outputTokens,
429
+ totalTokens: tokenUsage.inputTokens + (tokenUsage.cacheWriteTokens || 0) + tokenUsage.outputTokens,
430
+ };
431
+ };
@@ -497,7 +497,7 @@ export const showSessionSummary = async (sessionId, limitReached, argv, issueUrl
497
497
  export const verifyResults = async (owner, repo, branchName, issueNumber, prNumber, prUrl, referenceTime, argv, shouldAttachLogs, shouldRestart = false, sessionId = null, tempDir = null, anthropicTotalCostUSD = null, publicPricingEstimate = null, pricingInfo = null, errorDuringExecution = false, sessionType = 'new', resultModelUsage = null, streamTokenUsage = null) => {
498
498
  await log('\n🔍 Searching for created pull requests or comments...');
499
499
 
500
- // Issue #1491: Build budget stats data for GitHub comment (computed once, used in both PR and issue paths)
500
+ // Issue #1491, #1526: Build budget stats data for GitHub comment (computed once, used in both PR and issue paths)
501
501
  let budgetStatsData = null;
502
502
  if (argv.tokensBudgetStats && sessionId && tempDir) {
503
503
  try {
@@ -510,6 +510,18 @@ export const verifyResults = async (owner, repo, branchName, issueNumber, prNumb
510
510
  if (argv.verbose) await log(` ⚠️ Could not calculate budget stats: ${budgetError.message}`, { verbose: true });
511
511
  }
512
512
  }
513
+ // Issue #1526: Build budget stats from Agent CLI token/context data when no JSONL session available
514
+ if (!budgetStatsData && argv.tokensBudgetStats && pricingInfo?.tokenUsage) {
515
+ try {
516
+ const { buildAgentBudgetStats } = await import('./claude.budget-stats.lib.mjs');
517
+ const agentBudgetData = buildAgentBudgetStats(pricingInfo.tokenUsage, pricingInfo);
518
+ if (agentBudgetData) {
519
+ budgetStatsData = { tokenUsage: agentBudgetData };
520
+ }
521
+ } catch (agentBudgetError) {
522
+ if (argv.verbose) await log(` ⚠️ Could not build agent budget stats: ${agentBudgetError.message}`, { verbose: true });
523
+ }
524
+ }
513
525
 
514
526
  try {
515
527
  // Get the current user's GitHub username