agent-state-machine 2.3.0 → 2.5.0

package/bin/cli.js CHANGED
@@ -99,6 +99,7 @@ Options:
  --new, -n Generate a new remote follow path
  --full-auto, -a Auto-select first option for choice interactions (no blocking)
  --delay, -d Seconds to wait before auto-select in full-auto mode (default: 20)
+ --non-verbose, -q Suppress per-agent token usage display (show only final summary)
  -reset Reset workflow state before running
  -reset-hard Hard reset workflow before running
  --help, -h Show help
@@ -190,6 +191,50 @@ function summarizeStatus(state) {
    return state.status ? ` [${state.status}]` : '';
  }

+ /**
+  * Display usage summary after workflow completion
+  */
+ function displayUsageSummary(runtime) {
+   const u = runtime._usageTotals;
+   if (!u || (!u.totalInputTokens && !u.totalOutputTokens)) return;
+
+   const C = {
+     bold: '\x1b[1m',
+     dim: '\x1b[2m',
+     cyan: '\x1b[36m',
+     reset: '\x1b[0m'
+   };
+
+   const formatTokens = (count) => {
+     if (count >= 1000000) return `${(count / 1000000).toFixed(1)}M`;
+     if (count >= 10000) return `${Math.round(count / 1000)}k`;
+     if (count >= 1000) return `${(count / 1000).toFixed(1)}k`;
+     return count.toString();
+   };
+
+   console.log(`\n${C.bold}Token Usage Summary${C.reset}`);
+   console.log(`${C.dim}${'─'.repeat(40)}${C.reset}`);
+   console.log(` Input: ${formatTokens(u.totalInputTokens)}`);
+   console.log(` Output: ${formatTokens(u.totalOutputTokens)}`);
+   if (u.totalCachedTokens > 0) {
+     console.log(` Cached: ${formatTokens(u.totalCachedTokens)}`);
+   }
+   console.log(` ${C.bold}Total: ${formatTokens(u.totalInputTokens + u.totalOutputTokens)}${C.reset}`);
+   if (u.totalCost > 0) {
+     console.log(` ${C.cyan}Cost: $${u.totalCost.toFixed(4)}${C.reset}`);
+   }
+
+   // Show per-model breakdown if multiple models used
+   const models = Object.keys(u.modelUsage || {});
+   if (models.length > 1) {
+     console.log(`\n${C.dim}By Model:${C.reset}`);
+     for (const model of models) {
+       const m = u.modelUsage[model];
+       console.log(` ${model}: ${formatTokens(m.inputTokens)} in / ${formatTokens(m.outputTokens)} out`);
+     }
+   }
+ }
+
  function listWorkflows() {
    const root = workflowsRoot();

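As a quick reference, the inline formatTokens helper above compacts raw counts into k/M units. Expected outputs for a few sample inputs:

    formatTokens(850);     // '850'
    formatTokens(1234);    // '1.2k' (1,000-9,999: one decimal)
    formatTokens(45678);   // '46k'  (10,000+: rounded to whole thousands)
    formatTokens(2500000); // '2.5M' (1,000,000+: one decimal)
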
@@ -242,7 +287,8 @@ async function runOrResume(
    preReset = false,
    preResetHard = false,
    fullAuto = false,
-   autoSelectDelay = null
+   autoSelectDelay = null,
+   nonVerbose = false
  } = {}
  ) {
    const workflowDir = resolveWorkflowDir(workflowName);
@@ -292,13 +338,7 @@ async function runOrResume(
    remoteUrl = process.env.STATE_MACHINE_REMOTE_URL || DEFAULT_REMOTE_URL;
  }

- // Enable remote follow mode if we have a URL
- if (remoteUrl) {
-   const sessionToken = ensureRemotePath(configFile, { forceNew: forceNewRemotePath });
-   await runtime.enableRemote(remoteUrl, { sessionToken, uiBaseUrl: useLocalServer });
- }
-
- // Set full-auto mode from CLI flag (will be merged with config.js during runWorkflow)
+ // Set full-auto mode from CLI flag BEFORE enabling remote (so session_init includes correct config)
  if (fullAuto) {
    runtime.workflowConfig.fullAuto = true;
    if (autoSelectDelay !== null) {
@@ -308,6 +348,17 @@ async function runOrResume(
    console.log(`\n\x1b[36m\x1b[1m⚡ Full-auto mode enabled\x1b[0m - Agent will auto-select recommended options after ${delay}s countdown`);
  }

+ // Enable remote follow mode if we have a URL
+ if (remoteUrl) {
+   const sessionToken = ensureRemotePath(configFile, { forceNew: forceNewRemotePath });
+   await runtime.enableRemote(remoteUrl, { sessionToken, uiBaseUrl: useLocalServer });
+ }
+
+ // Set non-verbose mode from CLI flag
+ if (nonVerbose) {
+   runtime.workflowConfig.nonVerbose = true;
+ }
+
  // Prevent system sleep while workflow runs (macOS only)
  // Display can still sleep, but system stays awake for remote follow
  const stopCaffeinate = preventSleep();
@@ -317,6 +368,9 @@ async function runOrResume(

  try {
    await runtime.runWorkflow(workflowUrl);
+
+   // Display usage summary after workflow completion
+   displayUsageSummary(runtime);
  } finally {
    // Allow sleep again
    if (stopCaffeinate) {
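Note that displayUsageSummary is called inside the try block rather than the finally, so the summary prints only when runWorkflow resolves; a failed run skips it but still releases the caffeinate lock.
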
@@ -385,6 +439,7 @@ async function main() {
  const preReset = args.includes('-reset');
  const preResetHard = args.includes('-reset-hard');
  const fullAuto = args.includes('--full-auto') || args.includes('-a');
+ const nonVerbose = args.includes('--non-verbose') || args.includes('-q') || args.includes('--quiet');
  const remoteEnabled = !useLocalServer; // Use Vercel if not local

  // Parse --delay or -d flag
@@ -405,7 +460,8 @@ async function main() {
    preReset,
    preResetHard,
    fullAuto,
-   autoSelectDelay
+   autoSelectDelay,
+   nonVerbose
  });
  } catch (err) {
    console.error('Error:', err.message || String(err));
package/lib/llm.js CHANGED
@@ -12,6 +12,111 @@ import { resolveUnknownModel } from './runtime/model-resolution.js';

  const require = createRequire(import.meta.url);

+ /**
+  * Parse Claude CLI JSON output
+  * @param {string} output - Raw JSON output from claude --output-format json
+  * @returns {{ text: string, model: string|null, usage: object|null }}
+  */
+ function parseClaudeOutput(output) {
+   try {
+     const json = JSON.parse(output);
+     const modelUsage = json.modelUsage || {};
+     const modelName = Object.keys(modelUsage)[0] || null;
+
+     const usage = json.usage ? {
+       inputTokens: json.usage.input_tokens || 0,
+       outputTokens: json.usage.output_tokens || 0,
+       cacheReadInputTokens: json.usage.cache_read_input_tokens || 0,
+       cacheCreationInputTokens: json.usage.cache_creation_input_tokens || 0,
+       cost: json.total_cost_usd || null
+     } : null;
+
+     return {
+       text: json.result || output,
+       model: modelName,
+       usage
+     };
+   } catch {
+     return { text: output, model: null, usage: null };
+   }
+ }
+
+ /**
+  * Parse Gemini CLI JSON output
+  * @param {string} output - Raw JSON output from gemini --output-format json
+  * @returns {{ text: string, model: string|null, usage: object|null }}
+  */
+ function parseGeminiOutput(output) {
+   try {
+     const json = JSON.parse(output);
+     const stats = json.stats?.models || {};
+     const modelName = Object.keys(stats)[0] || null;
+     const tokens = modelName ? stats[modelName]?.tokens || {} : {};
+
+     const usage = {
+       inputTokens: tokens.input || tokens.prompt || 0,
+       outputTokens: tokens.candidates || 0,
+       cachedTokens: tokens.cached || 0,
+       thoughtTokens: tokens.thoughts || 0
+     };
+
+     return {
+       text: json.response || output,
+       model: modelName,
+       usage
+     };
+   } catch {
+     return { text: output, model: null, usage: null };
+   }
+ }
+
+ /**
+  * Parse Codex CLI JSON output (NDJSON format)
+  * @param {string} output - Raw NDJSON output from codex --json
+  * @returns {{ text: string, model: string|null, usage: object|null }}
+  */
+ function parseCodexOutput(output) {
+   const lines = output.trim().split('\n');
+   let text = '';
+   let usage = null;
+
+   for (const line of lines) {
+     try {
+       const json = JSON.parse(line);
+       // Extract text from agent_message items
+       if (json.type === 'item.completed' && json.item?.type === 'agent_message') {
+         text = json.item.text || text;
+       }
+       // Extract usage from turn.completed event
+       if (json.type === 'turn.completed' && json.usage) {
+         usage = {
+           inputTokens: json.usage.input_tokens || 0,
+           outputTokens: json.usage.output_tokens || 0,
+           cachedInputTokens: json.usage.cached_input_tokens || 0
+         };
+       }
+     } catch {
+       // Non-JSON line - might be the actual response text
+       if (!text && line.trim()) text = line;
+     }
+   }
+
+   return { text, model: null, usage };
+ }
+
+ /**
+  * Parse CLI output based on tool type
+  * @param {string} output - Raw CLI output
+  * @param {string} baseCmd - Base command (claude, gemini, codex)
+  * @returns {{ text: string, model: string|null, usage: object|null }}
+  */
+ function parseCLIOutput(output, baseCmd) {
+   if (baseCmd === 'claude') return parseClaudeOutput(output);
+   if (baseCmd === 'gemini') return parseGeminiOutput(output);
+   if (baseCmd === 'codex') return parseCodexOutput(output);
+   return { text: output.trim(), model: null, usage: null };
+ }
+
  /**
   * LLM Helper Module
   *
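To make the field mapping concrete, below is a minimal sketch of a payload in the shape parseClaudeOutput expects. The sample values and model name are invented, and real claude --output-format json output may carry additional fields:

    const sample = JSON.stringify({
      result: 'Hello from Claude',
      total_cost_usd: 0.0123,
      modelUsage: { 'claude-sonnet-4-5': {} }, // model name is illustrative
      usage: { input_tokens: 1200, output_tokens: 340, cache_read_input_tokens: 800 }
    });

    parseClaudeOutput(sample);
    // => { text: 'Hello from Claude', model: 'claude-sonnet-4-5',
    //      usage: { inputTokens: 1200, outputTokens: 340, cacheReadInputTokens: 800,
    //               cacheCreationInputTokens: 0, cost: 0.0123 } }
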
@@ -250,17 +355,15 @@ async function executeCLI(command, promptText, options = {}, apiKeys = {})
  if (baseCmd === 'claude') {
    args.push('--print');
    args.push('--permission-mode', 'acceptEdits');
+   args.push('--output-format', 'json');
    // Input via stdin
  } else if (baseCmd === 'gemini') {
    args.push('--approval-mode', 'auto_edit');
+   args.push('--output-format', 'json');
    // Input via stdin
  } else if (baseCmd === 'codex') {
    ensureCodexExec();
-   const lastMessageFile = path.join(
-     os.tmpdir(),
-     `codex-last-message-${process.pid}-${Date.now()}.txt`
-   );
-   args.push('--output-last-message', lastMessageFile);
+   args.push('--json');
    args.push('-'); // Explicitly read from stdin
  } else {
    // Generic CLI: Fallback to temp file if not a known stdin consumer
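The net effect of this hunk on the assembled argument lists (model and prompt plumbing elsewhere in executeCLI omitted):

    // Flags appended per tool after this change:
    //   claude: --print --permission-mode acceptEdits --output-format json
    //   gemini: --approval-mode auto_edit --output-format json
    //   codex:  --json -   ('-' keeps reading the prompt from stdin)

Codex no longer writes its last message to a temp file; both the response text and the usage stats now come from the same NDJSON stream.
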
@@ -310,24 +413,23 @@ async function executeCLI(command, promptText, options = {}, apiKeys = {})
  }

  if (code === 0) {
-   if (baseCmd === 'codex') {
-     const outputFlagIndex = args.findIndex(a => a === '--output-last-message' || a === '-o');
-     const outputFile = outputFlagIndex >= 0 ? args[outputFlagIndex + 1] : null;
-     if (outputFile && fs.existsSync(outputFile)) {
-       try {
-         stdout = fs.readFileSync(outputFile, 'utf-8');
-       } finally {
-         try { fs.unlinkSync(outputFile); } catch {}
-       }
-     }
+   // Parse JSON output for standard CLI tools
+   if (isStandardCLI) {
+     const parsed = parseCLIOutput(stdout, baseCmd);
+     resolve({
+       text: parsed.text,
+       model: parsed.model || command,
+       provider: 'cli',
+       usage: parsed.usage
+     });
+   } else {
+     resolve({
+       text: stdout.trim(),
+       model: command,
+       provider: 'cli',
+       usage: null
+     });
    }
-
-   resolve({
-     text: stdout.trim(),
-     model: command,
-     provider: 'cli',
-     usage: null
-   });
  } else {
    reject(new Error(`CLI command failed (exit ${code}): ${stderr || stdout}`));
  }
@@ -482,6 +584,16 @@ export async function llm(context, options)
    result = await executeCLI(modelConfig, fullPrompt, options, apiKeys);
  }

+ // Record usage in agent tracker (if active)
+ if (result.usage) {
+   try {
+     const { recordLLMUsage } = await import('./runtime/agent.js');
+     recordLLMUsage(result.usage, result.model, result.provider);
+   } catch {
+     // Agent tracking not available (outside agent context)
+   }
+ }
+
  return { ...result, fullPrompt };
  }

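The dynamic await import('./runtime/agent.js') above sidesteps a static circular dependency: agent.js itself imports detectAvailableCLIs from llm.js, as the agent.js hunks further down show.
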
@@ -89,6 +89,7 @@ export class RemoteClient {
  * @param {string} options.serverUrl - Base URL of remote server (e.g., https://example.vercel.app)
  * @param {string} options.workflowName - Name of the workflow
  * @param {function} options.onInteractionResponse - Callback when interaction response received
+ * @param {function} [options.onConfigUpdate] - Callback when config update received from browser
  * @param {function} [options.onStatusChange] - Callback when connection status changes
  * @param {string} [options.sessionToken] - Optional session token to reuse
  * @param {boolean} [options.uiBaseUrl] - If true, return base URL for UI instead of /s/{token}
@@ -97,6 +98,7 @@ export class RemoteClient {
  this.serverUrl = options.serverUrl.replace(/\/$/, ''); // Remove trailing slash
  this.workflowName = options.workflowName;
  this.onInteractionResponse = options.onInteractionResponse;
+ this.onConfigUpdate = options.onConfigUpdate || (() => {});
  this.onStatusChange = options.onStatusChange || (() => {});
  this.uiBaseUrl = Boolean(options.uiBaseUrl);

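A minimal construction sketch using the documented options; the URL, workflow name, and callback bodies are placeholders:

    const client = new RemoteClient({
      serverUrl: 'https://example.vercel.app',
      workflowName: 'my-workflow',
      onInteractionResponse: (slug, targetKey, response) => { /* resume pending interaction */ },
      onConfigUpdate: ({ fullAuto, autoSelectDelay, stop }) => { /* apply browser-side change */ }
    });
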
@@ -166,16 +168,18 @@
  }

  /**
-  * Send initial session info with history
+  * Send initial session info with history and config
   * @param {Array} history - Array of history entries
+  * @param {object} [config] - Optional workflow config (fullAuto, autoSelectDelay)
   */
- async sendSessionInit(history = []) {
+ async sendSessionInit(history = [], config = null) {
    this.initialHistorySent = true;
    await this.send({
      type: 'session_init',
      sessionToken: this.sessionToken,
      workflowName: this.workflowName,
      history,
+     config,
    });
  }

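With full-auto configured before enableRemote (the reordering in cli.js above), the session_init message now carries the effective config. An illustrative payload, with placeholder token and delay values:

    {
      type: 'session_init',
      sessionToken: 'abc123',
      workflowName: 'my-workflow',
      history: [],
      config: { fullAuto: true, autoSelectDelay: 20 }
    }
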
@@ -231,7 +235,7 @@
  }

  /**
-  * Poll for interaction responses
+  * Poll for interaction responses and config updates
   * Uses 35s timeout to stay under Vercel's 50s limit with buffer
   */
  async poll() {
@@ -246,20 +250,29 @@
  consecutiveErrors = 0; // Reset on success

  if (response.status === 200 && response.data) {
-   const { type, slug, targetKey, response: interactionResponse } = response.data;
+   const { type, slug, targetKey, response: interactionResponse, fullAuto, autoSelectDelay, stop } = response.data;

    if (type === 'interaction_response' && this.onInteractionResponse) {
      // Confirm receipt BEFORE processing - removes from Redis pending queue
-     // This ensures we don't lose the interaction if processing fails
      try {
        const confirmUrl = `${this.serverUrl}/api/ws/cli?token=${this.sessionToken}`;
        await makeRequest(confirmUrl, { method: 'DELETE' }, null, 10000);
      } catch (err) {
-       // Non-fatal - interaction will be re-delivered on next poll
        console.error(`${C.dim}Remote: Failed to confirm receipt: ${err.message}${C.reset}`);
      }

      this.onInteractionResponse(slug, targetKey, interactionResponse);
+   } else if (type === 'config_update') {
+     // Confirm receipt of config update
+     try {
+       const confirmUrl = `${this.serverUrl}/api/ws/cli?token=${this.sessionToken}&type=config`;
+       await makeRequest(confirmUrl, { method: 'DELETE' }, null, 10000);
+     } catch (err) {
+       console.error(`${C.dim}Remote: Failed to confirm config receipt: ${err.message}${C.reset}`);
+     }
+
+     // Call config update callback
+     this.onConfigUpdate({ fullAuto, autoSelectDelay, stop });
    }
  }

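For reference, a config_update poll result destructures into the fields used above; values here are illustrative:

    // response.data for a config_update:
    { type: 'config_update', fullAuto: true, autoSelectDelay: 10, stop: false }
    // confirmed via DELETE ...&type=config, then forwarded to this.onConfigUpdate(...)
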
package/lib/runtime/agent.js CHANGED
@@ -13,9 +13,79 @@ import { pathToFileURL } from 'url';
  import { getCurrentRuntime } from './runtime.js';
  import { formatInteractionPrompt } from './interaction.js';
  import { withChangeTracking } from './track-changes.js';
+ import { resolveUnknownModel } from './model-resolution.js';
+ import { detectAvailableCLIs } from '../llm.js';

  const require = createRequire(import.meta.url);

+ /**
+  * Token Usage Tracking
+  *
+  * Tracks LLM token usage across all calls within a single agent execution.
+  * The tracker is cleared before each agent runs and aggregated after completion.
+  */
+ const AGENT_USAGE_KEY = Symbol.for('agent-state-machine.agent-usage');
+
+ function getAgentUsageTracker() {
+   return globalThis[AGENT_USAGE_KEY] || (globalThis[AGENT_USAGE_KEY] = []);
+ }
+
+ export function clearAgentUsageTracker() {
+   globalThis[AGENT_USAGE_KEY] = [];
+ }
+
+ /**
+  * Record usage from an LLM call (called from llm.js)
+  */
+ export function recordLLMUsage(usage, model, provider) {
+   if (!usage) return;
+   const tracker = getAgentUsageTracker();
+   tracker.push({ usage, model, provider, timestamp: new Date().toISOString() });
+ }
+
+ /**
+  * Aggregate all recorded usage into a summary
+  */
+ export function aggregateUsage() {
+   const tracker = getAgentUsageTracker();
+   if (tracker.length === 0) return null;
+
+   const agg = {
+     inputTokens: 0,
+     outputTokens: 0,
+     cachedTokens: 0,
+     cost: 0,
+     calls: tracker.length,
+     models: {}
+   };
+
+   for (const { usage, model } of tracker) {
+     agg.inputTokens += usage.inputTokens || 0;
+     agg.outputTokens += usage.outputTokens || 0;
+     agg.cachedTokens += usage.cachedTokens || usage.cacheReadInputTokens || usage.cachedInputTokens || 0;
+     if (usage.cost) agg.cost += usage.cost;
+
+     const m = model || 'unknown';
+     if (!agg.models[m]) {
+       agg.models[m] = { inputTokens: 0, outputTokens: 0 };
+     }
+     agg.models[m].inputTokens += usage.inputTokens || 0;
+     agg.models[m].outputTokens += usage.outputTokens || 0;
+   }
+
+   return agg;
+ }
+
+ /**
+  * Format token count for display
+  */
+ function formatTokens(count) {
+   if (count >= 1000000) return `${(count / 1000000).toFixed(1)}M`;
+   if (count >= 10000) return `${Math.round(count / 1000)}k`;
+   if (count >= 1000) return `${(count / 1000).toFixed(1)}k`;
+   return count.toString();
+ }
+
  /**
   * Run an agent with context
   * @param {string} name - Agent name (file basename)
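A minimal sketch of the tracker lifecycle using only the helpers above (model names are placeholders):

    clearAgentUsageTracker();
    recordLLMUsage({ inputTokens: 1000, outputTokens: 200, cost: 0.01 }, 'model-a', 'cli');
    recordLLMUsage({ inputTokens: 500, outputTokens: 100, cacheReadInputTokens: 400 }, 'model-b', 'cli');

    aggregateUsage();
    // => { inputTokens: 1500, outputTokens: 300, cachedTokens: 400, cost: 0.01, calls: 2,
    //      models: { 'model-a': { inputTokens: 1000, outputTokens: 200 },
    //                'model-b': { inputTokens: 500, outputTokens: 100 } } }

Because the tracker hangs off globalThis under a Symbol.for key, llm.js can record into it without holding a direct reference to the agent runtime.
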
@@ -43,20 +113,49 @@ export async function agent(name, params = {}, options = {}) {
      console.log(` [Agent: ${name}] Starting...`);
    }

+   // Clear usage tracker before each attempt
+   clearAgentUsageTracker();
+
    const result = await executeAgent(runtime, name, params, options);

    if (result && typeof result === 'object' && result._debug_prompt) {
      delete result._debug_prompt;
    }

+   // Aggregate token usage from all LLM calls in this agent
+   const usage = aggregateUsage();
+
    console.log(` [Agent: ${name}] Completed`);
+
+   // Display token usage (unless non-verbose mode)
+   if (usage && !runtime.workflowConfig?.nonVerbose) {
+     let usageLine = ` Tokens: ${formatTokens(usage.inputTokens)} in / ${formatTokens(usage.outputTokens)} out`;
+     if (usage.cachedTokens > 0) {
+       usageLine += ` (${formatTokens(usage.cachedTokens)} cached)`;
+     }
+     if (usage.cost) {
+       usageLine += ` $${usage.cost.toFixed(4)}`;
+     }
+     console.log(usageLine);
+   }
+
+   // Get primary model from usage
+   const primaryModel = usage?.models ? Object.keys(usage.models)[0] : null;
+
    await runtime.prependHistory({
      event: 'AGENT_COMPLETED',
      agent: name,
      output: result,
-     attempts: attempt + 1
+     attempts: attempt + 1,
+     usage: usage,
+     model: primaryModel
    });

+   // Update running totals
+   if (usage && runtime.updateUsageTotals) {
+     runtime.updateUsageTotals(name, usage);
+   }
+
    return result;
  } catch (error) {
    lastError = error;
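Taken together, a successful run now writes an AGENT_COMPLETED history entry of roughly this shape (agent name and numbers are illustrative):

    {
      event: 'AGENT_COMPLETED',
      agent: 'my-agent',
      output: { /* agent result */ },
      attempts: 1,
      usage: { inputTokens: 1500, outputTokens: 300, cachedTokens: 400, cost: 0.01, calls: 2, models: { /* per-model totals */ } },
      model: 'model-a' // first key of usage.models
    }
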
@@ -277,6 +376,23 @@ async function executeMDAgent(runtime, agentPath, name, params, options = {}) {

  const model = config.model || 'fast';

+ // Resolve model alias to actual model config for display
+ let resolvedModel = baseConfig.models?.[model];
+ if (!resolvedModel) {
+   // Auto-resolve unknown model (same logic as llm.js)
+   try {
+     resolvedModel = await resolveUnknownModel(model, baseConfig, runtime.workflowDir, {
+       availableCLIs: detectAvailableCLIs()
+     });
+     // Cache it for future use
+     if (!baseConfig.models) baseConfig.models = {};
+     baseConfig.models[model] = resolvedModel;
+     runtime.workflowConfig.models[model] = resolvedModel;
+   } catch {
+     resolvedModel = model; // Fallback to alias if resolution fails
+   }
+ }
+
  const fullPrompt = buildPrompt(context, {
    model,
    prompt: interpolatedPrompt,
@@ -284,7 +400,7 @@ async function executeMDAgent(runtime, agentPath, name, params, options = {}) {
    responseType: config.response
  });

- await logAgentStart(runtime, name, fullPrompt);
+ await logAgentStart(runtime, name, fullPrompt, resolvedModel, model);

  console.log(` Using model: ${model}`);

@@ -550,7 +666,7 @@ ${content}
    return response;
  }

- async function logAgentStart(runtime, name, prompt) {
+ async function logAgentStart(runtime, name, prompt, model = null, modelAlias = null) {
    if (runtime._agentResumeFlags?.has(name)) {
      runtime._agentResumeFlags.delete(name);
      await runtime.prependHistory({
@@ -569,5 +685,13 @@ async function logAgentStart(runtime, name, prompt) {
    entry.prompt = prompt;
  }

+ if (model) {
+   entry.model = model;
+ }
+
+ if (modelAlias && modelAlias !== model) {
+   entry.modelAlias = modelAlias;
+ }
+
  await runtime.prependHistory(entry);
  }
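With this change the start entry also records what the agent ran on: entry.model holds the resolved model config passed in from executeMDAgent, while entry.modelAlias keeps the original alias (e.g. 'fast') only when it differs from the resolved value.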