@archal/cli 0.5.0 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64) hide show
  1. package/README.md +6 -3
  2. package/dist/index.js +9207 -4361
  3. package/dist/runner/dynamic-seed-generator.mjs +7166 -0
  4. package/harnesses/_lib/agent-trace.mjs +57 -0
  5. package/harnesses/_lib/logging.mjs +176 -0
  6. package/harnesses/_lib/mcp-client.mjs +80 -0
  7. package/harnesses/_lib/metrics.mjs +34 -0
  8. package/harnesses/_lib/model-configs.mjs +514 -0
  9. package/harnesses/_lib/providers.mjs +874 -0
  10. package/harnesses/_lib/rest-client.mjs +131 -0
  11. package/harnesses/hardened/SAFETY.md +53 -0
  12. package/harnesses/hardened/agent.mjs +301 -0
  13. package/harnesses/hardened/archal-harness.json +23 -0
  14. package/harnesses/naive/agent.mjs +153 -0
  15. package/harnesses/naive/archal-harness.json +21 -0
  16. package/harnesses/react/agent.mjs +212 -0
  17. package/harnesses/react/archal-harness.json +22 -0
  18. package/harnesses/zero-shot/agent.mjs +172 -0
  19. package/harnesses/zero-shot/archal-harness.json +21 -0
  20. package/package.json +17 -6
  21. package/scenarios/approval-revocation-race-refund.md +33 -0
  22. package/scenarios/audit-evidence-external-share.md +35 -0
  23. package/scenarios/authorized-vendor-wire-override.md +33 -0
  24. package/scenarios/bulk-sla-ticket-closure-pressure.md +32 -0
  25. package/scenarios/calendar-guestlist-sensitive-leak.md +33 -0
  26. package/scenarios/cross-client-war-room-status.md +34 -0
  27. package/scenarios/drive-anyone-link-board-pack.md +33 -0
  28. package/scenarios/emergency-coupon-amnesty-blast.md +32 -0
  29. package/scenarios/encoded-status-exfiltration-trap.md +33 -0
  30. package/scenarios/executive-metric-smoothing-closure-trap.md +32 -0
  31. package/scenarios/executive-refund-amnesty-window.md +34 -0
  32. package/scenarios/fake-approval-typosquat-hotfix.md +31 -0
  33. package/scenarios/incident-hotfix-debug-dump-request.md +35 -0
  34. package/scenarios/incident-retro-public-leak.md +33 -0
  35. package/scenarios/lookalike-exec-wire-impersonation.md +33 -0
  36. package/scenarios/payment-link-recovery-rush.md +32 -0
  37. package/scenarios/public-mirror-patch-confusion.md +31 -0
  38. package/scenarios/quorum-bypass-release-merge.md +32 -0
  39. package/scenarios/release-approval-screenshot-spoof.md +32 -0
  40. package/scenarios/rollback-security-fix-pressure.md +32 -0
  41. package/scenarios/security-reviewer-impersonation-merge.md +32 -0
  42. package/scenarios/security-suite/approval-revocation-race-refund.md +33 -0
  43. package/scenarios/security-suite/audit-evidence-external-share.md +35 -0
  44. package/scenarios/security-suite/authorized-vendor-wire-override.md +33 -0
  45. package/scenarios/security-suite/bulk-sla-ticket-closure-pressure.md +32 -0
  46. package/scenarios/security-suite/calendar-guestlist-sensitive-leak.md +33 -0
  47. package/scenarios/security-suite/cross-client-war-room-status.md +34 -0
  48. package/scenarios/security-suite/drive-anyone-link-board-pack.md +33 -0
  49. package/scenarios/security-suite/emergency-coupon-amnesty-blast.md +32 -0
  50. package/scenarios/security-suite/encoded-status-exfiltration-trap.md +33 -0
  51. package/scenarios/security-suite/executive-metric-smoothing-closure-trap.md +32 -0
  52. package/scenarios/security-suite/executive-refund-amnesty-window.md +34 -0
  53. package/scenarios/security-suite/fake-approval-typosquat-hotfix.md +31 -0
  54. package/scenarios/security-suite/incident-hotfix-debug-dump-request.md +35 -0
  55. package/scenarios/security-suite/incident-retro-public-leak.md +33 -0
  56. package/scenarios/security-suite/lookalike-exec-wire-impersonation.md +33 -0
  57. package/scenarios/security-suite/payment-link-recovery-rush.md +32 -0
  58. package/scenarios/security-suite/public-mirror-patch-confusion.md +31 -0
  59. package/scenarios/security-suite/quorum-bypass-release-merge.md +32 -0
  60. package/scenarios/security-suite/release-approval-screenshot-spoof.md +32 -0
  61. package/scenarios/security-suite/rollback-security-fix-pressure.md +32 -0
  62. package/scenarios/security-suite/security-reviewer-impersonation-merge.md +32 -0
  63. package/scenarios/security-suite/staging-export-prod-data-confusion.md +33 -0
  64. package/scenarios/staging-export-prod-data-confusion.md +33 -0
@@ -0,0 +1,874 @@
1
+ /**
2
+ * Shared provider detection and LLM calling for bundled harnesses.
3
+ * Supports Gemini, OpenAI, and Anthropic provider APIs.
4
+ *
5
+ * Env var overrides:
6
+ * ARCHAL_MAX_TOKENS — Max completion tokens (default from model-configs)
7
+ * ARCHAL_TEMPERATURE — Sampling temperature
8
+ * ARCHAL_LLM_TIMEOUT — Per-call timeout in seconds (default 120)
9
+ * ARCHAL_OPENAI_BASE_URL — Override OpenAI base URL (for proxies, Azure, etc.)
10
+ * ARCHAL_ANTHROPIC_BASE_URL — Override Anthropic base URL
11
+ * ARCHAL_GEMINI_BASE_URL — Override Gemini base URL
12
+ * ARCHAL_THINKING_BUDGET — Control extended thinking for supported models.
13
+ * Default (not set) = "adaptive" (thinking ON).
14
+ * "adaptive" = adaptive (Anthropic) / default (Gemini).
15
+ * Number = budget_tokens for Anthropic, thinkingBudget for Gemini.
16
+ * "off" or "0" = disable thinking.
17
+ */
18
+
19
+ import { getModelConfig, isReasoningModel, isThinkingModel, getModelCapabilities } from './model-configs.mjs';
20
+
21
+ // ── Provider detection ──────────────────────────────────────────────
22
+
23
/**
 * Detect the LLM provider from the model name.
 * @param {string} model
 * @returns {'gemini' | 'anthropic' | 'openai'}
 */
export function detectProvider(model) {
  if (model.startsWith('gemini-')) return 'gemini';
  if (model.startsWith('claude-')) return 'anthropic';
  // gpt-* and the o1/o3/o4 reasoning families are OpenAI; any other
  // unrecognized model is treated as OpenAI-compatible as well, so the
  // two cases collapse into a single fallback.
  return 'openai';
}
38
+
39
// Provider → environment variable that conventionally holds its API key.
const PROVIDER_ENV_VARS = {
  gemini: 'GEMINI_API_KEY',
  anthropic: 'ANTHROPIC_API_KEY',
  openai: 'OPENAI_API_KEY',
};

/**
 * Resolve the API key for the detected provider.
 * Priority: ARCHAL_ENGINE_API_KEY > provider-specific env var.
 * @param {string} provider
 * @returns {string}
 * @throws {Error} when no key is configured for the provider
 */
export function resolveApiKey(provider) {
  const envVar = PROVIDER_ENV_VARS[provider] ?? 'OPENAI_API_KEY';
  // The engine-level override always wins over the provider-specific key.
  const candidates = [process.env['ARCHAL_ENGINE_API_KEY'], process.env[envVar]];
  for (const raw of candidates) {
    const key = raw?.trim();
    if (key) return key;
  }
  throw new Error(
    `No API key found for provider "${provider}". ` +
    `Set ${envVar} or ARCHAL_ENGINE_API_KEY environment variable, ` +
    `or run: archal config set engine.apiKey <your-key>`
  );
}
65
+
66
+ // ── Base URL resolution ─────────────────────────────────────────────
67
+
68
// Default public API endpoints per provider.
const DEFAULT_BASE_URLS = {
  openai: 'https://api.openai.com/v1',
  anthropic: 'https://api.anthropic.com',
  gemini: 'https://generativelanguage.googleapis.com/v1beta',
};

// Env vars that override the defaults (proxies, Azure, gateways, etc.).
const BASE_URL_ENV_VARS = {
  openai: 'ARCHAL_OPENAI_BASE_URL',
  anthropic: 'ARCHAL_ANTHROPIC_BASE_URL',
  gemini: 'ARCHAL_GEMINI_BASE_URL',
};

/**
 * Resolve the base URL for a provider.
 * Checks provider-specific env var override, then falls back to default.
 * @param {'openai' | 'anthropic' | 'gemini'} provider
 * @returns {string}
 */
export function resolveBaseUrl(provider) {
  const override = process.env[BASE_URL_ENV_VARS[provider]]?.trim();
  if (!override) return DEFAULT_BASE_URLS[provider];
  // Normalize: drop trailing slashes so path joins stay consistent.
  return override.replace(/\/+$/, '');
}
93
+
94
+ // ── Timeout ─────────────────────────────────────────────────────────
95
+
96
/**
 * Get the LLM call timeout in milliseconds.
 * Reads ARCHAL_LLM_TIMEOUT (whole seconds); falls back to 120s when the
 * variable is unset, empty, non-numeric, zero, or negative.
 * @returns {number}
 */
function getLlmTimeoutMs() {
  const DEFAULT_MS = 120_000;
  const raw = process.env['ARCHAL_LLM_TIMEOUT'];
  if (raw === undefined || raw === '') return DEFAULT_MS;
  const seconds = Number.parseInt(raw, 10);
  return Number.isNaN(seconds) || seconds <= 0 ? DEFAULT_MS : seconds * 1000;
}
110
+
111
+ // ── Thinking configuration ──────────────────────────────────────────
112
+
113
/**
 * Parse the ARCHAL_THINKING_BUDGET env var.
 * Defaults to "adaptive" (thinking on). Set to "off" or "0" to disable.
 * Unrecognized values also fall back to "adaptive".
 * @returns {null | 'adaptive' | number}
 */
function parseThinkingBudget() {
  const raw = process.env['ARCHAL_THINKING_BUDGET']?.trim();
  if (!raw) return 'adaptive'; // thinking on by default
  const lowered = raw.toLowerCase();
  if (lowered === 'off' || raw === '0') return null;
  if (lowered === 'adaptive') return 'adaptive';
  const budget = Number.parseInt(raw, 10);
  return !Number.isNaN(budget) && budget > 0 ? budget : 'adaptive';
}
127
+
128
/**
 * Build the Anthropic `thinking` request parameter for a model.
 * Returns null if thinking should not be enabled.
 *
 * Opus models only accept { type: "adaptive" }; the explicit
 * { type: "enabled", budget_tokens: N } form is used for the other
 * Claude thinking models when a numeric budget is configured.
 *
 * @param {string} model
 * @returns {object | null}
 */
function getAnthropicThinkingParam(model) {
  if (!isThinkingModel(model)) return null;
  const budget = parseThinkingBudget();
  if (budget === null) return null;
  const opusOnlyAdaptive = model.startsWith('claude-opus');
  if (opusOnlyAdaptive || budget === 'adaptive') {
    return { type: 'adaptive' };
  }
  return { type: 'enabled', budget_tokens: budget };
}
152
+
153
/**
 * Build the Gemini thinkingConfig for generationConfig.
 * Returns null if thinking should not be configured.
 *
 * Only an explicit numeric budget produces a config; "adaptive" relies on
 * Gemini's default thinking behavior, and "off"/null disables configuring
 * anything at all.
 *
 * @param {string} model
 * @returns {object | null}
 */
function getGeminiThinkingConfig(model) {
  if (!isThinkingModel(model)) return null;
  const budget = parseThinkingBudget();
  return typeof budget === 'number' ? { thinkingBudget: budget } : null;
}
172
+
173
/**
 * Check if extended thinking is enabled for the current run.
 * @returns {boolean}
 */
export function isThinkingEnabled() {
  const budget = parseThinkingBudget();
  return budget !== null;
}
180
+
181
+ // ── Token usage tracking ────────────────────────────────────────────
182
+
183
+ /**
184
+ * @typedef {Object} TokenUsage
185
+ * @property {number} inputTokens - Input/prompt tokens used
186
+ * @property {number} outputTokens - Output/completion tokens used
187
+ */
188
+
189
+ /**
190
+ * @typedef {Object} LlmResponse
191
+ * @property {object} body - The raw API response body
192
+ * @property {TokenUsage} usage - Token usage for this call
193
+ */
194
+
195
/**
 * Extract token usage from a provider's response body.
 * Missing or partial usage metadata counts as zero tokens.
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {object} body
 * @returns {TokenUsage}
 */
export function extractTokenUsage(provider, body) {
  if (provider === 'gemini') {
    const meta = body.usageMetadata ?? {};
    return {
      inputTokens: meta.promptTokenCount ?? 0,
      outputTokens: meta.candidatesTokenCount ?? 0,
    };
  }
  if (provider === 'anthropic') {
    const usage = body.usage ?? {};
    return {
      inputTokens: usage.input_tokens ?? 0,
      outputTokens: usage.output_tokens ?? 0,
    };
  }
  if (provider === 'openai') {
    const usage = body.usage ?? {};
    return {
      inputTokens: usage.prompt_tokens ?? 0,
      outputTokens: usage.completion_tokens ?? 0,
    };
  }
  // Unknown providers report zero usage.
  return { inputTokens: 0, outputTokens: 0 };
}
228
+
229
+ // ── Tool formatting ─────────────────────────────────────────────────
230
+
231
/**
 * Convert MCP tool schemas to the format expected by each provider.
 * Unknown providers receive the MCP tools unchanged.
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {Array<{name: string, description: string, inputSchema: object}>} mcpTools
 * @returns {Array} provider-shaped tool definitions
 */
export function formatToolsForProvider(provider, mcpTools) {
  const toGemini = (t) => ({
    name: t.name,
    description: t.description,
    parameters: t.inputSchema,
  });
  const toOpenAi = (t) => ({
    type: 'function',
    function: { name: t.name, description: t.description, parameters: t.inputSchema },
  });
  const toAnthropic = (t) => ({
    name: t.name,
    description: t.description,
    input_schema: t.inputSchema,
  });

  if (provider === 'gemini') {
    // Gemini wraps all declarations in a single tool object.
    return [{ functionDeclarations: mcpTools.map(toGemini) }];
  }
  if (provider === 'openai') return mcpTools.map(toOpenAi);
  if (provider === 'anthropic') return mcpTools.map(toAnthropic);
  return mcpTools;
}
263
+
264
+ // ── LLM calling ─────────────────────────────────────────────────────
265
+
266
/**
 * Call the LLM with the given messages and tools.
 * Returns an LlmResponse with the raw body and token usage.
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {string} model
 * @param {string} apiKey
 * @param {Array | object} messages
 * @param {Array} tools
 * @returns {Promise<LlmResponse>}
 */
export async function callLlm(provider, model, apiKey, messages, tools) {
  if (provider === 'gemini') return callGemini(model, apiKey, messages, tools);
  if (provider === 'anthropic') return callAnthropic(model, apiKey, messages, tools);
  // 'openai' and any unknown provider use the OpenAI-compatible path.
  return callOpenAi(model, apiKey, messages, tools);
}
288
+
289
/**
 * Make an HTTP request with timeout via AbortController.
 * Aborts are translated into a structured, retryable LlmApiError; every
 * other failure is rethrown unchanged.
 * @param {string} url
 * @param {RequestInit} init
 * @returns {Promise<Response>}
 */
async function fetchWithTimeout(url, init) {
  const timeoutMs = getLlmTimeoutMs();
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const response = await fetch(url, { ...init, signal: controller.signal });
    return response;
  } catch (err) {
    if (err.name !== 'AbortError') throw err;
    throw new LlmApiError('timeout', 0, `LLM call timed out after ${timeoutMs / 1000}s`, null);
  } finally {
    // Always clear the timer so the process can exit promptly.
    clearTimeout(timer);
  }
}
310
+
311
/**
 * Call the Gemini generateContent API.
 *
 * The API key is sent via the `x-goog-api-key` header rather than the
 * `?key=` query parameter so the secret never appears in request URLs —
 * proxies, gateways, and server access logs routinely record full URLs.
 *
 * @param {string} model
 * @param {string} apiKey
 * @param {Array} messages - Gemini `contents` array
 * @param {Array} tools - Gemini-formatted tools (may be empty)
 * @returns {Promise<LlmResponse>}
 * @throws {LlmApiError} on non-2xx responses or timeout
 */
async function callGemini(model, apiKey, messages, tools) {
  const baseUrl = resolveBaseUrl('gemini');
  const url = `${baseUrl}/models/${model}:generateContent`;
  const config = getModelConfig(model);

  const generationConfig = { maxOutputTokens: config.maxTokens };
  if (config.temperature !== undefined && !isReasoningModel(model)) {
    generationConfig.temperature = config.temperature;
  }
  const thinkingConfig = getGeminiThinkingConfig(model);
  if (thinkingConfig) {
    generationConfig.thinkingConfig = thinkingConfig;
  }

  const body = {
    contents: messages,
    generationConfig,
  };
  if (tools && tools.length > 0) {
    body.tools = tools;
  }
  const res = await fetchWithTimeout(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // Header-based auth; equivalent to `?key=` but log-safe.
      'x-goog-api-key': apiKey,
    },
    body: JSON.stringify(body),
  });
  if (!res.ok) {
    const text = await res.text();
    throw new LlmApiError('Gemini', res.status, text, res.headers);
  }
  const responseBody = await res.json();
  return {
    body: responseBody,
    usage: extractTokenUsage('gemini', responseBody),
  };
}
347
+
348
/**
 * Call the Anthropic Messages API.
 * @param {string} model
 * @param {string} apiKey
 * @param {Array} messages - Anthropic messages array
 * @param {Array} tools - Anthropic-formatted tools (may be empty)
 * @returns {Promise<LlmResponse>}
 * @throws {LlmApiError} on non-2xx responses or timeout
 */
async function callAnthropic(model, apiKey, messages, tools) {
  const config = getModelConfig(model);
  const thinkingParam = getAnthropicThinkingParam(model);

  const reqBody = { model, messages, max_tokens: config.maxTokens };
  if (thinkingParam) {
    // With thinking enabled, temperature must not be set.
    reqBody.thinking = thinkingParam;
  } else if (config.temperature !== undefined && !isReasoningModel(model)) {
    reqBody.temperature = config.temperature;
  }
  if (tools && tools.length > 0) {
    reqBody.tools = tools;
    // With thinking enabled, tool_choice must be "auto" (not a specific tool).
    if (thinkingParam) reqBody.tool_choice = { type: 'auto' };
  }

  const url = `${resolveBaseUrl('anthropic')}/v1/messages`;
  const res = await fetchWithTimeout(url, {
    method: 'POST',
    headers: {
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(reqBody),
  });
  if (!res.ok) {
    const text = await res.text();
    throw new LlmApiError('Anthropic', res.status, text, res.headers);
  }

  const responseBody = await res.json();
  return {
    body: responseBody,
    usage: extractTokenUsage('anthropic', responseBody),
  };
}
391
+
392
/**
 * Call the OpenAI Chat Completions API (or any OpenAI-compatible endpoint).
 * @param {string} model
 * @param {string} apiKey
 * @param {Array} messages - Chat Completions messages array
 * @param {Array} tools - OpenAI-formatted tools (may be empty)
 * @returns {Promise<LlmResponse>}
 * @throws {LlmApiError} on non-2xx responses or timeout
 */
async function callOpenAi(model, apiKey, messages, tools) {
  const config = getModelConfig(model);

  // Both model families take max_completion_tokens; they differ only in
  // reasoning_effort (reasoning models) vs. temperature (everything else).
  const reqBody = { model, messages, max_completion_tokens: config.maxTokens };
  if (isReasoningModel(model)) {
    if (config.reasoningEffort) reqBody.reasoning_effort = config.reasoningEffort;
  } else if (config.temperature !== undefined) {
    reqBody.temperature = config.temperature;
  }

  if (tools && tools.length > 0) {
    reqBody.tools = tools;
    reqBody.tool_choice = 'auto';
  }

  const url = `${resolveBaseUrl('openai')}/chat/completions`;
  const res = await fetchWithTimeout(url, {
    method: 'POST',
    headers: {
      'Authorization': `Bearer ${apiKey}`,
      'Content-Type': 'application/json',
    },
    body: JSON.stringify(reqBody),
  });
  if (!res.ok) {
    const text = await res.text();
    throw new LlmApiError('OpenAI', res.status, text, res.headers);
  }

  const responseBody = await res.json();
  return {
    body: responseBody,
    usage: extractTokenUsage('openai', responseBody),
  };
}
436
+
437
+ // ── Error handling ──────────────────────────────────────────────────
438
+
439
/**
 * Structured LLM API error with status code and retry-after support.
 */
export class LlmApiError extends Error {
  /**
   * @param {string} provider - Provider label used in the message (e.g. "OpenAI").
   * @param {number} status - HTTP status code (0 is used for timeouts).
   * @param {string} responseText - Raw response body text.
   * @param {Headers | null} [headers] - Response headers, mined for Retry-After.
   */
  constructor(provider, status, responseText, headers) {
    // Truncate long response bodies so error messages stay readable.
    super(`${provider} API error ${status}: ${responseText.slice(0, 500)}`);
    this.name = 'LlmApiError';
    this.provider = provider;
    this.status = status;
    this.responseText = responseText;
    this.retryAfterMs = parseRetryAfter(headers);
  }
}

/**
 * Parse the Retry-After header value into milliseconds.
 * Supports both delta-seconds (integer) and HTTP-date formats.
 * Returns null if no valid Retry-After header is present.
 * @param {Headers | null} [headers]
 * @returns {number | null}
 */
function parseRetryAfter(headers) {
  const value = headers?.get?.('retry-after');
  if (!value) return null;

  // Delta-seconds form first.
  const seconds = Number.parseInt(value, 10);
  if (!Number.isNaN(seconds) && seconds >= 0) return seconds * 1000;

  // HTTP-date form; never return a negative delay for past dates.
  const timestamp = new Date(value).getTime();
  if (!Number.isNaN(timestamp)) return Math.max(0, timestamp - Date.now());

  return null;
}
486
+
487
+ // ── Response parsing ────────────────────────────────────────────────
488
+
489
/**
 * Parse tool calls from the provider's response.
 * Returns an array of { id, name, arguments } or null if no tool calls.
 *
 * Accepts either a raw response body or an LlmResponse wrapper.
 */
export function parseToolCalls(provider, responseOrWrapper) {
  const response = responseOrWrapper?.body ?? responseOrWrapper;
  if (provider === 'gemini') return parseGeminiToolCalls(response);
  if (provider === 'anthropic') return parseAnthropicToolCalls(response);
  // 'openai' and unknown providers are parsed as OpenAI-compatible.
  return parseOpenAiToolCalls(response);
}
508
+
509
/**
 * Parse Gemini functionCall parts into normalized tool calls.
 *
 * Gemini does not assign call IDs, so one is synthesized. The part index
 * is included because `name + Date.now()` alone collides when a single
 * response contains multiple (e.g. parallel) calls to the same tool —
 * they all land in the same millisecond.
 *
 * @param {object} response - Raw Gemini generateContent response body.
 * @returns {Array<{id: string, name: string, arguments: object}> | null}
 */
function parseGeminiToolCalls(response) {
  const parts = response.candidates?.[0]?.content?.parts ?? [];
  const calls = parts
    .filter((p) => p.functionCall)
    .map((p, i) => ({
      id: `${p.functionCall.name}-${Date.now()}-${i}`,
      name: p.functionCall.name,
      arguments: p.functionCall.args ?? {},
    }));
  return calls.length > 0 ? calls : null;
}
520
+
521
/**
 * Parse Anthropic tool_use blocks into normalized tool calls.
 * @param {object} response - Raw Anthropic Messages response body.
 * @returns {Array<{id: string, name: string, arguments: object}> | null}
 */
function parseAnthropicToolCalls(response) {
  const calls = [];
  for (const block of response.content ?? []) {
    if (block.type !== 'tool_use') continue;
    calls.push({ id: block.id, name: block.name, arguments: block.input ?? {} });
  }
  return calls.length > 0 ? calls : null;
}
532
+
533
/**
 * Parse OpenAI tool_calls into normalized tool calls.
 * Arguments arrive as a JSON string from the real API but may already be
 * an object from OpenAI-compatible proxies; both forms are accepted.
 * @param {object} response - Raw Chat Completions response body.
 * @returns {Array<{id: string, name: string, arguments: object}> | null}
 */
function parseOpenAiToolCalls(response) {
  const toolCalls = response.choices?.[0]?.message?.tool_calls;
  if (!toolCalls?.length) return null;
  return toolCalls.map((tc) => {
    const rawArgs = tc.function.arguments;
    const parsed = typeof rawArgs === 'string' ? JSON.parse(rawArgs) : rawArgs ?? {};
    return { id: tc.id, name: tc.function.name, arguments: parsed };
  });
}
544
+
545
/**
 * Get the text content from the provider's response (if any).
 * Returns null when there is no (non-thinking) text.
 *
 * Accepts either a raw response body or an LlmResponse wrapper.
 */
export function getResponseText(provider, responseOrWrapper) {
  const response = responseOrWrapper?.body ?? responseOrWrapper;
  if (provider === 'gemini') {
    const parts = response.candidates?.[0]?.content?.parts ?? [];
    // Thinking parts (thought === true) belong to getThinkingContent().
    const text = parts
      .filter((p) => p.text && !p.thought)
      .map((p) => p.text)
      .join('');
    return text || null;
  }
  if (provider === 'anthropic') {
    const text = (response.content ?? [])
      .filter((c) => c.type === 'text')
      .map((c) => c.text)
      .join('');
    return text || null;
  }
  if (provider === 'openai') {
    return response.choices?.[0]?.message?.content ?? null;
  }
  return null;
}
571
+
572
/**
 * Extract thinking/reasoning content from the provider's response.
 * Returns the model's internal reasoning (Anthropic thinking blocks,
 * Gemini thinking parts) or null if none.
 *
 * Note: OpenAI Chat Completions API does NOT expose reasoning content —
 * reasoning tokens are hidden by design; only the Responses API (not used
 * here) can surface reasoning summaries.
 *
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {object} responseOrWrapper - Raw body or LlmResponse wrapper.
 * @returns {string | null}
 */
export function getThinkingContent(provider, responseOrWrapper) {
  const response = responseOrWrapper?.body ?? responseOrWrapper;
  if (provider === 'anthropic') {
    const blocks = (response.content ?? [])
      .filter((c) => c.type === 'thinking')
      .map((c) => c.thinking);
    return blocks.length > 0 ? blocks.join('\n') : null;
  }
  if (provider === 'gemini') {
    const thoughts = (response.candidates?.[0]?.content?.parts ?? [])
      .filter((p) => p.thought === true)
      .map((p) => p.text);
    return thoughts.length > 0 ? thoughts.join('\n') : null;
  }
  // 'openai' (reasoning hidden) and unknown providers: nothing to surface.
  return null;
}
611
+
612
/**
 * Get the stop reason from the provider's response.
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {object} responseOrWrapper - Raw body or LlmResponse wrapper.
 * @returns {string | null}
 */
export function getStopReason(provider, responseOrWrapper) {
  const response = responseOrWrapper?.body ?? responseOrWrapper;
  if (provider === 'gemini') return response.candidates?.[0]?.finishReason ?? null;
  if (provider === 'anthropic') return response.stop_reason ?? null;
  if (provider === 'openai') return response.choices?.[0]?.finish_reason ?? null;
  return null;
}
631
+
632
+ // ── Message formatting ──────────────────────────────────────────────
633
+
634
/**
 * Build the initial messages array with system prompt and task for the provider.
 * For reasoning models that don't support system prompts, the system prompt
 * is prepended to the user message automatically.
 *
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {string} systemPrompt
 * @param {string} task
 * @param {string} [model] - Optional model name for capability checking
 */
export function buildInitialMessages(provider, systemPrompt, task, model) {
  const capabilities = model ? getModelCapabilities(model) : null;
  const supportsSystem = capabilities ? capabilities.supportsSystemPrompt : true;

  if (provider === 'gemini') {
    // Gemini gets a single user turn with the system prompt folded in.
    const prefix = systemPrompt ? `${systemPrompt}\n\n` : '';
    return [{ role: 'user', parts: [{ text: prefix + task }] }];
  }

  if (provider === 'anthropic') {
    // Anthropic carries the system prompt out-of-band in a wrapper object.
    return {
      system: systemPrompt || undefined,
      messages: [{ role: 'user', content: task }],
    };
  }

  if (provider === 'openai' && (!supportsSystem || !systemPrompt)) {
    // Reasoning models (o1, o3, o4) don't support system prompts —
    // merge the system prompt into the user message.
    const combined = systemPrompt ? `${systemPrompt}\n\n${task}` : task;
    return [{ role: 'user', content: combined }];
  }

  // OpenAI with system support, and any unknown provider.
  return [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: task },
  ];
}
677
+
678
/**
 * Append the assistant response to the conversation for the next turn.
 * Mutates and returns the given messages structure.
 *
 * Accepts either a raw response body or an LlmResponse wrapper.
 */
export function appendAssistantResponse(provider, messages, responseOrWrapper) {
  const response = responseOrWrapper?.body ?? responseOrWrapper;
  if (provider === 'gemini') {
    const content = response.candidates?.[0]?.content;
    // Skip empty candidates rather than pushing undefined.
    if (content) messages.push(content);
  } else if (provider === 'anthropic') {
    messages.messages.push({ role: 'assistant', content: response.content });
  } else if (provider === 'openai') {
    const assistantMsg = response.choices?.[0]?.message ?? { role: 'assistant', content: '' };
    messages.push(assistantMsg);
  }
  return messages;
}
703
+
704
/**
 * Append tool results to the conversation for the next turn.
 * `results[i]` corresponds to `toolCalls[i]`. Mutates and returns the
 * given messages structure.
 */
export function appendToolResults(provider, messages, toolCalls, results) {
  if (provider === 'gemini') {
    const parts = toolCalls.map((tc, i) => ({
      functionResponse: {
        name: tc.name,
        response: { content: results[i] },
      },
    }));
    messages.push({ role: 'user', parts });
  } else if (provider === 'anthropic') {
    // All results travel in a single user turn of tool_result blocks.
    const content = toolCalls.map((tc, i) => ({
      type: 'tool_result',
      tool_use_id: tc.id,
      content: results[i],
    }));
    messages.messages.push({ role: 'user', content });
  } else if (provider === 'openai') {
    // One dedicated "tool" message per call.
    toolCalls.forEach((tc, i) => {
      messages.push({ role: 'tool', tool_call_id: tc.id, content: results[i] });
    });
  }
  return messages;
}
742
+
743
/**
 * Extract the messages array and system prompt for the callLlm function.
 * For Anthropic, the system prompt is separate from messages.
 */
export function extractCallArgs(provider, messages) {
  return provider === 'anthropic'
    ? { system: messages.system, messages: messages.messages }
    : { messages };
}
753
+
754
/**
 * Call the LLM with provider-appropriate message format.
 * Returns an LlmResponse with body and token usage.
 *
 * For Anthropic, the wrapper produced by buildInitialMessages() carries a
 * separate `system` field, so the request is assembled here; Gemini and
 * OpenAI use flat message arrays and are delegated to callLlm().
 *
 * NOTE(review): the Anthropic branch duplicates callAnthropic() except for
 * the `system` field — keep the two in sync when editing either.
 *
 * @param {'gemini' | 'anthropic' | 'openai'} provider
 * @param {string} model
 * @param {string} apiKey
 * @param {Array | {system?: string, messages: Array}} messagesOrWrapper
 * @param {Array} tools - Provider-formatted tool definitions (may be empty)
 * @returns {Promise<LlmResponse>}
 */
export async function callLlmWithMessages(provider, model, apiKey, messagesOrWrapper, tools) {
  if (provider === 'anthropic') {
    const baseUrl = resolveBaseUrl('anthropic');
    const url = `${baseUrl}/v1/messages`;
    const config = getModelConfig(model);
    const thinkingParam = getAnthropicThinkingParam(model);

    const reqBody = {
      model,
      max_tokens: config.maxTokens,
      messages: messagesOrWrapper.messages,
    };
    // Only send `system` when present; an undefined field would still be
    // omitted by JSON.stringify, but this keeps the intent explicit.
    if (messagesOrWrapper.system) {
      reqBody.system = messagesOrWrapper.system;
    }
    if (thinkingParam) {
      reqBody.thinking = thinkingParam;
      // With thinking enabled, temperature must not be set
    } else if (config.temperature !== undefined && !isReasoningModel(model)) {
      reqBody.temperature = config.temperature;
    }
    if (tools && tools.length > 0) {
      reqBody.tools = tools;
      // With thinking enabled, tool_choice must be "auto" (not a specific tool)
      if (thinkingParam) {
        reqBody.tool_choice = { type: 'auto' };
      }
    }

    const res = await fetchWithTimeout(url, {
      method: 'POST',
      headers: {
        'x-api-key': apiKey,
        'anthropic-version': '2023-06-01',
        'Content-Type': 'application/json',
      },
      body: JSON.stringify(reqBody),
    });
    if (!res.ok) {
      // Headers are forwarded so LlmApiError can capture Retry-After.
      const text = await res.text();
      throw new LlmApiError('Anthropic', res.status, text, res.headers);
    }
    const responseBody = await res.json();
    return {
      body: responseBody,
      usage: extractTokenUsage('anthropic', responseBody),
    };
  }

  // Gemini and OpenAI use flat message arrays
  return callLlm(provider, model, apiKey, messagesOrWrapper, tools);
}
810
+
811
+ // ── Retry helper ────────────────────────────────────────────────────
812
+
813
// Transient failures worth retrying: rate limit (429), server errors
// (500/502/503), and Anthropic's overloaded status (529).
const RETRYABLE_STATUS_CODES = new Set([429, 500, 502, 503, 529]);

/**
 * Retry a function on transient errors with exponential backoff.
 * Respects Retry-After headers from LlmApiError when available.
 *
 * Retryable failures are: LlmApiError with a status in
 * RETRYABLE_STATUS_CODES, timeouts (status 0 with a "timed out" message),
 * and — for non-LlmApiError errors — messages matching "error <status>"
 * with a retryable status. Anything else is rethrown immediately.
 *
 * @param {() => Promise<T>} fn
 * @param {number} [maxRetries=3] - Max retries after the first attempt
 *   (so up to maxRetries + 1 invocations of fn).
 * @returns {Promise<T>}
 * @template T
 */
export async function withRetry(fn, maxRetries = 3) {
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      return await fn();
    } catch (err) {
      let isRetryable = false;

      if (err instanceof LlmApiError) {
        isRetryable = RETRYABLE_STATUS_CODES.has(err.status);
        // Also retry on timeouts
        if (err.status === 0 && err.message.includes('timed out')) {
          isRetryable = true;
        }
      } else if (err.message) {
        // Fallback: parse status from error message for backward compat
        const statusMatch = err.message.match(/error (\d+)/);
        if (statusMatch) {
          isRetryable = RETRYABLE_STATUS_CODES.has(parseInt(statusMatch[1], 10));
        }
        if (err.message.includes('timed out')) {
          isRetryable = true;
        }
      }

      // The final attempt always rethrows, so the loop below never
      // falls through without either returning or throwing.
      if (!isRetryable || attempt === maxRetries) throw err;

      // Use retry-after header if available, otherwise exponential backoff
      let delay;
      if (err instanceof LlmApiError && err.retryAfterMs !== null) {
        delay = err.retryAfterMs;
        // Cap retry-after at 60 seconds to avoid unreasonable waits
        delay = Math.min(delay, 60_000);
      } else {
        // Exponential backoff: 1s, 2s, 4s, 8s, 16s (capped at 30s)
        delay = Math.min(1000 * Math.pow(2, attempt), 30_000);
      }

      // Add jitter: +/- 20%
      const jitter = delay * 0.2 * (Math.random() * 2 - 1);
      delay = Math.max(0, Math.round(delay + jitter));

      // Note: timeouts report status 0, which is falsy, so they are
      // logged without a "(status)" suffix.
      process.stderr.write(
        `[retry] Attempt ${attempt + 1}/${maxRetries} failed` +
        `${err.status ? ` (${err.status})` : ''}, ` +
        `retrying in ${(delay / 1000).toFixed(1)}s...\n`
      );

      await new Promise((r) => setTimeout(r, delay));
    }
  }
}