@steipete/oracle 0.4.4 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +11 -9
  2. package/dist/.DS_Store +0 -0
  3. package/dist/bin/oracle-cli.js +16 -48
  4. package/dist/scripts/agent-send.js +147 -0
  5. package/dist/scripts/docs-list.js +110 -0
  6. package/dist/scripts/git-policy.js +125 -0
  7. package/dist/scripts/runner.js +1378 -0
  8. package/dist/scripts/test-browser.js +103 -0
  9. package/dist/scripts/test-remote-chrome.js +68 -0
  10. package/dist/src/browser/actions/attachments.js +47 -16
  11. package/dist/src/browser/actions/promptComposer.js +29 -18
  12. package/dist/src/browser/actions/remoteFileTransfer.js +36 -4
  13. package/dist/src/browser/chromeCookies.js +37 -6
  14. package/dist/src/browser/chromeLifecycle.js +166 -25
  15. package/dist/src/browser/config.js +25 -1
  16. package/dist/src/browser/constants.js +22 -3
  17. package/dist/src/browser/index.js +301 -21
  18. package/dist/src/browser/prompt.js +3 -1
  19. package/dist/src/browser/reattach.js +59 -0
  20. package/dist/src/browser/sessionRunner.js +15 -1
  21. package/dist/src/browser/windowsCookies.js +2 -1
  22. package/dist/src/cli/browserConfig.js +11 -0
  23. package/dist/src/cli/browserDefaults.js +41 -0
  24. package/dist/src/cli/detach.js +2 -2
  25. package/dist/src/cli/dryRun.js +4 -2
  26. package/dist/src/cli/engine.js +2 -2
  27. package/dist/src/cli/help.js +2 -2
  28. package/dist/src/cli/options.js +2 -1
  29. package/dist/src/cli/runOptions.js +1 -1
  30. package/dist/src/cli/sessionDisplay.js +98 -5
  31. package/dist/src/cli/sessionRunner.js +39 -6
  32. package/dist/src/cli/tui/index.js +15 -18
  33. package/dist/src/heartbeat.js +2 -2
  34. package/dist/src/oracle/background.js +10 -2
  35. package/dist/src/oracle/client.js +17 -0
  36. package/dist/src/oracle/config.js +10 -2
  37. package/dist/src/oracle/errors.js +24 -4
  38. package/dist/src/oracle/modelResolver.js +144 -0
  39. package/dist/src/oracle/oscProgress.js +1 -1
  40. package/dist/src/oracle/run.js +82 -34
  41. package/dist/src/oracle/runUtils.js +12 -8
  42. package/dist/src/remote/server.js +214 -23
  43. package/dist/src/sessionManager.js +5 -2
  44. package/dist/vendor/oracle-notifier/OracleNotifier.app/Contents/CodeResources +0 -0
  45. package/dist/vendor/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  46. package/dist/vendor/oracle-notifier/build-notifier.sh +0 -0
  47. package/dist/vendor/oracle-notifier/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  48. package/dist/vendor/oracle-notifier/oracle-notifier/build-notifier.sh +0 -0
  49. package/package.json +47 -46
  50. package/vendor/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  51. package/vendor/oracle-notifier/build-notifier.sh +0 -0
  52. package/vendor/oracle-notifier/README.md +0 -24
@@ -74,7 +74,7 @@ export function extractResponseMetadata(response) {
74
74
  }
75
75
  return metadata;
76
76
  }
77
- export function toTransportError(error) {
77
+ export function toTransportError(error, model) {
78
78
  if (error instanceof OracleTransportError) {
79
79
  return error;
80
80
  }
@@ -87,10 +87,26 @@ export function toTransportError(error) {
87
87
  if (error instanceof APIConnectionError) {
88
88
  return new OracleTransportError('connection-lost', 'Connection to OpenAI dropped before the response completed.', error);
89
89
  }
90
- if (error instanceof APIError) {
91
- if (error.status === 404 || error.status === 405) {
92
- return new OracleTransportError('unsupported-endpoint', 'HTTP 404/405 from the Responses API; this base URL or gateway likely does not expose /v1/responses. Set OPENAI_BASE_URL to api.openai.com/v1, update your Azure API version/deployment, or use the browser engine.', error);
90
+ const isApiError = error instanceof APIError || error?.name === 'APIError';
91
+ if (isApiError) {
92
+ const apiError = error;
93
+ const code = apiError.code ?? apiError.error?.code;
94
+ const messageText = apiError.message?.toLowerCase?.() ?? '';
95
+ const apiMessage = apiError.error?.message ||
96
+ apiError.message ||
97
+ (apiError.status ? `${apiError.status} OpenAI API error` : 'OpenAI API error');
98
+ // TODO: Remove once gpt-5.1-pro is available via the Responses API.
99
+ if (model === 'gpt-5.1-pro' &&
100
+ (code === 'model_not_found' ||
101
+ messageText.includes('does not exist') ||
102
+ messageText.includes('unknown model') ||
103
+ messageText.includes('model_not_found'))) {
104
+ return new OracleTransportError('model-unavailable', 'gpt-5.1-pro is not yet available on this API base. Use gpt-5-pro for now until OpenAI enables it. // TODO: Remove once gpt-5.1-pro is available', apiError);
93
105
  }
106
+ if (apiError.status === 404 || apiError.status === 405) {
107
+ return new OracleTransportError('unsupported-endpoint', 'HTTP 404/405 from the Responses API; this base URL or gateway likely does not expose /v1/responses. Set OPENAI_BASE_URL to api.openai.com/v1, update your Azure API version/deployment, or use the browser engine.', apiError);
108
+ }
109
+ return new OracleTransportError('api-error', apiMessage, apiError);
94
110
  }
95
111
  return new OracleTransportError('unknown', error instanceof Error ? error.message : 'Unknown transport failure.', error);
96
112
  }
@@ -104,6 +120,10 @@ export function describeTransportError(error, deadlineMs) {
104
120
  return 'Connection to OpenAI ended unexpectedly before the response completed.';
105
121
  case 'client-abort':
106
122
  return 'Request was aborted before OpenAI completed the response.';
123
+ case 'api-error':
124
+ return error.message;
125
+ case 'model-unavailable':
126
+ return error.message;
107
127
  case 'unsupported-endpoint':
108
128
  return 'The Responses API returned 404/405 — your base URL/gateway probably lacks /v1/responses (check OPENAI_BASE_URL or switch to api.openai.com / browser engine).';
109
129
  default:
@@ -0,0 +1,144 @@
1
+ import { MODEL_CONFIGS, PRO_MODELS } from './config.js';
2
+ import { countTokens as countTokensGpt5Pro } from 'gpt-tokenizer/model/gpt-5-pro';
3
+ const OPENROUTER_DEFAULT_BASE = 'https://openrouter.ai/api/v1';
4
+ const OPENROUTER_MODELS_ENDPOINT = 'https://openrouter.ai/api/v1/models';
5
/**
 * Whether `model` is one of the statically configured models.
 * Object.hasOwn keeps inherited Object.prototype keys from matching.
 */
export function isKnownModel(model) {
    return Object.hasOwn(MODEL_CONFIGS, model);
}
8
/**
 * Whether `baseUrl` points at the OpenRouter API host.
 *
 * The previous substring check (`hostname.includes('openrouter.ai')`) also
 * matched look-alike hosts such as "notopenrouter.ai"; match the exact host
 * or a subdomain instead. Empty or unparsable URLs are simply "not OpenRouter".
 *
 * @param baseUrl base URL string (may be undefined/empty).
 * @returns true only for openrouter.ai or *.openrouter.ai hosts.
 */
export function isOpenRouterBaseUrl(baseUrl) {
    if (!baseUrl) {
        return false;
    }
    try {
        const { hostname } = new URL(baseUrl);
        return hostname === 'openrouter.ai' || hostname.endsWith('.openrouter.ai');
    }
    catch {
        return false;
    }
}
19
/**
 * The OpenRouter base URL used when the caller does not supply one.
 */
export function defaultOpenRouterBaseUrl() {
    return OPENROUTER_DEFAULT_BASE;
}
22
/**
 * Normalize a user-supplied OpenRouter base URL.
 *
 * Trims a trailing "/responses" endpoint (with or without a trailing slash)
 * so the client does not double-append it, then strips trailing slashes.
 * The old `endsWith('/responses')` guard missed the "/responses/" form even
 * though the regex handled it; the replace is a no-op otherwise, so the
 * guard is dropped.
 *
 * @param baseUrl base URL string; returned unchanged when not a valid URL.
 */
export function normalizeOpenRouterBaseUrl(baseUrl) {
    try {
        const url = new URL(baseUrl);
        // If the user passed the responses endpoint, trim it so the client
        // does not double-append it.
        url.pathname = url.pathname.replace(/\/responses\/?$/, '');
        return url.toString().replace(/\/+$/, '');
    }
    catch {
        return baseUrl;
    }
}
35
/**
 * Turn a model id into a filesystem-safe slug: path separators become "__"
 * and characters that are illegal in file names become "_".
 */
export function safeModelSlug(model) {
    const withoutSeparators = model.replace(/[/\\]/g, '__');
    return withoutSeparators.replace(/[:*?"<>|]/g, '_');
}
38
// Catalog responses cached per API key so repeated resolutions within a run
// do not re-hit the network.
const catalogCache = new Map();
const CACHE_TTL_MS = 5 * 60 * 1000;
/**
 * Load the OpenRouter model catalog, serving a cached copy when one less
 * than five minutes old exists for the same API key.
 *
 * @param apiKey  OpenRouter API key (also used as the cache key).
 * @param fetcher fetch-compatible function used to perform the request.
 * @returns the catalog entries (the endpoint's `data` array; [] when absent).
 * @throws Error when the endpoint responds with a non-OK status.
 */
async function fetchOpenRouterCatalog(apiKey, fetcher) {
    const now = Date.now();
    const cached = catalogCache.get(apiKey);
    if (cached !== undefined && now - cached.fetchedAt < CACHE_TTL_MS) {
        return cached.models;
    }
    const response = await fetcher(OPENROUTER_MODELS_ENDPOINT, {
        headers: { authorization: `Bearer ${apiKey}` },
    });
    if (!response.ok) {
        throw new Error(`Failed to load OpenRouter models (${response.status})`);
    }
    const payload = await response.json();
    const models = payload?.data ?? [];
    catalogCache.set(apiKey, { fetchedAt: now, models });
    return models;
}
59
/**
 * Map a bare model name to a fully-qualified OpenRouter id
 * ("provider/model"). Ids that already contain a "/" pass through untouched;
 * otherwise try an exact catalog match, then an "anything/<candidate>"
 * suffix match, then fall back to prefixing the provider hint (when given)
 * or returning the candidate unchanged.
 */
function mapToOpenRouterId(candidate, catalog, providerHint) {
    if (candidate.includes('/')) {
        return candidate;
    }
    const match = catalog.find((entry) => entry.id === candidate) ??
        catalog.find((entry) => entry.id.endsWith(`/${candidate}`));
    if (match) {
        return match.id;
    }
    return providerHint ? `${providerHint}/${candidate}` : candidate;
}
73
/**
 * Resolve the effective model configuration for a run.
 *
 * Resolution order:
 *   1. A known model (present in MODEL_CONFIGS) with no OpenRouter routing
 *      returns its static config unchanged.
 *   2. With an OpenRouter base URL or API key, the OpenRouter catalog is
 *      consulted to map the model to a fully-qualified id and enrich the
 *      config with context length and pricing.
 *   3. On a catalog miss or fetch failure, a generic config is synthesized
 *      (gpt-5-pro tokenizer, 200k input limit) so custom endpoints still run.
 *
 * @param model   model identifier (normally a string).
 * @param options { baseUrl?, openRouterApiKey?, fetcher? } — `fetcher`
 *                defaults to the global fetch; injectable for tests.
 * @returns a MODEL_CONFIGS-shaped config, plus `apiModel`/`openRouterId`
 *          when routed through OpenRouter.
 */
export async function resolveModelConfig(model, options = {}) {
    const known = isKnownModel(model) ? MODEL_CONFIGS[model] : null;
    const fetcher = options.fetcher ?? globalThis.fetch.bind(globalThis);
    const openRouterActive = isOpenRouterBaseUrl(options.baseUrl) || Boolean(options.openRouterApiKey);
    if (known && !openRouterActive) {
        return known;
    }
    // Single source of truth for the synthesized config; the previous code
    // repeated this object literal in three places, which invited drift.
    const synthesize = (overrides = {}) => ({
        ...(known ?? {
            model,
            tokenizer: countTokensGpt5Pro,
            inputLimit: 200_000,
            reasoning: null,
        }),
        provider: known?.provider ?? 'other',
        supportsBackground: known?.supportsBackground ?? true,
        supportsSearch: known?.supportsSearch ?? true,
        pricing: known?.pricing ?? null,
        ...overrides,
    });
    // Try to enrich from the OpenRouter catalog when available.
    if (openRouterActive && options.openRouterApiKey) {
        try {
            const catalog = await fetchOpenRouterCatalog(options.openRouterApiKey, fetcher);
            const targetId = mapToOpenRouterId(typeof model === 'string' ? model : String(model), catalog, known?.provider);
            const info = catalog.find((entry) => entry.id === targetId) ?? null;
            if (info) {
                // Pricing is folded in only when both rates are present.
                // NOTE(review): this divides pricing.prompt/completion by 1e6,
                // i.e. assumes per-million-token rates — confirm against the
                // OpenRouter models schema (it may report per-token prices).
                const hasPricing = info.pricing && info.pricing.prompt != null && info.pricing.completion != null;
                return synthesize({
                    apiModel: targetId,
                    openRouterId: targetId,
                    inputLimit: info.context_length ?? known?.inputLimit ?? 200_000,
                    pricing: hasPricing
                        ? {
                            inputPerToken: info.pricing.prompt / 1_000_000,
                            outputPerToken: info.pricing.completion / 1_000_000,
                        }
                        : known?.pricing ?? null,
                });
            }
            // No catalog metadata; still route the mapped id through OpenRouter.
            return synthesize({ apiModel: targetId, openRouterId: targetId });
        }
        catch {
            // Catalog fetch failed; fall through to the generic config below.
        }
    }
    // Generic config for custom endpoints or a failed catalog fetch.
    return synthesize();
}
142
/**
 * Whether `model` is a configured model in the pro (long-running) tier.
 */
export function isProModel(model) {
    if (!isKnownModel(model)) {
        return false;
    }
    return PRO_MODELS.has(model);
}
@@ -55,7 +55,7 @@ export function startOscProgress(options = {}) {
55
55
  timer.unref?.();
56
56
  let stopped = false;
57
57
  return () => {
58
- // biome-ignore lint/nursery/noUnnecessaryConditions: multiple callers may try to stop
58
+ // multiple callers may try to stop
59
59
  if (stopped) {
60
60
  return;
61
61
  }
@@ -4,7 +4,7 @@ import fs from 'node:fs/promises';
4
4
  import path from 'node:path';
5
5
  import process from 'node:process';
6
6
  import { performance } from 'node:perf_hooks';
7
- import { DEFAULT_SYSTEM_PROMPT, MODEL_CONFIGS, PRO_MODELS, TOKENIZER_OPTIONS } from './config.js';
7
+ import { DEFAULT_SYSTEM_PROMPT, MODEL_CONFIGS, TOKENIZER_OPTIONS } from './config.js';
8
8
  import { readFiles } from './files.js';
9
9
  import { buildPrompt, buildRequestBody } from './request.js';
10
10
  import { estimateRequestTokens } from './tokenEstimate.js';
@@ -21,6 +21,7 @@ import { resolveClaudeModelId } from './claude.js';
21
21
  import { renderMarkdownAnsi } from '../cli/markdownRenderer.js';
22
22
  import { executeBackgroundResponse } from './background.js';
23
23
  import { formatTokenEstimate, formatTokenValue, resolvePreviewMode } from './runUtils.js';
24
+ import { defaultOpenRouterBaseUrl, isKnownModel, isOpenRouterBaseUrl, isProModel, resolveModelConfig, normalizeOpenRouterBaseUrl, } from './modelResolver.js';
24
25
  const isStdoutTty = process.stdout.isTTY && chalk.level > 0;
25
26
  const dim = (text) => (isStdoutTty ? kleur.dim(text) : text);
26
27
  // Default timeout for non-pro API runs (fast models) — give them up to 120s.
@@ -36,15 +37,42 @@ export async function runOracle(options, deps = {}) {
36
37
  : () => true;
37
38
  const isTty = allowStdout && isStdoutTty;
38
39
  const resolvedXaiBaseUrl = process.env.XAI_BASE_URL?.trim() || 'https://api.x.ai/v1';
40
+ const openRouterApiKey = process.env.OPENROUTER_API_KEY?.trim();
41
+ const defaultOpenRouterBase = defaultOpenRouterBaseUrl();
42
+ const knownModelConfig = isKnownModel(options.model) ? MODEL_CONFIGS[options.model] : undefined;
43
+ const provider = knownModelConfig?.provider ?? 'other';
44
+ const hasOpenAIKey = Boolean(optionsApiKey) ||
45
+ Boolean(process.env.OPENAI_API_KEY) ||
46
+ Boolean(process.env.AZURE_OPENAI_API_KEY && options.azure?.endpoint);
47
+ const hasAnthropicKey = Boolean(optionsApiKey) || Boolean(process.env.ANTHROPIC_API_KEY);
48
+ const hasGeminiKey = Boolean(optionsApiKey) || Boolean(process.env.GEMINI_API_KEY);
49
+ const hasXaiKey = Boolean(optionsApiKey) || Boolean(process.env.XAI_API_KEY);
39
50
  let baseUrl = options.baseUrl?.trim();
40
51
  if (!baseUrl) {
41
52
  if (options.model.startsWith('grok')) {
42
53
  baseUrl = resolvedXaiBaseUrl;
43
54
  }
55
+ else if (provider === 'anthropic') {
56
+ baseUrl = process.env.ANTHROPIC_BASE_URL?.trim();
57
+ }
44
58
  else {
45
59
  baseUrl = process.env.OPENAI_BASE_URL?.trim();
46
60
  }
47
61
  }
62
+ const providerKeyMissing = (provider === 'openai' && !hasOpenAIKey) ||
63
+ (provider === 'anthropic' && !hasAnthropicKey) ||
64
+ (provider === 'google' && !hasGeminiKey) ||
65
+ (provider === 'xai' && !hasXaiKey) ||
66
+ provider === 'other';
67
+ const openRouterFallback = providerKeyMissing && Boolean(openRouterApiKey);
68
+ if (!baseUrl || openRouterFallback) {
69
+ if (openRouterFallback) {
70
+ baseUrl = defaultOpenRouterBase;
71
+ }
72
+ }
73
+ if (baseUrl && isOpenRouterBaseUrl(baseUrl)) {
74
+ baseUrl = normalizeOpenRouterBaseUrl(baseUrl);
75
+ }
48
76
  const logVerbose = (message) => {
49
77
  if (options.verbose) {
50
78
  log(dim(`[verbose] ${message}`));
@@ -54,51 +82,61 @@ export async function runOracle(options, deps = {}) {
54
82
  const isPreview = Boolean(previewMode);
55
83
  const isAzureOpenAI = Boolean(options.azure?.endpoint);
56
84
  const getApiKeyForModel = (model) => {
57
- if (model.startsWith('gpt')) {
85
+ if (isOpenRouterBaseUrl(baseUrl) || openRouterFallback) {
86
+ return { key: optionsApiKey ?? openRouterApiKey, source: 'OPENROUTER_API_KEY' };
87
+ }
88
+ if (typeof model === 'string' && model.startsWith('gpt')) {
58
89
  if (optionsApiKey)
59
- return optionsApiKey;
90
+ return { key: optionsApiKey, source: 'apiKey option' };
60
91
  if (isAzureOpenAI) {
61
- return process.env.AZURE_OPENAI_API_KEY ?? process.env.OPENAI_API_KEY;
92
+ const key = process.env.AZURE_OPENAI_API_KEY ?? process.env.OPENAI_API_KEY;
93
+ return { key, source: 'AZURE_OPENAI_API_KEY|OPENAI_API_KEY' };
62
94
  }
63
- return process.env.OPENAI_API_KEY;
95
+ return { key: process.env.OPENAI_API_KEY, source: 'OPENAI_API_KEY' };
64
96
  }
65
- if (model.startsWith('gemini')) {
66
- return optionsApiKey ?? process.env.GEMINI_API_KEY;
97
+ if (typeof model === 'string' && model.startsWith('gemini')) {
98
+ return { key: optionsApiKey ?? process.env.GEMINI_API_KEY, source: 'GEMINI_API_KEY' };
67
99
  }
68
- if (model.startsWith('claude')) {
69
- return optionsApiKey ?? process.env.ANTHROPIC_API_KEY;
100
+ if (typeof model === 'string' && model.startsWith('claude')) {
101
+ return { key: optionsApiKey ?? process.env.ANTHROPIC_API_KEY, source: 'ANTHROPIC_API_KEY' };
70
102
  }
71
- if (model.startsWith('grok')) {
72
- return optionsApiKey ?? process.env.XAI_API_KEY;
103
+ if (typeof model === 'string' && model.startsWith('grok')) {
104
+ return { key: optionsApiKey ?? process.env.XAI_API_KEY, source: 'XAI_API_KEY' };
73
105
  }
74
- return undefined;
106
+ return { key: optionsApiKey ?? openRouterApiKey, source: optionsApiKey ? 'apiKey option' : 'OPENROUTER_API_KEY' };
75
107
  };
76
- const envVar = options.model.startsWith('gpt')
77
- ? isAzureOpenAI
78
- ? 'AZURE_OPENAI_API_KEY (or OPENAI_API_KEY)'
79
- : 'OPENAI_API_KEY'
80
- : options.model.startsWith('gemini')
81
- ? 'GEMINI_API_KEY'
82
- : options.model.startsWith('claude')
83
- ? 'ANTHROPIC_API_KEY'
84
- : 'XAI_API_KEY';
85
- const apiKey = getApiKeyForModel(options.model);
108
+ const apiKeyResult = getApiKeyForModel(options.model);
109
+ const apiKey = apiKeyResult.key;
86
110
  if (!apiKey) {
111
+ const envVar = isOpenRouterBaseUrl(baseUrl) || openRouterFallback
112
+ ? 'OPENROUTER_API_KEY'
113
+ : options.model.startsWith('gpt')
114
+ ? isAzureOpenAI
115
+ ? 'AZURE_OPENAI_API_KEY (or OPENAI_API_KEY)'
116
+ : 'OPENAI_API_KEY'
117
+ : options.model.startsWith('gemini')
118
+ ? 'GEMINI_API_KEY'
119
+ : options.model.startsWith('claude')
120
+ ? 'ANTHROPIC_API_KEY'
121
+ : options.model.startsWith('grok')
122
+ ? 'XAI_API_KEY'
123
+ : 'OPENROUTER_API_KEY';
87
124
  throw new PromptValidationError(`Missing ${envVar}. Set it via the environment or a .env file.`, {
88
125
  env: envVar,
89
126
  });
90
127
  }
128
+ const envVar = apiKeyResult.source;
91
129
  const minPromptLength = Number.parseInt(process.env.ORACLE_MIN_PROMPT_CHARS ?? '10', 10);
92
130
  const promptLength = options.prompt?.trim().length ?? 0;
93
131
  // Enforce the short-prompt guardrail on pro-tier models because they're costly; cheaper models can run short prompts without blocking.
94
- const isProTierModel = PRO_MODELS.has(options.model);
132
+ const isProTierModel = isProModel(options.model);
95
133
  if (isProTierModel && !Number.isNaN(minPromptLength) && promptLength < minPromptLength) {
96
134
  throw new PromptValidationError(`Prompt is too short (<${minPromptLength} chars). This was likely accidental; please provide more detail.`, { minPromptLength, promptLength });
97
135
  }
98
- const modelConfig = MODEL_CONFIGS[options.model];
99
- if (!modelConfig) {
100
- throw new PromptValidationError(`Unsupported model "${options.model}". Choose one of: ${Object.keys(MODEL_CONFIGS).join(', ')}`, { model: options.model });
101
- }
136
+ const modelConfig = await resolveModelConfig(options.model, {
137
+ baseUrl,
138
+ openRouterApiKey: openRouterApiKey ?? (isOpenRouterBaseUrl(baseUrl) ? apiKey : undefined),
139
+ });
102
140
  const isLongRunningModel = isProTierModel;
103
141
  const supportsBackground = modelConfig.supportsBackground !== false;
104
142
  const useBackground = supportsBackground ? options.background ?? isLongRunningModel : false;
@@ -227,9 +265,11 @@ export async function runOracle(options, deps = {}) {
227
265
  }
228
266
  const apiEndpoint = modelConfig.model.startsWith('gemini')
229
267
  ? undefined
230
- : modelConfig.model.startsWith('claude')
231
- ? process.env.ANTHROPIC_BASE_URL ?? baseUrl
232
- : baseUrl;
268
+ : isOpenRouterBaseUrl(baseUrl)
269
+ ? baseUrl
270
+ : modelConfig.model.startsWith('claude')
271
+ ? process.env.ANTHROPIC_BASE_URL ?? baseUrl
272
+ : baseUrl;
233
273
  const clientInstance = client ??
234
274
  clientFactory(apiKey, {
235
275
  baseUrl: apiEndpoint,
@@ -289,7 +329,15 @@ export async function runOracle(options, deps = {}) {
289
329
  elapsedMs = now() - runStart;
290
330
  }
291
331
  else {
292
- const stream = await clientInstance.responses.stream(requestBody);
332
+ let stream;
333
+ try {
334
+ stream = await clientInstance.responses.stream(requestBody);
335
+ }
336
+ catch (streamInitError) {
337
+ const transportError = toTransportError(streamInitError, requestBody.model);
338
+ log(chalk.yellow(describeTransportError(transportError, timeoutMs)));
339
+ throw transportError;
340
+ }
293
341
  let heartbeatActive = false;
294
342
  let stopHeartbeat = null;
295
343
  const stopHeartbeatNow = () => {
@@ -348,7 +396,7 @@ export async function runOracle(options, deps = {}) {
348
396
  catch (streamError) {
349
397
  // stream.abort() is not available on the interface
350
398
  stopHeartbeatNow();
351
- const transportError = toTransportError(streamError);
399
+ const transportError = toTransportError(streamError, requestBody.model);
352
400
  log(chalk.yellow(describeTransportError(transportError, timeoutMs)));
353
401
  throw transportError;
354
402
  }
@@ -364,7 +412,7 @@ export async function runOracle(options, deps = {}) {
364
412
  if (!response) {
365
413
  throw new Error('API did not return a response.');
366
414
  }
367
- // biome-ignore lint/nursery/noUnnecessaryConditions: we only add spacing when any streamed text was printed
415
+ // We only add spacing when streamed text was printed.
368
416
  if (sawTextDelta && !options.silent) {
369
417
  const fullStreamedText = streamedChunks.join('');
370
418
  const shouldRenderAfterStream = isTty && !renderPlain && fullStreamedText.length > 0;
@@ -410,7 +458,7 @@ export async function runOracle(options, deps = {}) {
410
458
  }
411
459
  const answerText = extractTextOutput(response);
412
460
  if (!options.silent) {
413
- // biome-ignore lint/nursery/noUnnecessaryConditions: flips true when streaming events arrive
461
+ // Flag flips to true when streaming events arrive.
414
462
  if (sawTextDelta) {
415
463
  // Already handled above (rendered or streamed); avoid double-printing.
416
464
  }
@@ -8,20 +8,24 @@ export function resolvePreviewMode(value) {
8
8
  }
9
9
  return undefined;
10
10
  }
11
- export function formatTokenEstimate(value, format = (text) => text) {
12
- if (value >= 1000) {
13
- const abbreviated = Math.floor(value / 100) / 10; // 4,252 -> 4.2
14
- const text = `${abbreviated.toFixed(1).replace(/\.0$/, '')}k`;
15
- return format(text);
11
/**
 * Format a token count for display. Magnitudes of 1000 or more are
 * abbreviated with a "k" suffix, keeping up to two decimals and trimming
 * trailing zeros (11380 -> "11.38k", 1500 -> "1.5k", 1000 -> "1k");
 * smaller values are rendered via toLocaleString().
 */
export function formatTokenCount(value) {
    if (Math.abs(value) < 1000) {
        return value.toLocaleString();
    }
    // Number(...) drops the trailing zeros left by toFixed ("1.50" -> 1.5).
    const thousands = Number((value / 1000).toFixed(2));
    return `${thousands}k`;
}
/**
 * Format a token estimate, passing the text through an optional decorator
 * (defaults to the identity function).
 */
export function formatTokenEstimate(value, format = (text) => text) {
    const text = formatTokenCount(value);
    return format(text);
}
/**
 * Format one cell of the token-usage summary. `index` selects the usage
 * field backing the value (0=input, 1=output, 2=reasoning, 3=total); when
 * that field is missing the value is an estimate and gets a "*" suffix.
 */
export function formatTokenValue(value, usage, index) {
    const backingFields = ['input_tokens', 'output_tokens', 'reasoning_tokens', 'total_tokens'];
    const field = backingFields[index];
    const isEstimated = field !== undefined && usage?.[field] == null;
    const text = formatTokenCount(value);
    return isEstimated ? `${text}*` : text;
}