@indykish/oracle 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +215 -0
- package/assets-oracle-icon.png +0 -0
- package/dist/bin/oracle-cli.js +1252 -0
- package/dist/bin/oracle-mcp.js +6 -0
- package/dist/scripts/agent-send.js +147 -0
- package/dist/scripts/browser-tools.js +536 -0
- package/dist/scripts/check.js +21 -0
- package/dist/scripts/debug/extract-chatgpt-response.js +53 -0
- package/dist/scripts/docs-list.js +110 -0
- package/dist/scripts/git-policy.js +125 -0
- package/dist/scripts/run-cli.js +14 -0
- package/dist/scripts/runner.js +1378 -0
- package/dist/scripts/test-browser.js +103 -0
- package/dist/scripts/test-remote-chrome.js +68 -0
- package/dist/src/bridge/connection.js +103 -0
- package/dist/src/bridge/userConfigFile.js +28 -0
- package/dist/src/browser/actions/assistantResponse.js +1067 -0
- package/dist/src/browser/actions/attachmentDataTransfer.js +138 -0
- package/dist/src/browser/actions/attachments.js +1910 -0
- package/dist/src/browser/actions/domEvents.js +19 -0
- package/dist/src/browser/actions/modelSelection.js +485 -0
- package/dist/src/browser/actions/navigation.js +445 -0
- package/dist/src/browser/actions/promptComposer.js +485 -0
- package/dist/src/browser/actions/remoteFileTransfer.js +37 -0
- package/dist/src/browser/actions/thinkingTime.js +206 -0
- package/dist/src/browser/chromeLifecycle.js +344 -0
- package/dist/src/browser/config.js +103 -0
- package/dist/src/browser/constants.js +71 -0
- package/dist/src/browser/cookies.js +191 -0
- package/dist/src/browser/detect.js +164 -0
- package/dist/src/browser/domDebug.js +36 -0
- package/dist/src/browser/index.js +1741 -0
- package/dist/src/browser/modelStrategy.js +13 -0
- package/dist/src/browser/pageActions.js +5 -0
- package/dist/src/browser/policies.js +43 -0
- package/dist/src/browser/profileState.js +280 -0
- package/dist/src/browser/prompt.js +152 -0
- package/dist/src/browser/promptSummary.js +20 -0
- package/dist/src/browser/reattach.js +186 -0
- package/dist/src/browser/reattachHelpers.js +382 -0
- package/dist/src/browser/sessionRunner.js +119 -0
- package/dist/src/browser/types.js +1 -0
- package/dist/src/browser/utils.js +122 -0
- package/dist/src/browserMode.js +1 -0
- package/dist/src/cli/bridge/claudeConfig.js +54 -0
- package/dist/src/cli/bridge/client.js +73 -0
- package/dist/src/cli/bridge/codexConfig.js +43 -0
- package/dist/src/cli/bridge/doctor.js +107 -0
- package/dist/src/cli/bridge/host.js +259 -0
- package/dist/src/cli/browserConfig.js +278 -0
- package/dist/src/cli/browserDefaults.js +81 -0
- package/dist/src/cli/bundleWarnings.js +9 -0
- package/dist/src/cli/clipboard.js +10 -0
- package/dist/src/cli/detach.js +11 -0
- package/dist/src/cli/dryRun.js +105 -0
- package/dist/src/cli/duplicatePromptGuard.js +14 -0
- package/dist/src/cli/engine.js +41 -0
- package/dist/src/cli/errorUtils.js +9 -0
- package/dist/src/cli/format.js +13 -0
- package/dist/src/cli/help.js +77 -0
- package/dist/src/cli/hiddenAliases.js +22 -0
- package/dist/src/cli/markdownBundle.js +17 -0
- package/dist/src/cli/markdownRenderer.js +97 -0
- package/dist/src/cli/notifier.js +306 -0
- package/dist/src/cli/options.js +281 -0
- package/dist/src/cli/oscUtils.js +2 -0
- package/dist/src/cli/promptRequirement.js +17 -0
- package/dist/src/cli/renderFlags.js +9 -0
- package/dist/src/cli/renderOutput.js +26 -0
- package/dist/src/cli/rootAlias.js +30 -0
- package/dist/src/cli/runOptions.js +78 -0
- package/dist/src/cli/sessionCommand.js +111 -0
- package/dist/src/cli/sessionDisplay.js +567 -0
- package/dist/src/cli/sessionRunner.js +602 -0
- package/dist/src/cli/sessionTable.js +92 -0
- package/dist/src/cli/tagline.js +258 -0
- package/dist/src/cli/tui/index.js +486 -0
- package/dist/src/cli/writeOutputPath.js +21 -0
- package/dist/src/config.js +26 -0
- package/dist/src/gemini-web/client.js +328 -0
- package/dist/src/gemini-web/executor.js +285 -0
- package/dist/src/gemini-web/index.js +1 -0
- package/dist/src/gemini-web/types.js +1 -0
- package/dist/src/heartbeat.js +43 -0
- package/dist/src/mcp/server.js +40 -0
- package/dist/src/mcp/tools/consult.js +290 -0
- package/dist/src/mcp/tools/sessionResources.js +75 -0
- package/dist/src/mcp/tools/sessions.js +105 -0
- package/dist/src/mcp/types.js +22 -0
- package/dist/src/mcp/utils.js +37 -0
- package/dist/src/oracle/background.js +141 -0
- package/dist/src/oracle/claude.js +101 -0
- package/dist/src/oracle/client.js +197 -0
- package/dist/src/oracle/config.js +227 -0
- package/dist/src/oracle/errors.js +132 -0
- package/dist/src/oracle/files.js +378 -0
- package/dist/src/oracle/finishLine.js +32 -0
- package/dist/src/oracle/format.js +30 -0
- package/dist/src/oracle/fsAdapter.js +10 -0
- package/dist/src/oracle/gemini.js +195 -0
- package/dist/src/oracle/logging.js +36 -0
- package/dist/src/oracle/markdown.js +46 -0
- package/dist/src/oracle/modelResolver.js +183 -0
- package/dist/src/oracle/multiModelRunner.js +153 -0
- package/dist/src/oracle/oscProgress.js +24 -0
- package/dist/src/oracle/promptAssembly.js +13 -0
- package/dist/src/oracle/request.js +50 -0
- package/dist/src/oracle/run.js +596 -0
- package/dist/src/oracle/runUtils.js +31 -0
- package/dist/src/oracle/tokenEstimate.js +37 -0
- package/dist/src/oracle/tokenStats.js +39 -0
- package/dist/src/oracle/tokenStringifier.js +24 -0
- package/dist/src/oracle/types.js +1 -0
- package/dist/src/oracle.js +12 -0
- package/dist/src/oracleHome.js +13 -0
- package/dist/src/remote/client.js +129 -0
- package/dist/src/remote/health.js +113 -0
- package/dist/src/remote/remoteServiceConfig.js +31 -0
- package/dist/src/remote/server.js +533 -0
- package/dist/src/remote/types.js +1 -0
- package/dist/src/sessionManager.js +637 -0
- package/dist/src/sessionStore.js +56 -0
- package/dist/src/version.js +39 -0
- package/dist/vendor/oracle-notifier/OracleNotifier.swift +45 -0
- package/dist/vendor/oracle-notifier/README.md +24 -0
- package/dist/vendor/oracle-notifier/build-notifier.sh +93 -0
- package/package.json +115 -0
- package/vendor/oracle-notifier/OracleNotifier.swift +45 -0
- package/vendor/oracle-notifier/README.md +24 -0
- package/vendor/oracle-notifier/build-notifier.sh +93 -0
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import fs from 'node:fs/promises';
|
|
2
|
+
import { DEFAULT_SYSTEM_PROMPT } from './config.js';
|
|
3
|
+
import { createFileSections, readFiles } from './files.js';
|
|
4
|
+
import { formatFileSection } from './markdown.js';
|
|
5
|
+
import { createFsAdapter } from './fsAdapter.js';
|
|
6
|
+
/**
 * Append attached-file sections to a base prompt.
 *
 * @param {string} basePrompt - The user's prompt text; nullish is treated as empty
 *   (previously this crashed with a TypeError when files were attached).
 * @param {Array<object>} [files=[]] - Files returned by readFiles (each has a `path`;
 *   exact shape is defined in ./files.js — assumed compatible with createFileSections).
 * @param {string} [cwd=process.cwd()] - Base directory used to compute display paths.
 * @returns {string} The base prompt alone when no files are attached, otherwise the
 *   trimmed prompt followed by the blank-line-separated file sections.
 */
export function buildPrompt(basePrompt, files = [], cwd = process.cwd()) {
  if (!files.length) {
    return basePrompt;
  }
  const sections = createFileSections(files, cwd);
  const sectionText = sections.map((section) => section.sectionText).join('\n\n');
  // Guard against a nullish prompt so attaching files never throws; trim so exactly
  // one blank line separates the prompt from the first file section.
  return `${(basePrompt ?? '').trim()}\n\n${sectionText}`;
}
|
|
14
|
+
/**
 * Assemble the JSON request body for a Responses-API style call.
 *
 * @param {object} args
 * @param {object} args.modelConfig - Model entry; `apiModel` (when present) overrides
 *   `model` as the wire id, and `searchToolType`/`reasoning` tune optional fields.
 * @param {string} args.systemPrompt - Sent as the `instructions` field.
 * @param {string} args.userPrompt - Full user prompt (including any file sections).
 * @param {boolean} args.searchEnabled - When true, attaches a single search tool.
 * @param {number} [args.maxOutputTokens] - Cap on generated tokens, if any.
 * @param {boolean} [args.background] - When truthy, requests a background run.
 * @param {boolean} [args.storeResponse] - When truthy, asks the API to store the response.
 * @returns {object} The request body; optional fields are `undefined` when disabled.
 */
export function buildRequestBody({ modelConfig, systemPrompt, userPrompt, searchEnabled, maxOutputTokens, background, storeResponse, }) {
  // The Responses API expects the prompt wrapped as a single user message.
  const userMessage = {
    role: 'user',
    content: [{ type: 'input_text', text: userPrompt }],
  };
  const toolType = modelConfig.searchToolType ?? 'web_search_preview';
  const tools = searchEnabled ? [{ type: toolType }] : undefined;
  return {
    model: modelConfig.apiModel ?? modelConfig.model,
    instructions: systemPrompt,
    input: [userMessage],
    tools,
    reasoning: modelConfig.reasoning || undefined,
    max_output_tokens: maxOutputTokens,
    // Boolean flags are normalized to literal `true` or omitted via `undefined`.
    background: background ? true : undefined,
    store: storeResponse ? true : undefined,
  };
}
|
|
37
|
+
/**
 * Render the fully assembled prompt (system + user + attached files) as a plain
 * markdown document, e.g. for --preview output.
 *
 * @param {object} options - Run options; uses `file`, `system`, and `prompt`.
 * @param {object} [deps={}] - Injectable dependencies for testing: `cwd` and `fs`.
 * @returns {Promise<string>} The document with blank-line runs collapsed and
 *   trailing whitespace removed.
 */
export async function renderPromptMarkdown(options, deps = {}) {
  const cwd = deps.cwd ?? process.cwd();
  const fsModule = deps.fs ?? createFsAdapter(fs);
  const files = await readFiles(options.file ?? [], { cwd, fsModule });
  const sections = createFileSections(files, cwd);
  // An empty/whitespace-only --system falls back to the default system prompt.
  const systemPrompt = options.system?.trim() || DEFAULT_SYSTEM_PROMPT;
  const userPrompt = (options.prompt ?? '').trim();
  const parts = [
    '[SYSTEM]',
    systemPrompt,
    '',
    '[USER]',
    userPrompt,
    '',
    ...sections.map((section) => formatFileSection(section.displayPath, section.content)),
  ];
  // Collapse runs of 3+ newlines to a single blank line and drop trailing whitespace.
  return parts.join('\n').replace(/\n{3,}/g, '\n\n').trimEnd();
}
|
|
@@ -0,0 +1,596 @@
|
|
|
1
|
+
import chalk from 'chalk';
|
|
2
|
+
import kleur from 'kleur';
|
|
3
|
+
import fs from 'node:fs/promises';
|
|
4
|
+
import path from 'node:path';
|
|
5
|
+
import process from 'node:process';
|
|
6
|
+
import { performance } from 'node:perf_hooks';
|
|
7
|
+
import { DEFAULT_SYSTEM_PROMPT, MODEL_CONFIGS, TOKENIZER_OPTIONS } from './config.js';
|
|
8
|
+
import { readFiles } from './files.js';
|
|
9
|
+
import { buildPrompt, buildRequestBody } from './request.js';
|
|
10
|
+
import { estimateRequestTokens } from './tokenEstimate.js';
|
|
11
|
+
import { formatElapsed } from './format.js';
|
|
12
|
+
import { formatFinishLine } from './finishLine.js';
|
|
13
|
+
import { getFileTokenStats, printFileTokenStats } from './tokenStats.js';
|
|
14
|
+
import { OracleResponseError, OracleTransportError, PromptValidationError, describeTransportError, toTransportError, } from './errors.js';
|
|
15
|
+
import { createDefaultClientFactory } from './client.js';
|
|
16
|
+
import { formatBaseUrlForLog, maskApiKey } from './logging.js';
|
|
17
|
+
import { startHeartbeat } from '../heartbeat.js';
|
|
18
|
+
import { startOscProgress } from './oscProgress.js';
|
|
19
|
+
import { createFsAdapter } from './fsAdapter.js';
|
|
20
|
+
import { resolveGeminiModelId } from './gemini.js';
|
|
21
|
+
import { resolveClaudeModelId } from './claude.js';
|
|
22
|
+
import { renderMarkdownAnsi } from '../cli/markdownRenderer.js';
|
|
23
|
+
import { createMarkdownStreamer } from 'markdansi';
|
|
24
|
+
import { executeBackgroundResponse } from './background.js';
|
|
25
|
+
import { formatTokenEstimate, formatTokenValue, resolvePreviewMode } from './runUtils.js';
|
|
26
|
+
import { estimateUsdCost } from 'tokentally';
|
|
27
|
+
import { defaultOpenRouterBaseUrl, isKnownModel, isOpenRouterBaseUrl, isProModel, resolveModelConfig, normalizeOpenRouterBaseUrl, } from './modelResolver.js';
|
|
28
|
+
// True only when stdout is an interactive terminal AND chalk detected color support.
const isStdoutTty = process.stdout.isTTY && chalk.level > 0;
// Dim styling helper: a no-op passthrough when output is not a color-capable TTY.
const dim = (text) => {
  if (isStdoutTty) {
    return kleur.dim(text);
  }
  return text;
};
// Fast (non-pro) models get up to two minutes before the client-side timeout fires.
const DEFAULT_TIMEOUT_NON_PRO_MS = 120_000;
// Pro-tier models may legitimately run for up to an hour.
const DEFAULT_TIMEOUT_PRO_MS = 60 * 60 * 1000;
// Promise-based sleep; the default implementation of the injectable `wait` dependency.
const defaultWait = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
|
|
36
|
+
export async function runOracle(options, deps = {}) {
|
|
37
|
+
const { apiKey: optionsApiKey = options.apiKey, cwd = process.cwd(), fs: fsModule = createFsAdapter(fs), log = console.log, write: sinkWrite = (_text) => true, allowStdout = true, stdoutWrite: stdoutWriteDep, now = () => performance.now(), clientFactory = createDefaultClientFactory(), client, wait = defaultWait, } = deps;
|
|
38
|
+
const stdoutWrite = allowStdout
|
|
39
|
+
? stdoutWriteDep ?? process.stdout.write.bind(process.stdout)
|
|
40
|
+
: () => true;
|
|
41
|
+
const isTty = allowStdout && isStdoutTty;
|
|
42
|
+
const resolvedXaiBaseUrl = process.env.XAI_BASE_URL?.trim() || 'https://api.x.ai/v1';
|
|
43
|
+
const openRouterApiKey = process.env.OPENROUTER_API_KEY?.trim();
|
|
44
|
+
const defaultOpenRouterBase = defaultOpenRouterBaseUrl();
|
|
45
|
+
const knownModelConfig = isKnownModel(options.model) ? MODEL_CONFIGS[options.model] : undefined;
|
|
46
|
+
const provider = knownModelConfig?.provider ?? 'other';
|
|
47
|
+
const hasOpenAIKey = Boolean(optionsApiKey) ||
|
|
48
|
+
Boolean(process.env.OPENAI_API_KEY) ||
|
|
49
|
+
Boolean(process.env.AZURE_OPENAI_API_KEY && options.azure?.endpoint);
|
|
50
|
+
const hasAnthropicKey = Boolean(optionsApiKey) || Boolean(process.env.ANTHROPIC_API_KEY);
|
|
51
|
+
const hasGeminiKey = Boolean(optionsApiKey) || Boolean(process.env.GEMINI_API_KEY);
|
|
52
|
+
const hasXaiKey = Boolean(optionsApiKey) || Boolean(process.env.XAI_API_KEY);
|
|
53
|
+
let baseUrl = options.baseUrl?.trim();
|
|
54
|
+
if (!baseUrl) {
|
|
55
|
+
if (options.model.startsWith('grok')) {
|
|
56
|
+
baseUrl = resolvedXaiBaseUrl;
|
|
57
|
+
}
|
|
58
|
+
else if (provider === 'anthropic') {
|
|
59
|
+
baseUrl = process.env.ANTHROPIC_BASE_URL?.trim();
|
|
60
|
+
}
|
|
61
|
+
else {
|
|
62
|
+
baseUrl = process.env.OPENAI_BASE_URL?.trim();
|
|
63
|
+
}
|
|
64
|
+
}
|
|
65
|
+
const providerKeyMissing = (provider === 'openai' && !hasOpenAIKey) ||
|
|
66
|
+
(provider === 'anthropic' && !hasAnthropicKey) ||
|
|
67
|
+
(provider === 'google' && !hasGeminiKey) ||
|
|
68
|
+
(provider === 'xai' && !hasXaiKey) ||
|
|
69
|
+
provider === 'other';
|
|
70
|
+
const openRouterFallback = providerKeyMissing && Boolean(openRouterApiKey);
|
|
71
|
+
if (!baseUrl || openRouterFallback) {
|
|
72
|
+
if (openRouterFallback) {
|
|
73
|
+
baseUrl = defaultOpenRouterBase;
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
if (baseUrl && isOpenRouterBaseUrl(baseUrl)) {
|
|
77
|
+
baseUrl = normalizeOpenRouterBaseUrl(baseUrl);
|
|
78
|
+
}
|
|
79
|
+
const logVerbose = (message) => {
|
|
80
|
+
if (options.verbose) {
|
|
81
|
+
log(dim(`[verbose] ${message}`));
|
|
82
|
+
}
|
|
83
|
+
};
|
|
84
|
+
const previewMode = resolvePreviewMode(options.previewMode ?? options.preview);
|
|
85
|
+
const isPreview = Boolean(previewMode);
|
|
86
|
+
const isAzureOpenAI = Boolean(options.azure?.endpoint);
|
|
87
|
+
const getApiKeyForModel = (model) => {
|
|
88
|
+
if (isOpenRouterBaseUrl(baseUrl) || openRouterFallback) {
|
|
89
|
+
return { key: optionsApiKey ?? openRouterApiKey, source: 'OPENROUTER_API_KEY' };
|
|
90
|
+
}
|
|
91
|
+
if (typeof model === 'string' && model.startsWith('gpt')) {
|
|
92
|
+
if (optionsApiKey)
|
|
93
|
+
return { key: optionsApiKey, source: 'apiKey option' };
|
|
94
|
+
if (isAzureOpenAI) {
|
|
95
|
+
const key = process.env.AZURE_OPENAI_API_KEY ?? process.env.OPENAI_API_KEY;
|
|
96
|
+
return { key, source: 'AZURE_OPENAI_API_KEY|OPENAI_API_KEY' };
|
|
97
|
+
}
|
|
98
|
+
return { key: process.env.OPENAI_API_KEY, source: 'OPENAI_API_KEY' };
|
|
99
|
+
}
|
|
100
|
+
if (typeof model === 'string' && model.startsWith('gemini')) {
|
|
101
|
+
return { key: optionsApiKey ?? process.env.GEMINI_API_KEY, source: 'GEMINI_API_KEY' };
|
|
102
|
+
}
|
|
103
|
+
if (typeof model === 'string' && model.startsWith('claude')) {
|
|
104
|
+
return { key: optionsApiKey ?? process.env.ANTHROPIC_API_KEY, source: 'ANTHROPIC_API_KEY' };
|
|
105
|
+
}
|
|
106
|
+
if (typeof model === 'string' && model.startsWith('grok')) {
|
|
107
|
+
return { key: optionsApiKey ?? process.env.XAI_API_KEY, source: 'XAI_API_KEY' };
|
|
108
|
+
}
|
|
109
|
+
return { key: optionsApiKey ?? openRouterApiKey, source: optionsApiKey ? 'apiKey option' : 'OPENROUTER_API_KEY' };
|
|
110
|
+
};
|
|
111
|
+
const apiKeyResult = getApiKeyForModel(options.model);
|
|
112
|
+
const apiKey = apiKeyResult.key;
|
|
113
|
+
if (!apiKey) {
|
|
114
|
+
const envVar = isOpenRouterBaseUrl(baseUrl) || openRouterFallback
|
|
115
|
+
? 'OPENROUTER_API_KEY'
|
|
116
|
+
: options.model.startsWith('gpt')
|
|
117
|
+
? isAzureOpenAI
|
|
118
|
+
? 'AZURE_OPENAI_API_KEY (or OPENAI_API_KEY)'
|
|
119
|
+
: 'OPENAI_API_KEY'
|
|
120
|
+
: options.model.startsWith('gemini')
|
|
121
|
+
? 'GEMINI_API_KEY'
|
|
122
|
+
: options.model.startsWith('claude')
|
|
123
|
+
? 'ANTHROPIC_API_KEY'
|
|
124
|
+
: options.model.startsWith('grok')
|
|
125
|
+
? 'XAI_API_KEY'
|
|
126
|
+
: 'OPENROUTER_API_KEY';
|
|
127
|
+
throw new PromptValidationError(`Missing ${envVar}. Set it via the environment or a .env file.`, {
|
|
128
|
+
env: envVar,
|
|
129
|
+
});
|
|
130
|
+
}
|
|
131
|
+
const envVar = apiKeyResult.source;
|
|
132
|
+
const minPromptLength = Number.parseInt(process.env.ORACLE_MIN_PROMPT_CHARS ?? '10', 10);
|
|
133
|
+
const promptLength = options.prompt?.trim().length ?? 0;
|
|
134
|
+
// Enforce the short-prompt guardrail on pro-tier models because they're costly; cheaper models can run short prompts without blocking.
|
|
135
|
+
const isProTierModel = isProModel(options.model);
|
|
136
|
+
if (isProTierModel && !Number.isNaN(minPromptLength) && promptLength < minPromptLength) {
|
|
137
|
+
throw new PromptValidationError(`Prompt is too short (<${minPromptLength} chars). This was likely accidental; please provide more detail.`, { minPromptLength, promptLength });
|
|
138
|
+
}
|
|
139
|
+
const resolverOpenRouterApiKey = openRouterFallback || isOpenRouterBaseUrl(baseUrl) ? openRouterApiKey ?? apiKey : undefined;
|
|
140
|
+
const modelConfig = await resolveModelConfig(options.model, {
|
|
141
|
+
baseUrl,
|
|
142
|
+
openRouterApiKey: resolverOpenRouterApiKey,
|
|
143
|
+
});
|
|
144
|
+
const isLongRunningModel = isProTierModel;
|
|
145
|
+
const supportsBackground = modelConfig.supportsBackground !== false;
|
|
146
|
+
const useBackground = supportsBackground ? options.background ?? isLongRunningModel : false;
|
|
147
|
+
const inputTokenBudget = options.maxInput ?? modelConfig.inputLimit;
|
|
148
|
+
const files = await readFiles(options.file ?? [], { cwd, fsModule });
|
|
149
|
+
const searchEnabled = options.search !== false;
|
|
150
|
+
logVerbose(`cwd: ${cwd}`);
|
|
151
|
+
let pendingNoFilesTip = null;
|
|
152
|
+
let pendingShortPromptTip = null;
|
|
153
|
+
if (files.length > 0) {
|
|
154
|
+
const displayPaths = files
|
|
155
|
+
.map((file) => path.relative(cwd, file.path) || file.path)
|
|
156
|
+
.slice(0, 10)
|
|
157
|
+
.join(', ');
|
|
158
|
+
const extra = files.length > 10 ? ` (+${files.length - 10} more)` : '';
|
|
159
|
+
logVerbose(`Attached files (${files.length}): ${displayPaths}${extra}`);
|
|
160
|
+
}
|
|
161
|
+
else {
|
|
162
|
+
logVerbose('No files attached.');
|
|
163
|
+
if (!isPreview) {
|
|
164
|
+
pendingNoFilesTip =
|
|
165
|
+
'Tip: no files attached — Oracle works best with project context. Add files via --file path/to/code or docs.';
|
|
166
|
+
}
|
|
167
|
+
}
|
|
168
|
+
const shortPrompt = (options.prompt?.trim().length ?? 0) < 80;
|
|
169
|
+
if (!isPreview && shortPrompt) {
|
|
170
|
+
pendingShortPromptTip =
|
|
171
|
+
'Tip: brief prompts often yield generic answers — aim for 6–30 sentences and attach key files.';
|
|
172
|
+
}
|
|
173
|
+
const fileTokenInfo = getFileTokenStats(files, {
|
|
174
|
+
cwd,
|
|
175
|
+
tokenizer: modelConfig.tokenizer,
|
|
176
|
+
tokenizerOptions: TOKENIZER_OPTIONS,
|
|
177
|
+
inputTokenBudget,
|
|
178
|
+
});
|
|
179
|
+
const totalFileTokens = fileTokenInfo.totalTokens;
|
|
180
|
+
logVerbose(`Attached files use ${totalFileTokens.toLocaleString()} tokens`);
|
|
181
|
+
const systemPrompt = options.system?.trim() || DEFAULT_SYSTEM_PROMPT;
|
|
182
|
+
const promptWithFiles = buildPrompt(options.prompt, files, cwd);
|
|
183
|
+
const fileCount = files.length;
|
|
184
|
+
const richTty = allowStdout && process.stdout.isTTY && chalk.level > 0;
|
|
185
|
+
const renderPlain = Boolean(options.renderPlain);
|
|
186
|
+
const timeoutSeconds = options.timeoutSeconds === undefined || options.timeoutSeconds === 'auto'
|
|
187
|
+
? isLongRunningModel
|
|
188
|
+
? DEFAULT_TIMEOUT_PRO_MS / 1000
|
|
189
|
+
: DEFAULT_TIMEOUT_NON_PRO_MS / 1000
|
|
190
|
+
: options.timeoutSeconds;
|
|
191
|
+
const timeoutMs = timeoutSeconds * 1000;
|
|
192
|
+
// Track the concrete model id we dispatch to (especially for Gemini preview aliases)
|
|
193
|
+
const effectiveModelId = options.effectiveModelId ??
|
|
194
|
+
(options.model.startsWith('gemini')
|
|
195
|
+
? resolveGeminiModelId(options.model)
|
|
196
|
+
: (modelConfig.apiModel ?? modelConfig.model));
|
|
197
|
+
const requestBody = buildRequestBody({
|
|
198
|
+
modelConfig,
|
|
199
|
+
systemPrompt,
|
|
200
|
+
userPrompt: promptWithFiles,
|
|
201
|
+
searchEnabled,
|
|
202
|
+
maxOutputTokens: options.maxOutput,
|
|
203
|
+
background: useBackground,
|
|
204
|
+
storeResponse: useBackground,
|
|
205
|
+
});
|
|
206
|
+
const estimatedInputTokens = estimateRequestTokens(requestBody, modelConfig);
|
|
207
|
+
const tokenLabel = formatTokenEstimate(estimatedInputTokens, (text) => (richTty ? chalk.green(text) : text));
|
|
208
|
+
const fileLabel = richTty ? chalk.magenta(fileCount.toString()) : fileCount.toString();
|
|
209
|
+
const filesPhrase = fileCount === 0 ? 'no files' : `${fileLabel} files`;
|
|
210
|
+
const headerModelLabelBase = richTty ? chalk.cyan(modelConfig.model) : modelConfig.model;
|
|
211
|
+
const headerModelSuffix = effectiveModelId !== modelConfig.model
|
|
212
|
+
? richTty
|
|
213
|
+
? chalk.gray(` (API: ${effectiveModelId})`)
|
|
214
|
+
: ` (API: ${effectiveModelId})`
|
|
215
|
+
: '';
|
|
216
|
+
const headerLine = `Calling ${headerModelLabelBase}${headerModelSuffix} — ${tokenLabel} tokens, ${filesPhrase}.`;
|
|
217
|
+
const shouldReportFiles = (options.filesReport || fileTokenInfo.totalTokens > inputTokenBudget) && fileTokenInfo.stats.length > 0;
|
|
218
|
+
if (!isPreview) {
|
|
219
|
+
if (!options.suppressHeader) {
|
|
220
|
+
log(headerLine);
|
|
221
|
+
}
|
|
222
|
+
const maskedKey = maskApiKey(apiKey);
|
|
223
|
+
if (maskedKey && options.verbose) {
|
|
224
|
+
const resolvedSuffix = effectiveModelId !== modelConfig.model ? ` (API: ${effectiveModelId})` : '';
|
|
225
|
+
log(dim(`Using ${envVar}=${maskedKey} for model ${modelConfig.model}${resolvedSuffix}`));
|
|
226
|
+
}
|
|
227
|
+
if (!options.suppressHeader &&
|
|
228
|
+
modelConfig.model === 'gpt-5.1-pro' &&
|
|
229
|
+
effectiveModelId === 'gpt-5.2-pro') {
|
|
230
|
+
log(dim('Note: `gpt-5.1-pro` is a stable CLI alias; OpenAI API uses `gpt-5.2-pro`.'));
|
|
231
|
+
}
|
|
232
|
+
if (baseUrl) {
|
|
233
|
+
log(dim(`Base URL: ${formatBaseUrlForLog(baseUrl)}`));
|
|
234
|
+
}
|
|
235
|
+
if (effectiveModelId !== modelConfig.model) {
|
|
236
|
+
log(dim(`Resolved model: ${modelConfig.model} → ${effectiveModelId}`));
|
|
237
|
+
}
|
|
238
|
+
if (options.background && !supportsBackground) {
|
|
239
|
+
log(dim('Background runs are not supported for this model; streaming in foreground instead.'));
|
|
240
|
+
}
|
|
241
|
+
if (!options.suppressTips) {
|
|
242
|
+
if (pendingNoFilesTip) {
|
|
243
|
+
log(dim(pendingNoFilesTip));
|
|
244
|
+
}
|
|
245
|
+
if (pendingShortPromptTip) {
|
|
246
|
+
log(dim(pendingShortPromptTip));
|
|
247
|
+
}
|
|
248
|
+
}
|
|
249
|
+
if (isLongRunningModel) {
|
|
250
|
+
log(dim('This model can take up to 60 minutes (usually replies much faster).'));
|
|
251
|
+
}
|
|
252
|
+
if (options.verbose || isLongRunningModel) {
|
|
253
|
+
log(dim('Press Ctrl+C to cancel.'));
|
|
254
|
+
}
|
|
255
|
+
}
|
|
256
|
+
if (shouldReportFiles) {
|
|
257
|
+
printFileTokenStats(fileTokenInfo, { inputTokenBudget, log });
|
|
258
|
+
}
|
|
259
|
+
if (estimatedInputTokens > inputTokenBudget) {
|
|
260
|
+
throw new PromptValidationError(`Input too large (${estimatedInputTokens.toLocaleString()} tokens). Limit is ${inputTokenBudget.toLocaleString()} tokens.`, { estimatedInputTokens, inputTokenBudget });
|
|
261
|
+
}
|
|
262
|
+
logVerbose(`Estimated tokens (request body): ${estimatedInputTokens.toLocaleString()}`);
|
|
263
|
+
if (isPreview && previewMode) {
|
|
264
|
+
if (previewMode === 'json' || previewMode === 'full') {
|
|
265
|
+
log('Request JSON');
|
|
266
|
+
log(JSON.stringify(requestBody, null, 2));
|
|
267
|
+
log('');
|
|
268
|
+
}
|
|
269
|
+
if (previewMode === 'full') {
|
|
270
|
+
log('Assembled Prompt');
|
|
271
|
+
log(promptWithFiles);
|
|
272
|
+
log('');
|
|
273
|
+
}
|
|
274
|
+
log(`Estimated input tokens: ${estimatedInputTokens.toLocaleString()} / ${inputTokenBudget.toLocaleString()} (model: ${modelConfig.model})`);
|
|
275
|
+
return {
|
|
276
|
+
mode: 'preview',
|
|
277
|
+
previewMode,
|
|
278
|
+
requestBody,
|
|
279
|
+
estimatedInputTokens,
|
|
280
|
+
inputTokenBudget,
|
|
281
|
+
};
|
|
282
|
+
}
|
|
283
|
+
const apiEndpoint = modelConfig.model.startsWith('gemini')
|
|
284
|
+
? undefined
|
|
285
|
+
: isOpenRouterBaseUrl(baseUrl)
|
|
286
|
+
? baseUrl
|
|
287
|
+
: modelConfig.model.startsWith('claude')
|
|
288
|
+
? process.env.ANTHROPIC_BASE_URL ?? baseUrl
|
|
289
|
+
: baseUrl;
|
|
290
|
+
const clientInstance = client ??
|
|
291
|
+
clientFactory(apiKey, {
|
|
292
|
+
baseUrl: apiEndpoint,
|
|
293
|
+
azure: options.azure,
|
|
294
|
+
model: options.model,
|
|
295
|
+
resolvedModelId: modelConfig.model.startsWith('claude')
|
|
296
|
+
? resolveClaudeModelId(effectiveModelId)
|
|
297
|
+
: modelConfig.model.startsWith('gemini')
|
|
298
|
+
? resolveGeminiModelId(effectiveModelId)
|
|
299
|
+
: effectiveModelId,
|
|
300
|
+
httpTimeoutMs: options.httpTimeoutMs,
|
|
301
|
+
});
|
|
302
|
+
logVerbose('Dispatching request to API...');
|
|
303
|
+
if (options.verbose) {
|
|
304
|
+
log(''); // ensure verbose section is separated from Answer stream
|
|
305
|
+
}
|
|
306
|
+
const stopOscProgress = startOscProgress({
|
|
307
|
+
label: useBackground ? 'Waiting for API (background)' : 'Waiting for API',
|
|
308
|
+
targetMs: useBackground ? timeoutMs : Math.min(timeoutMs, 10 * 60_000),
|
|
309
|
+
indeterminate: true,
|
|
310
|
+
write: sinkWrite,
|
|
311
|
+
});
|
|
312
|
+
const runStart = now();
|
|
313
|
+
let response = null;
|
|
314
|
+
let elapsedMs = 0;
|
|
315
|
+
let sawTextDelta = false;
|
|
316
|
+
let answerHeaderPrinted = false;
|
|
317
|
+
const allowAnswerHeader = options.suppressAnswerHeader !== true;
|
|
318
|
+
const timeoutExceeded = () => now() - runStart >= timeoutMs;
|
|
319
|
+
const throwIfTimedOut = () => {
|
|
320
|
+
if (timeoutExceeded()) {
|
|
321
|
+
throw new OracleTransportError('client-timeout', `Timed out waiting for API response after ${formatElapsed(timeoutMs)}.`);
|
|
322
|
+
}
|
|
323
|
+
};
|
|
324
|
+
const ensureAnswerHeader = () => {
|
|
325
|
+
if (options.silent || answerHeaderPrinted)
|
|
326
|
+
return;
|
|
327
|
+
// Always add a separating newline for readability; optionally include the label depending on caller needs.
|
|
328
|
+
log('');
|
|
329
|
+
if (allowAnswerHeader) {
|
|
330
|
+
log(chalk.bold('Answer:'));
|
|
331
|
+
}
|
|
332
|
+
answerHeaderPrinted = true;
|
|
333
|
+
};
|
|
334
|
+
try {
|
|
335
|
+
if (useBackground) {
|
|
336
|
+
response = await executeBackgroundResponse({
|
|
337
|
+
client: clientInstance,
|
|
338
|
+
requestBody,
|
|
339
|
+
log,
|
|
340
|
+
wait,
|
|
341
|
+
heartbeatIntervalMs: options.heartbeatIntervalMs,
|
|
342
|
+
now,
|
|
343
|
+
maxWaitMs: timeoutMs,
|
|
344
|
+
});
|
|
345
|
+
elapsedMs = now() - runStart;
|
|
346
|
+
}
|
|
347
|
+
else {
|
|
348
|
+
let stream;
|
|
349
|
+
try {
|
|
350
|
+
stream = await clientInstance.responses.stream(requestBody);
|
|
351
|
+
}
|
|
352
|
+
catch (streamInitError) {
|
|
353
|
+
const transportError = toTransportError(streamInitError, requestBody.model);
|
|
354
|
+
log(chalk.yellow(describeTransportError(transportError, timeoutMs)));
|
|
355
|
+
throw transportError;
|
|
356
|
+
}
|
|
357
|
+
let heartbeatActive = false;
|
|
358
|
+
let stopHeartbeat = null;
|
|
359
|
+
const stopHeartbeatNow = () => {
|
|
360
|
+
if (!heartbeatActive) {
|
|
361
|
+
return;
|
|
362
|
+
}
|
|
363
|
+
heartbeatActive = false;
|
|
364
|
+
stopHeartbeat?.();
|
|
365
|
+
stopHeartbeat = null;
|
|
366
|
+
};
|
|
367
|
+
if (options.heartbeatIntervalMs && options.heartbeatIntervalMs > 0) {
|
|
368
|
+
heartbeatActive = true;
|
|
369
|
+
stopHeartbeat = startHeartbeat({
|
|
370
|
+
intervalMs: options.heartbeatIntervalMs,
|
|
371
|
+
log: (message) => log(message),
|
|
372
|
+
isActive: () => heartbeatActive,
|
|
373
|
+
makeMessage: (elapsedMs) => {
|
|
374
|
+
const elapsedText = formatElapsed(elapsedMs);
|
|
375
|
+
const remainingMs = Math.max(timeoutMs - elapsedMs, 0);
|
|
376
|
+
const remainingLabel = remainingMs >= 60_000
|
|
377
|
+
? `${Math.ceil(remainingMs / 60_000)} min`
|
|
378
|
+
: `${Math.max(1, Math.ceil(remainingMs / 1000))}s`;
|
|
379
|
+
return `API connection active — ${elapsedText} elapsed. Timeout in ~${remainingLabel} if no response.`;
|
|
380
|
+
},
|
|
381
|
+
});
|
|
382
|
+
}
|
|
383
|
+
let markdownStreamer = null;
|
|
384
|
+
const flushMarkdownStreamer = () => {
|
|
385
|
+
if (!markdownStreamer)
|
|
386
|
+
return;
|
|
387
|
+
const rendered = markdownStreamer.finish();
|
|
388
|
+
markdownStreamer = null;
|
|
389
|
+
if (rendered) {
|
|
390
|
+
stdoutWrite(rendered);
|
|
391
|
+
}
|
|
392
|
+
};
|
|
393
|
+
try {
|
|
394
|
+
markdownStreamer =
|
|
395
|
+
isTty && !renderPlain
|
|
396
|
+
? createMarkdownStreamer({
|
|
397
|
+
render: renderMarkdownAnsi,
|
|
398
|
+
spacing: 'single',
|
|
399
|
+
mode: 'hybrid',
|
|
400
|
+
})
|
|
401
|
+
: null;
|
|
402
|
+
for await (const event of stream) {
|
|
403
|
+
throwIfTimedOut();
|
|
404
|
+
const isTextDelta = event.type === 'chunk' || event.type === 'response.output_text.delta';
|
|
405
|
+
if (!isTextDelta)
|
|
406
|
+
continue;
|
|
407
|
+
stopOscProgress();
|
|
408
|
+
stopHeartbeatNow();
|
|
409
|
+
sawTextDelta = true;
|
|
410
|
+
ensureAnswerHeader();
|
|
411
|
+
if (options.silent || typeof event.delta !== 'string')
|
|
412
|
+
continue;
|
|
413
|
+
// Always keep the log/bookkeeping sink up to date.
|
|
414
|
+
sinkWrite(event.delta);
|
|
415
|
+
if (renderPlain) {
|
|
416
|
+
// Plain mode: stream directly to stdout regardless of write sink.
|
|
417
|
+
stdoutWrite(event.delta);
|
|
418
|
+
continue;
|
|
419
|
+
}
|
|
420
|
+
if (markdownStreamer) {
|
|
421
|
+
const rendered = markdownStreamer.push(event.delta);
|
|
422
|
+
if (rendered) {
|
|
423
|
+
stdoutWrite(rendered);
|
|
424
|
+
}
|
|
425
|
+
continue;
|
|
426
|
+
}
|
|
427
|
+
// Non-TTY streams should still surface output; fall back to raw stdout.
|
|
428
|
+
stdoutWrite(event.delta);
|
|
429
|
+
}
|
|
430
|
+
flushMarkdownStreamer();
|
|
431
|
+
throwIfTimedOut();
|
|
432
|
+
}
|
|
433
|
+
catch (streamError) {
|
|
434
|
+
// stream.abort() is not available on the interface
|
|
435
|
+
flushMarkdownStreamer();
|
|
436
|
+
stopHeartbeatNow();
|
|
437
|
+
const transportError = toTransportError(streamError, requestBody.model);
|
|
438
|
+
log(chalk.yellow(describeTransportError(transportError, timeoutMs)));
|
|
439
|
+
throw transportError;
|
|
440
|
+
}
|
|
441
|
+
response = await stream.finalResponse();
|
|
442
|
+
throwIfTimedOut();
|
|
443
|
+
stopHeartbeatNow();
|
|
444
|
+
elapsedMs = now() - runStart;
|
|
445
|
+
}
|
|
446
|
+
}
|
|
447
|
+
finally {
|
|
448
|
+
stopOscProgress();
|
|
449
|
+
}
|
|
450
|
+
if (!response) {
|
|
451
|
+
throw new Error('API did not return a response.');
|
|
452
|
+
}
|
|
453
|
+
// We only add spacing when streamed text was printed.
|
|
454
|
+
if (sawTextDelta && !options.silent) {
|
|
455
|
+
if (renderPlain) {
|
|
456
|
+
// Plain streaming already wrote chunks; ensure clean separation.
|
|
457
|
+
stdoutWrite('\n');
|
|
458
|
+
}
|
|
459
|
+
else {
|
|
460
|
+
// Separate streamed output from logs.
|
|
461
|
+
log('');
|
|
462
|
+
}
|
|
463
|
+
}
|
|
464
|
+
logVerbose(`Response status: ${response.status ?? 'completed'}`);
|
|
465
|
+
if (response.status && response.status !== 'completed') {
|
|
466
|
+
// API can reply `in_progress` even after the stream closes; give it a brief grace poll.
|
|
467
|
+
if (response.id && response.status === 'in_progress') {
|
|
468
|
+
const polishingStart = now();
|
|
469
|
+
const pollIntervalMs = 2_000;
|
|
470
|
+
const maxWaitMs = 180_000;
|
|
471
|
+
log(chalk.dim('Response still in_progress; polling until completion...'));
|
|
472
|
+
// Short polling loop — we don't want to hang forever, just catch late finalization.
|
|
473
|
+
while (now() - polishingStart < maxWaitMs) {
|
|
474
|
+
throwIfTimedOut();
|
|
475
|
+
await wait(pollIntervalMs);
|
|
476
|
+
const refreshed = await clientInstance.responses.retrieve(response.id);
|
|
477
|
+
if (refreshed.status === 'completed') {
|
|
478
|
+
response = refreshed;
|
|
479
|
+
break;
|
|
480
|
+
}
|
|
481
|
+
}
|
|
482
|
+
}
|
|
483
|
+
if (response.status !== 'completed') {
|
|
484
|
+
const detail = response.error?.message || response.incomplete_details?.reason || response.status;
|
|
485
|
+
log(chalk.yellow(`API ended the run early (status=${response.status}${response.incomplete_details?.reason ? `, reason=${response.incomplete_details.reason}` : ''}).`));
|
|
486
|
+
throw new OracleResponseError(`Response did not complete: ${detail}`, response);
|
|
487
|
+
}
|
|
488
|
+
}
|
|
489
|
+
const answerText = extractTextOutput(response);
|
|
490
|
+
if (!options.silent) {
|
|
491
|
+
// Flag flips to true when streaming events arrive.
|
|
492
|
+
if (sawTextDelta) {
|
|
493
|
+
// Already handled above (rendered or streamed); avoid double-printing.
|
|
494
|
+
}
|
|
495
|
+
else {
|
|
496
|
+
ensureAnswerHeader();
|
|
497
|
+
// Render markdown to ANSI in rich TTYs unless the caller opts out with --render-plain.
|
|
498
|
+
const printable = answerText
|
|
499
|
+
? renderPlain || !richTty
|
|
500
|
+
? answerText
|
|
501
|
+
: renderMarkdownAnsi(answerText)
|
|
502
|
+
: chalk.dim('(no text output)');
|
|
503
|
+
sinkWrite(printable);
|
|
504
|
+
if (!printable.endsWith('\n')) {
|
|
505
|
+
sinkWrite('\n');
|
|
506
|
+
}
|
|
507
|
+
stdoutWrite(printable);
|
|
508
|
+
if (!printable.endsWith('\n')) {
|
|
509
|
+
stdoutWrite('\n');
|
|
510
|
+
}
|
|
511
|
+
log('');
|
|
512
|
+
}
|
|
513
|
+
}
|
|
514
|
+
const usage = response.usage ?? {};
|
|
515
|
+
const inputTokens = usage.input_tokens ?? estimatedInputTokens;
|
|
516
|
+
const outputTokens = usage.output_tokens ?? 0;
|
|
517
|
+
const reasoningTokens = usage.reasoning_tokens ?? 0;
|
|
518
|
+
const totalTokens = usage.total_tokens ?? inputTokens + outputTokens + reasoningTokens;
|
|
519
|
+
const pricing = modelConfig.pricing ?? undefined;
|
|
520
|
+
const cost = pricing
|
|
521
|
+
? estimateUsdCost({
|
|
522
|
+
usage: { inputTokens, outputTokens, reasoningTokens, totalTokens },
|
|
523
|
+
pricing: { inputUsdPerToken: pricing.inputPerToken, outputUsdPerToken: pricing.outputPerToken },
|
|
524
|
+
})?.totalUsd
|
|
525
|
+
: undefined;
|
|
526
|
+
const effortLabel = modelConfig.reasoning?.effort;
|
|
527
|
+
const modelLabel = effortLabel ? `${modelConfig.model}[${effortLabel}]` : modelConfig.model;
|
|
528
|
+
const sessionIdContainsModel = typeof options.sessionId === 'string' && options.sessionId.toLowerCase().includes(modelConfig.model.toLowerCase());
|
|
529
|
+
const tokensDisplay = [inputTokens, outputTokens, reasoningTokens, totalTokens]
|
|
530
|
+
.map((value, index) => formatTokenValue(value, usage, index))
|
|
531
|
+
.join('/');
|
|
532
|
+
const tokensPart = (() => {
|
|
533
|
+
const parts = tokensDisplay.split('/');
|
|
534
|
+
if (parts.length !== 4)
|
|
535
|
+
return tokensDisplay;
|
|
536
|
+
return `↑${parts[0]} ↓${parts[1]} ↻${parts[2]} Δ${parts[3]}`;
|
|
537
|
+
})();
|
|
538
|
+
const modelPart = sessionIdContainsModel ? null : modelLabel;
|
|
539
|
+
const actualInput = usage.input_tokens;
|
|
540
|
+
const estActualPart = (() => {
|
|
541
|
+
if (!options.verbose)
|
|
542
|
+
return null;
|
|
543
|
+
if (actualInput === undefined)
|
|
544
|
+
return null;
|
|
545
|
+
const delta = actualInput - estimatedInputTokens;
|
|
546
|
+
const deltaText = delta === 0 ? '' : delta > 0 ? ` (+${delta.toLocaleString()})` : ` (${delta.toLocaleString()})`;
|
|
547
|
+
return `est→actual=${estimatedInputTokens.toLocaleString()}→${actualInput.toLocaleString()}${deltaText}`;
|
|
548
|
+
})();
|
|
549
|
+
const { line1, line2 } = formatFinishLine({
|
|
550
|
+
elapsedMs,
|
|
551
|
+
model: modelPart,
|
|
552
|
+
costUsd: cost ?? null,
|
|
553
|
+
tokensPart,
|
|
554
|
+
summaryExtraParts: options.sessionId ? [`sid=${options.sessionId}`] : null,
|
|
555
|
+
detailParts: [
|
|
556
|
+
estActualPart,
|
|
557
|
+
!searchEnabled ? 'search=off' : null,
|
|
558
|
+
files.length > 0 ? `files=${files.length}` : null,
|
|
559
|
+
],
|
|
560
|
+
});
|
|
561
|
+
if (!options.silent) {
|
|
562
|
+
log('');
|
|
563
|
+
}
|
|
564
|
+
log(chalk.blue(line1));
|
|
565
|
+
if (line2) {
|
|
566
|
+
log(dim(line2));
|
|
567
|
+
}
|
|
568
|
+
return {
|
|
569
|
+
mode: 'live',
|
|
570
|
+
response,
|
|
571
|
+
usage: { inputTokens, outputTokens, reasoningTokens, totalTokens, ...(cost != null ? { cost } : {}) },
|
|
572
|
+
elapsedMs,
|
|
573
|
+
};
|
|
574
|
+
}
|
|
575
|
+
/**
 * Extract the assistant's plain-text answer from a Responses API payload.
 *
 * Prefers the SDK convenience field `output_text` when it is a non-empty
 * array; otherwise walks the raw `output` items and collects every text
 * chunk. Returns '' when no text can be found.
 *
 * @param {object} response - A Responses API response object.
 * @returns {string} All text segments joined with newlines, or ''.
 */
export function extractTextOutput(response) {
    const { output_text: convenienceText, output } = response;
    if (Array.isArray(convenienceText) && convenienceText.length > 0) {
        return convenienceText.join('\n');
    }
    if (!Array.isArray(output)) {
        return '';
    }
    const pieces = [];
    for (const item of output) {
        if (Array.isArray(item.content)) {
            // Structured item: keep only chunks that carry visible text.
            for (const part of item.content) {
                const isTextChunk = part && (part.type === 'output_text' || part.type === 'text');
                if (isTextChunk && part.text) {
                    pieces.push(part.text);
                }
            }
        } else if (typeof item.text === 'string') {
            // Bare item with a direct text field.
            pieces.push(item.text);
        }
    }
    return pieces.join('\n');
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
 * Normalize a preview-mode option into one of 'summary' | 'json' | 'full'.
 *
 * A non-empty string is validated against the known modes, falling back to
 * 'summary' for unrecognized values. Any other truthy value (e.g. `true`)
 * means "preview enabled" with the default mode. Falsy input yields
 * `undefined` (preview disabled / unset).
 *
 * @param {unknown} value - Raw option value from CLI flags or config.
 * @returns {'summary'|'json'|'full'|undefined}
 */
export function resolvePreviewMode(value) {
    if (typeof value === 'string' && value !== '') {
        switch (value) {
            case 'summary':
            case 'json':
            case 'full':
                return value;
            default:
                // Unknown mode string: degrade gracefully to the default.
                return 'summary';
        }
    }
    return value ? 'summary' : undefined;
}
|
|
11
|
+
/**
 * Format a token count for display.
 *
 * Counts with absolute value >= 1000 are abbreviated in thousands with up
 * to two decimals and trailing zeros trimmed (11380 -> "11.38k",
 * 1500 -> "1.5k", 2000 -> "2k"); smaller counts use locale grouping.
 *
 * @param {number} value - Raw token count (may be negative for deltas).
 * @returns {string} Human-readable count.
 */
export function formatTokenCount(value) {
    if (Math.abs(value) < 1000) {
        return value.toLocaleString();
    }
    const scaled = (value / 1000).toFixed(2);
    // Trim "x.00" -> "x", then "x.y0" -> "x.y" (keeps "x.0y" intact).
    const trimmed = scaled.replace(/\.0+$/, '').replace(/\.([1-9]*)0$/, '.$1');
    return `${trimmed}k`;
}
|
|
21
|
+
/**
 * Render an estimated token count through an optional styling callback.
 *
 * @param {number} value - Token count to format.
 * @param {(text: string) => string} [format] - Styling wrapper (e.g. chalk.dim);
 *   defaults to identity so the plain formatted count is returned.
 * @returns {string} The formatted (and possibly styled) count.
 */
export function formatTokenEstimate(value, format = (text) => text) {
    const rendered = formatTokenCount(value);
    return format(rendered);
}
|
|
24
|
+
/**
 * Format one token-count column, marking estimated values with a trailing '*'.
 *
 * The `index` selects which usage field backs the column
 * (0=input, 1=output, 2=reasoning, 3=total); when that field is missing
 * from `usage` (nullish), the displayed value is an estimate.
 *
 * @param {number} value - The count to display (actual or estimated).
 * @param {object|undefined} usage - Raw usage object from the API response.
 * @param {number} index - Column position in input/output/reasoning/total order.
 * @returns {string} Formatted count, suffixed with '*' when estimated.
 */
export function formatTokenValue(value, usage, index) {
    // Column order mirrors the summary line.
    const usageKeys = ['input_tokens', 'output_tokens', 'reasoning_tokens', 'total_tokens'];
    const key = usageKeys[index];
    const isEstimate = key !== undefined && usage?.[key] == null;
    const text = formatTokenCount(value);
    return isEstimate ? `${text}*` : text;
}
|