@steipete/oracle 1.2.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. package/README.md +14 -6
  2. package/dist/.DS_Store +0 -0
  3. package/dist/bin/oracle-cli.js +161 -44
  4. package/dist/src/browser/config.js +6 -0
  5. package/dist/src/browser/cookies.js +49 -11
  6. package/dist/src/browser/index.js +18 -5
  7. package/dist/src/browser/sessionRunner.js +10 -1
  8. package/dist/src/cli/browserConfig.js +109 -2
  9. package/dist/src/cli/detach.js +12 -0
  10. package/dist/src/cli/dryRun.js +19 -3
  11. package/dist/src/cli/help.js +2 -0
  12. package/dist/src/cli/options.js +22 -0
  13. package/dist/src/cli/runOptions.js +16 -2
  14. package/dist/src/cli/sessionRunner.js +11 -0
  15. package/dist/src/cli/tui/index.js +68 -47
  16. package/dist/src/oracle/client.js +24 -6
  17. package/dist/src/oracle/config.js +10 -0
  18. package/dist/src/oracle/files.js +8 -2
  19. package/dist/src/oracle/format.js +2 -7
  20. package/dist/src/oracle/fsAdapter.js +4 -1
  21. package/dist/src/oracle/gemini.js +161 -0
  22. package/dist/src/oracle/logging.js +36 -0
  23. package/dist/src/oracle/oscProgress.js +7 -1
  24. package/dist/src/oracle/run.js +111 -48
  25. package/dist/src/oracle.js +1 -0
  26. package/dist/src/sessionManager.js +2 -0
  27. package/dist/vendor/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  28. package/dist/vendor/oracle-notifier/build-notifier.sh +0 -0
  29. package/dist/vendor/oracle-notifier/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  30. package/dist/vendor/oracle-notifier/oracle-notifier/build-notifier.sh +0 -0
  31. package/package.json +16 -26
  32. package/vendor/oracle-notifier/OracleNotifier.app/Contents/MacOS/OracleNotifier +0 -0
  33. package/vendor/oracle-notifier/build-notifier.sh +0 -0
  34. package/vendor/oracle-notifier/README.md +0 -24
@@ -0,0 +1,161 @@
1
+ import { GoogleGenerativeAI, HarmCategory, HarmBlockThreshold, } from '@google/generative-ai';
2
// Logical model names that must be translated into the exact ids the
// Google Generative AI SDK expects. The non-Gemini entries are identity
// mappings kept only so the map stays total over the CLI's model names.
const MODEL_ID_MAP = {
  'gemini-3-pro': 'gemini-3-pro-preview',
  'gpt-5-pro': 'gpt-5-pro', // unused, normalize TS map
  'gpt-5.1': 'gpt-5.1',
};

/**
 * Resolve a logical model name to the concrete SDK model id.
 * Names without an explicit mapping pass through unchanged.
 *
 * @param {string} modelName - Logical model name as used by the CLI.
 * @returns {string} The SDK model id to dispatch to.
 */
export function resolveGeminiModelId(modelName) {
  const mapped = MODEL_ID_MAP[modelName];
  return mapped === undefined ? modelName : mapped;
}
11
/**
 * Create a Gemini-backed client that mirrors the OpenAI Responses API
 * surface the rest of this package consumes (`responses.stream`,
 * `responses.create`, `responses.retrieve`), so call sites can switch
 * providers without changing shape.
 *
 * @param {string} apiKey - Google Generative AI API key.
 * @param {string} [modelName='gemini-3-pro'] - Logical model name.
 * @param {string} [resolvedModelId] - Pre-resolved SDK model id; when omitted,
 *   the name is mapped via resolveGeminiModelId().
 */
export function createGeminiClient(apiKey, modelName = 'gemini-3-pro', resolvedModelId) {
  const genAI = new GoogleGenerativeAI(apiKey);
  const modelId = resolvedModelId ?? resolveGeminiModelId(modelName);
  const model = genAI.getGenerativeModel({ model: modelId });
  // Translate an OpenAI Responses-style request body into the Gemini request
  // shape: { systemInstruction, contents, tools, generationConfig, safetySettings }.
  const adaptBodyToGemini = (body) => {
    const contents = body.input.map((inputItem) => ({
      // Gemini only distinguishes 'user' and 'model' roles; anything
      // non-user is folded into 'model'.
      role: inputItem.role === 'user' ? 'user' : 'model',
      parts: inputItem.content
        .map((contentPart) => {
          if (contentPart.type === 'input_text') {
            return { text: contentPart.text };
          }
          // Non-text parts are dropped here — NOTE(review): confirm no
          // caller sends image/file parts down this path.
          return null;
        })
        .filter((part) => part !== null),
    }));
    // Map OpenAI's web_search_preview tool to Gemini's googleSearch tool;
    // unknown tool types become empty objects and are filtered out below.
    const tools = body.tools
      ?.map((tool) => {
        if (tool.type === 'web_search_preview') {
          return {
            googleSearch: {},
          };
        }
        return {};
      })
      .filter((t) => Object.keys(t).length > 0);
    const generationConfig = {
      maxOutputTokens: body.max_output_tokens,
    };
    // Explicitly disable all four harm-category filters so long technical
    // answers are not cut off by safety blocking.
    const safetySettings = [
      {
        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold: HarmBlockThreshold.BLOCK_NONE,
      },
      {
        category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold: HarmBlockThreshold.BLOCK_NONE,
      },
      {
        category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
        threshold: HarmBlockThreshold.BLOCK_NONE,
      },
      {
        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold: HarmBlockThreshold.BLOCK_NONE,
      },
    ];
    const systemInstruction = body.instructions || undefined;
    return { systemInstruction, contents, tools, generationConfig, safetySettings };
  };
  // Convert a Gemini response into the Responses-API-like object the CLI
  // consumes: output_text chunks, output parts, and usage token counts.
  const adaptGeminiResponseToOracle = (geminiResponse) => {
    const outputText = [];
    const output = [];
    geminiResponse.candidates?.forEach((candidate) => {
      candidate.content?.parts?.forEach((part) => {
        if (part.text) {
          outputText.push(part.text);
          output.push({ type: 'text', text: part.text });
        }
      });
    });
    const usage = {
      input_tokens: geminiResponse.usageMetadata?.promptTokenCount || 0,
      output_tokens: geminiResponse.usageMetadata?.candidatesTokenCount || 0,
      total_tokens: (geminiResponse.usageMetadata?.promptTokenCount || 0) + (geminiResponse.usageMetadata?.candidatesTokenCount || 0),
    };
    return {
      id: `gemini-${Date.now()}`, // Gemini doesn't always provide a stable ID in the response object
      status: 'completed',
      output_text: outputText,
      output,
      usage,
    };
  };
  // Wrap SDK errors with an actionable hint for the common
  // "model not found / no preview access" case (message contains '404').
  const enrichGeminiError = (error) => {
    const message = error instanceof Error ? error.message : String(error);
    if (message.includes('404')) {
      return new Error(`Gemini model not available to this API key/region. Confirm preview access and model ID (${modelId}). Original: ${message}`);
    }
    return error instanceof Error ? error : new Error(message);
  };
  return {
    responses: {
      // Streaming call: returns an async iterable of { type: 'chunk', delta }
      // events plus a finalResponse() accessor, mirroring the OpenAI client.
      stream: (body) => {
        const geminiBody = adaptBodyToGemini(body);
        let finalResponsePromise = null;
        // Concatenate the text parts of a single streamed chunk.
        const collectChunkText = (chunk) => {
          const parts = [];
          chunk.candidates?.forEach((candidate) => {
            candidate.content?.parts?.forEach((part) => {
              if (part.text) {
                parts.push(part.text);
              }
            });
          });
          return parts.join('');
        };
        async function* iterator() {
          let streamingResp;
          try {
            streamingResp = await model.generateContentStream(geminiBody);
          }
          catch (error) {
            throw enrichGeminiError(error);
          }
          for await (const chunk of streamingResp.stream) {
            const text = collectChunkText(chunk);
            if (text) {
              yield { type: 'chunk', delta: text };
            }
          }
          // Set only after the stream is exhausted; finalResponse() relies on this.
          finalResponsePromise = streamingResp.response.then(adaptGeminiResponseToOracle);
        }
        const generator = iterator();
        return {
          [Symbol.asyncIterator]: () => generator,
          finalResponse: async () => {
            // Ensure the stream has been consumed or at least started to get the promise
            if (!finalResponsePromise) {
              // In case the user calls finalResponse before iterating, we need to consume the stream
              // This is a bit edge-casey but safe.
              for await (const _ of generator) { }
            }
            if (!finalResponsePromise) {
              throw new Error('Response promise not initialized');
            }
            return finalResponsePromise;
          }
        };
      },
      // Non-streaming call: one request, one adapted response.
      create: async (body) => {
        const geminiBody = adaptBodyToGemini(body);
        let result;
        try {
          result = await model.generateContent(geminiBody);
        }
        catch (error) {
          throw enrichGeminiError(error);
        }
        return adaptGeminiResponseToOracle(result.response);
      },
      // Gemini has no retrieve-by-id endpoint; report an error-status response
      // so background polling fails loudly instead of hanging.
      retrieve: async (id) => {
        return {
          id,
          status: 'error',
          error: { message: 'Retrieve by ID not supported for Gemini API yet.' },
        };
      },
    },
  };
}
@@ -0,0 +1,36 @@
1
/**
 * Render an API key safely for log output: keep at most the first and
 * last few characters and replace the middle with asterisks.
 *
 * @param {string|null|undefined} key - The raw API key.
 * @returns {string|null} Masked key, or null when no key was provided.
 */
export function maskApiKey(key) {
  if (!key) {
    return null;
  }
  // Very short keys: show only the first and last character.
  if (key.length <= 8) {
    const first = key[0] ?? '';
    const last = key[key.length - 1] ?? '';
    return `${first}***${last}`;
  }
  // Normal keys: four leading and four trailing characters survive.
  return `${key.slice(0, 4)}****${key.slice(-4)}`;
}
10
/**
 * Produce a log-safe rendering of a base URL: keep scheme + host, collapse
 * the path to its first segment, and mask the values of allow-listed query
 * parameters so credentials embedded in URLs never reach the logs.
 *
 * @param {string|null|undefined} raw - The configured base URL.
 * @returns {string} A shortened, masked representation ('' when absent).
 */
export function formatBaseUrlForLog(raw) {
  if (!raw) {
    return '';
  }
  try {
    const url = new URL(raw);
    const pathSegments = url.pathname.split('/').filter(Boolean);
    // Keep only the first path segment; indicate any deeper path with '/...'.
    let shortPath = '';
    if (pathSegments.length === 1) {
      shortPath = `/${pathSegments[0]}`;
    } else if (pathSegments.length > 1) {
      shortPath = `/${pathSegments[0]}/...`;
    }
    // Only allow-listed query keys are shown at all, and even their values
    // are masked.
    const maskedPairs = ['api-version']
      .filter((name) => url.searchParams.has(name))
      .map((name) => `${name}=***`);
    const queryPart = maskedPairs.length > 0 ? `?${maskedPairs.join('&')}` : '';
    return `${url.protocol}//${url.host}${shortPath}${queryPart}`;
  } catch {
    // Not parseable as a URL: fall back to the trimmed raw value,
    // truncated when it is suspiciously long.
    const cleaned = raw.trim();
    if (cleaned.length <= 64) {
      return cleaned;
    }
    return `${cleaned.slice(0, 32)}…${cleaned.slice(-8)}`;
  }
}
@@ -29,11 +29,17 @@ export function supportsOscProgress(env = process.env, isTty = process.stdout.is
29
29
  return false;
30
30
  }
31
31
  export function startOscProgress(options = {}) {
32
- const { label = 'Waiting for OpenAI', targetMs = 10 * 60_000, write = (text) => process.stdout.write(text) } = options;
32
+ const { label = 'Waiting for API', targetMs = 10 * 60_000, write = (text) => process.stdout.write(text), indeterminate = false, } = options;
33
33
  if (!supportsOscProgress(options.env, options.isTty)) {
34
34
  return () => { };
35
35
  }
36
36
  const cleanLabel = sanitizeLabel(label);
37
+ if (indeterminate) {
38
+ write(`${OSC}3;;${cleanLabel}${ST}`);
39
+ return () => {
40
+ write(`${OSC}0;0;${cleanLabel}${ST}`);
41
+ };
42
+ }
37
43
  const target = Math.max(targetMs, 1_000);
38
44
  const send = (state, percent) => {
39
45
  const clamped = Math.max(0, Math.min(100, Math.round(percent)));
@@ -13,30 +13,26 @@ import { formatElapsed, formatUSD } from './format.js';
13
13
  import { getFileTokenStats, printFileTokenStats } from './tokenStats.js';
14
14
  import { OracleResponseError, OracleTransportError, PromptValidationError, describeTransportError, toTransportError, } from './errors.js';
15
15
  import { createDefaultClientFactory } from './client.js';
16
+ import { formatBaseUrlForLog, maskApiKey } from './logging.js';
16
17
  import { startHeartbeat } from '../heartbeat.js';
17
18
  import { startOscProgress } from './oscProgress.js';
18
19
  import { getCliVersion } from '../version.js';
19
20
  import { createFsAdapter } from './fsAdapter.js';
20
- const isTty = process.stdout.isTTY;
21
+ import { resolveGeminiModelId } from './gemini.js';
22
+ const isTty = process.stdout.isTTY && chalk.level > 0;
21
23
  const dim = (text) => (isTty ? kleur.dim(text) : text);
22
24
  const BACKGROUND_MAX_WAIT_MS = 30 * 60 * 1000;
23
25
  const BACKGROUND_POLL_INTERVAL_MS = 5000;
24
26
  const BACKGROUND_RETRY_BASE_MS = 3000;
25
27
  const BACKGROUND_RETRY_MAX_MS = 15000;
28
+ const DEFAULT_TIMEOUT_NON_PRO_MS = 30_000;
29
+ const DEFAULT_TIMEOUT_PRO_MS = 20 * 60 * 1000;
26
30
  const defaultWait = (ms) => new Promise((resolve) => {
27
31
  setTimeout(resolve, ms);
28
32
  });
29
33
  export async function runOracle(options, deps = {}) {
30
- const { apiKey = options.apiKey ?? process.env.OPENAI_API_KEY, cwd = process.cwd(), fs: fsModule = createFsAdapter(fs), log = console.log, write = (text) => process.stdout.write(text), now = () => performance.now(), clientFactory = createDefaultClientFactory(), client, wait = defaultWait, } = deps;
31
- const maskApiKey = (key) => {
32
- if (!key)
33
- return null;
34
- if (key.length <= 8)
35
- return `${key[0] ?? ''}***${key[key.length - 1] ?? ''}`;
36
- const prefix = key.slice(0, 4);
37
- const suffix = key.slice(-4);
38
- return `${prefix}****${suffix}`;
39
- };
34
+ const { apiKey: optionsApiKey = options.apiKey, cwd = process.cwd(), fs: fsModule = createFsAdapter(fs), log = console.log, write = (text) => process.stdout.write(text), now = () => performance.now(), clientFactory = createDefaultClientFactory(), client, wait = defaultWait, } = deps;
35
+ const baseUrl = options.baseUrl?.trim() || process.env.OPENAI_BASE_URL?.trim();
40
36
  const logVerbose = (message) => {
41
37
  if (options.verbose) {
42
38
  log(dim(`[verbose] ${message}`));
@@ -44,15 +40,22 @@ export async function runOracle(options, deps = {}) {
44
40
  };
45
41
  const previewMode = resolvePreviewMode(options.previewMode ?? options.preview);
46
42
  const isPreview = Boolean(previewMode);
43
+ const getApiKeyForModel = (model) => {
44
+ if (model.startsWith('gpt')) {
45
+ return optionsApiKey ?? process.env.OPENAI_API_KEY;
46
+ }
47
+ if (model.startsWith('gemini')) {
48
+ return optionsApiKey ?? process.env.GEMINI_API_KEY;
49
+ }
50
+ return undefined;
51
+ };
52
+ const envVar = options.model.startsWith('gpt') ? 'OPENAI_API_KEY' : 'GEMINI_API_KEY';
53
+ const apiKey = getApiKeyForModel(options.model);
47
54
  if (!apiKey) {
48
- throw new PromptValidationError('Missing OPENAI_API_KEY. Set it via the environment or a .env file.', {
49
- env: 'OPENAI_API_KEY',
55
+ throw new PromptValidationError(`Missing ${envVar}. Set it via the environment or a .env file.`, {
56
+ env: envVar,
50
57
  });
51
58
  }
52
- const maskedKey = maskApiKey(apiKey);
53
- if (maskedKey) {
54
- log(dim(`Using OPENAI_API_KEY=${maskedKey}`));
55
- }
56
59
  const modelConfig = MODEL_CONFIGS[options.model];
57
60
  if (!modelConfig) {
58
61
  throw new PromptValidationError(`Unsupported model "${options.model}". Choose one of: ${Object.keys(MODEL_CONFIGS).join(', ')}`, { model: options.model });
@@ -62,6 +65,8 @@ export async function runOracle(options, deps = {}) {
62
65
  const files = await readFiles(options.file ?? [], { cwd, fsModule });
63
66
  const searchEnabled = options.search !== false;
64
67
  logVerbose(`cwd: ${cwd}`);
68
+ let pendingNoFilesTip = null;
69
+ let pendingShortPromptTip = null;
65
70
  if (files.length > 0) {
66
71
  const displayPaths = files
67
72
  .map((file) => path.relative(cwd, file.path) || file.path)
@@ -73,9 +78,15 @@ export async function runOracle(options, deps = {}) {
73
78
  else {
74
79
  logVerbose('No files attached.');
75
80
  if (!isPreview) {
76
- log(dim('Tip: no files attached — Oracle works best with project context. Add files via --file path/to/code or docs.'));
81
+ pendingNoFilesTip =
82
+ 'Tip: no files attached — Oracle works best with project context. Add files via --file path/to/code or docs.';
77
83
  }
78
84
  }
85
+ const shortPrompt = (options.prompt?.trim().length ?? 0) < 80;
86
+ if (!isPreview && shortPrompt) {
87
+ pendingShortPromptTip =
88
+ 'Tip: brief prompts often yield generic answers — aim for 6–30 sentences and attach key files.';
89
+ }
79
90
  const fileTokenInfo = getFileTokenStats(files, {
80
91
  cwd,
81
92
  tokenizer: modelConfig.tokenizer,
@@ -89,6 +100,15 @@ export async function runOracle(options, deps = {}) {
89
100
  const fileCount = files.length;
90
101
  const cliVersion = getCliVersion();
91
102
  const richTty = process.stdout.isTTY && chalk.level > 0;
103
+ const timeoutSeconds = options.timeoutSeconds === undefined || options.timeoutSeconds === 'auto'
104
+ ? options.model === 'gpt-5-pro'
105
+ ? DEFAULT_TIMEOUT_PRO_MS / 1000
106
+ : DEFAULT_TIMEOUT_NON_PRO_MS / 1000
107
+ : options.timeoutSeconds;
108
+ const timeoutMs = timeoutSeconds * 1000;
109
+ // Track the concrete model id we dispatch to (especially for Gemini preview aliases)
110
+ const effectiveModelId = options.effectiveModelId ??
111
+ (options.model.startsWith('gemini') ? resolveGeminiModelId(options.model) : modelConfig.model);
92
112
  const headerModelLabel = richTty ? chalk.cyan(modelConfig.model) : modelConfig.model;
93
113
  const requestBody = buildRequestBody({
94
114
  modelConfig,
@@ -102,10 +122,25 @@ export async function runOracle(options, deps = {}) {
102
122
  const estimatedInputTokens = estimateRequestTokens(requestBody, modelConfig);
103
123
  const tokenLabel = richTty ? chalk.green(estimatedInputTokens.toLocaleString()) : estimatedInputTokens.toLocaleString();
104
124
  const fileLabel = richTty ? chalk.magenta(fileCount.toString()) : fileCount.toString();
105
- const headerLine = `oracle (${cliVersion}) consulting ${headerModelLabel}'s crystal ball with ${tokenLabel} tokens and ${fileLabel} files...`;
125
+ const filesPhrase = fileCount === 0 ? 'no files' : `${fileLabel} files`;
126
+ const headerLine = `🧿 oracle (${cliVersion}) summons ${headerModelLabel} — ${tokenLabel} tokens, ${filesPhrase}`;
106
127
  const shouldReportFiles = (options.filesReport || fileTokenInfo.totalTokens > inputTokenBudget) && fileTokenInfo.stats.length > 0;
107
128
  if (!isPreview) {
108
129
  log(headerLine);
130
+ const maskedKey = maskApiKey(apiKey);
131
+ if (maskedKey) {
132
+ const resolvedSuffix = options.model.startsWith('gemini') && effectiveModelId !== modelConfig.model ? ` (resolved: ${effectiveModelId})` : '';
133
+ log(dim(`Using ${envVar}=${maskedKey} for model ${modelConfig.model}${resolvedSuffix}`));
134
+ }
135
+ if (baseUrl) {
136
+ log(dim(`Base URL: ${formatBaseUrlForLog(baseUrl)}`));
137
+ }
138
+ if (pendingNoFilesTip) {
139
+ log(dim(pendingNoFilesTip));
140
+ }
141
+ if (pendingShortPromptTip) {
142
+ log(dim(pendingShortPromptTip));
143
+ }
109
144
  if (options.model === 'gpt-5-pro') {
110
145
  log(dim('Pro is thinking, this can take up to 30 minutes...'));
111
146
  }
@@ -138,11 +173,22 @@ export async function runOracle(options, deps = {}) {
138
173
  inputTokenBudget,
139
174
  };
140
175
  }
141
- const openAiClient = client ?? clientFactory(apiKey);
142
- logVerbose('Dispatching request to OpenAI Responses API...');
176
+ const apiEndpoint = modelConfig.model.startsWith('gemini') ? undefined : baseUrl;
177
+ const clientInstance = client ??
178
+ clientFactory(apiKey, {
179
+ baseUrl: apiEndpoint,
180
+ azure: options.azure,
181
+ model: options.model,
182
+ resolvedModelId: effectiveModelId,
183
+ });
184
+ logVerbose('Dispatching request to API...');
185
+ if (options.verbose) {
186
+ log(''); // ensure verbose section is separated from Answer stream
187
+ }
143
188
  const stopOscProgress = startOscProgress({
144
- label: useBackground ? 'Waiting for OpenAI (background)' : 'Waiting for OpenAI',
145
- targetMs: useBackground ? BACKGROUND_MAX_WAIT_MS : 10 * 60_000,
189
+ label: useBackground ? 'Waiting for API (background)' : 'Waiting for API',
190
+ targetMs: useBackground ? timeoutMs : Math.min(timeoutMs, 10 * 60_000),
191
+ indeterminate: true,
146
192
  write,
147
193
  });
148
194
  const runStart = now();
@@ -150,6 +196,12 @@ export async function runOracle(options, deps = {}) {
150
196
  let elapsedMs = 0;
151
197
  let sawTextDelta = false;
152
198
  let answerHeaderPrinted = false;
199
+ const timeoutExceeded = () => now() - runStart >= timeoutMs;
200
+ const throwIfTimedOut = () => {
201
+ if (timeoutExceeded()) {
202
+ throw new OracleTransportError('client-timeout', `Timed out waiting for API response after ${formatElapsed(timeoutMs)}.`);
203
+ }
204
+ };
153
205
  const ensureAnswerHeader = () => {
154
206
  if (!options.silent && !answerHeaderPrinted) {
155
207
  log('');
@@ -160,17 +212,18 @@ export async function runOracle(options, deps = {}) {
160
212
  try {
161
213
  if (useBackground) {
162
214
  response = await executeBackgroundResponse({
163
- client: openAiClient,
215
+ client: clientInstance,
164
216
  requestBody,
165
217
  log,
166
218
  wait,
167
219
  heartbeatIntervalMs: options.heartbeatIntervalMs,
168
220
  now,
221
+ maxWaitMs: timeoutMs,
169
222
  });
170
223
  elapsedMs = now() - runStart;
171
224
  }
172
225
  else {
173
- const stream = await openAiClient.responses.stream(requestBody);
226
+ const stream = await clientInstance.responses.stream(requestBody);
174
227
  let heartbeatActive = false;
175
228
  let stopHeartbeat = null;
176
229
  const stopHeartbeatNow = () => {
@@ -189,13 +242,16 @@ export async function runOracle(options, deps = {}) {
189
242
  isActive: () => heartbeatActive,
190
243
  makeMessage: (elapsedMs) => {
191
244
  const elapsedText = formatElapsed(elapsedMs);
192
- return `API connection active — ${elapsedText} elapsed. Expect up to ~10 min before GPT-5 responds.`;
245
+ const timeoutLabel = Math.round(timeoutMs / 60000);
246
+ return `API connection active — ${elapsedText} elapsed. Timeout in ~${timeoutLabel} min if no response.`;
193
247
  },
194
248
  });
195
249
  }
196
250
  try {
197
251
  for await (const event of stream) {
198
- if (event.type === 'response.output_text.delta') {
252
+ throwIfTimedOut();
253
+ const isTextDelta = event.type === 'chunk' || event.type === 'response.output_text.delta';
254
+ if (isTextDelta) {
199
255
  stopOscProgress();
200
256
  stopHeartbeatNow();
201
257
  sawTextDelta = true;
@@ -205,17 +261,17 @@ export async function runOracle(options, deps = {}) {
205
261
  }
206
262
  }
207
263
  }
264
+ throwIfTimedOut();
208
265
  }
209
266
  catch (streamError) {
210
- if (typeof stream.abort === 'function') {
211
- stream.abort();
212
- }
267
+ // stream.abort() is not available on the interface
213
268
  stopHeartbeatNow();
214
269
  const transportError = toTransportError(streamError);
215
270
  log(chalk.yellow(describeTransportError(transportError)));
216
271
  throw transportError;
217
272
  }
218
273
  response = await stream.finalResponse();
274
+ throwIfTimedOut();
219
275
  stopHeartbeatNow();
220
276
  elapsedMs = now() - runStart;
221
277
  }
@@ -224,11 +280,16 @@ export async function runOracle(options, deps = {}) {
224
280
  stopOscProgress();
225
281
  }
226
282
  if (!response) {
227
- throw new Error('OpenAI did not return a response.');
283
+ throw new Error('API did not return a response.');
284
+ }
285
+ // biome-ignore lint/nursery/noUnnecessaryConditions: we only add spacing when any streamed text was printed
286
+ if (sawTextDelta && !options.silent) {
287
+ write('\n');
288
+ log('');
228
289
  }
229
290
  logVerbose(`Response status: ${response.status ?? 'completed'}`);
230
291
  if (response.status && response.status !== 'completed') {
231
- // OpenAI can reply `in_progress` even after the stream closes; give it a brief grace poll.
292
+ // API can reply `in_progress` even after the stream closes; give it a brief grace poll.
232
293
  if (response.id && response.status === 'in_progress') {
233
294
  const polishingStart = now();
234
295
  const pollIntervalMs = 2_000;
@@ -237,7 +298,7 @@ export async function runOracle(options, deps = {}) {
237
298
  // Short polling loop — we don't want to hang forever, just catch late finalization.
238
299
  while (now() - polishingStart < maxWaitMs) {
239
300
  await wait(pollIntervalMs);
240
- const refreshed = await openAiClient.responses.retrieve(response.id);
301
+ const refreshed = await clientInstance.responses.retrieve(response.id);
241
302
  if (refreshed.status === 'completed') {
242
303
  response = refreshed;
243
304
  break;
@@ -246,7 +307,7 @@ export async function runOracle(options, deps = {}) {
246
307
  }
247
308
  if (response.status !== 'completed') {
248
309
  const detail = response.error?.message || response.incomplete_details?.reason || response.status;
249
- log(chalk.yellow(`OpenAI ended the run early (status=${response.status}${response.incomplete_details?.reason ? `, reason=${response.incomplete_details.reason}` : ''}).`));
310
+ log(chalk.yellow(`API ended the run early (status=${response.status}${response.incomplete_details?.reason ? `, reason=${response.incomplete_details.reason}` : ''}).`));
250
311
  throw new OracleResponseError(`Response did not complete: ${detail}`, response);
251
312
  }
252
313
  }
@@ -254,7 +315,7 @@ export async function runOracle(options, deps = {}) {
254
315
  if (!options.silent) {
255
316
  // biome-ignore lint/nursery/noUnnecessaryConditions: flips true when streaming events arrive
256
317
  if (sawTextDelta) {
257
- write('\n\n');
318
+ write('\n');
258
319
  }
259
320
  else {
260
321
  ensureAnswerHeader();
@@ -276,7 +337,8 @@ export async function runOracle(options, deps = {}) {
276
337
  const tokensDisplay = [inputTokens, outputTokens, reasoningTokens, totalTokens]
277
338
  .map((value, index) => formatTokenValue(value, usage, index))
278
339
  .join('/');
279
- statsParts.push(`tok(i/o/r/t)=${tokensDisplay}`);
340
+ const tokensLabel = options.verbose ? 'tokens (input/output/reasoning/total)' : 'tok(i/o/r/t)';
341
+ statsParts.push(`${tokensLabel}=${tokensDisplay}`);
280
342
  const actualInput = usage.input_tokens;
281
343
  if (actualInput !== undefined) {
282
344
  const delta = actualInput - estimatedInputTokens;
@@ -289,7 +351,8 @@ export async function runOracle(options, deps = {}) {
289
351
  if (files.length > 0) {
290
352
  statsParts.push(`files=${files.length}`);
291
353
  }
292
- log(chalk.blue(`Finished in ${elapsedDisplay} (${statsParts.join(' | ')})`));
354
+ const sessionPrefix = options.sessionId ? `${options.sessionId} ` : '';
355
+ log(chalk.blue(`Finished ${sessionPrefix}in ${elapsedDisplay} (${statsParts.join(' | ')})`));
293
356
  return {
294
357
  mode: 'live',
295
358
  response,
@@ -338,13 +401,13 @@ export function extractTextOutput(response) {
338
401
  return '';
339
402
  }
340
403
  async function executeBackgroundResponse(params) {
341
- const { client, requestBody, log, wait, heartbeatIntervalMs, now } = params;
404
+ const { client, requestBody, log, wait, heartbeatIntervalMs, now, maxWaitMs } = params;
342
405
  const initialResponse = await client.responses.create(requestBody);
343
406
  if (!initialResponse || !initialResponse.id) {
344
- throw new OracleResponseError('OpenAI did not return a response ID for the background run.', initialResponse);
407
+ throw new OracleResponseError('API did not return a response ID for the background run.', initialResponse);
345
408
  }
346
409
  const responseId = initialResponse.id;
347
- log(dim(`OpenAI scheduled background response ${responseId} (status=${initialResponse.status ?? 'unknown'}). Monitoring up to ${Math.round(BACKGROUND_MAX_WAIT_MS / 60000)} minutes for completion...`));
410
+ log(dim(`API scheduled background response ${responseId} (status=${initialResponse.status ?? 'unknown'}). Monitoring up to ${Math.round(BACKGROUND_MAX_WAIT_MS / 60000)} minutes for completion...`));
348
411
  let heartbeatActive = false;
349
412
  let stopHeartbeat = null;
350
413
  const stopHeartbeatNow = () => {
@@ -363,7 +426,7 @@ async function executeBackgroundResponse(params) {
363
426
  isActive: () => heartbeatActive,
364
427
  makeMessage: (elapsedMs) => {
365
428
  const elapsedText = formatElapsed(elapsedMs);
366
- return `OpenAI background run still in progress — ${elapsedText} elapsed.`;
429
+ return `API background run still in progress — ${elapsedText} elapsed.`;
367
430
  },
368
431
  });
369
432
  }
@@ -375,7 +438,7 @@ async function executeBackgroundResponse(params) {
375
438
  log,
376
439
  wait,
377
440
  now,
378
- maxWaitMs: BACKGROUND_MAX_WAIT_MS,
441
+ maxWaitMs,
379
442
  });
380
443
  }
381
444
  finally {
@@ -394,10 +457,10 @@ async function pollBackgroundResponse(params) {
394
457
  // biome-ignore lint/nursery/noUnnecessaryConditions: guard only for first iteration
395
458
  if (firstCycle) {
396
459
  firstCycle = false;
397
- log(dim(`OpenAI background response status=${status}. We'll keep retrying automatically.`));
460
+ log(dim(`API background response status=${status}. We'll keep retrying automatically.`));
398
461
  }
399
462
  else if (status !== lastStatus && status !== 'completed') {
400
- log(dim(`OpenAI background response status=${status}.`));
463
+ log(dim(`API background response status=${status}.`));
401
464
  }
402
465
  lastStatus = status;
403
466
  if (status === 'completed') {
@@ -408,11 +471,11 @@ async function pollBackgroundResponse(params) {
408
471
  throw new OracleResponseError(`Response did not complete: ${detail}`, response);
409
472
  }
410
473
  if (now() - startMark >= maxWaitMs) {
411
- throw new OracleTransportError('client-timeout', 'Timed out waiting for OpenAI background response to finish.');
474
+ throw new OracleTransportError('client-timeout', 'Timed out waiting for API background response to finish.');
412
475
  }
413
476
  await wait(BACKGROUND_POLL_INTERVAL_MS);
414
477
  if (now() - startMark >= maxWaitMs) {
415
- throw new OracleTransportError('client-timeout', 'Timed out waiting for OpenAI background response to finish.');
478
+ throw new OracleTransportError('client-timeout', 'Timed out waiting for API background response to finish.');
416
479
  }
417
480
  const { response: nextResponse, reconnected } = await retrieveBackgroundResponseWithRetry({
418
481
  client,
@@ -425,7 +488,7 @@ async function pollBackgroundResponse(params) {
425
488
  });
426
489
  if (reconnected) {
427
490
  const nextStatus = nextResponse.status ?? 'in_progress';
428
- log(dim(`Reconnected to OpenAI background response (status=${nextStatus}). OpenAI is still working...`));
491
+ log(dim(`Reconnected to API background response (status=${nextStatus}). API is still working...`));
429
492
  }
430
493
  response = nextResponse;
431
494
  }
@@ -449,7 +512,7 @@ async function retrieveBackgroundResponseWithRetry(params) {
449
512
  log(chalk.yellow(`${describeTransportError(transportError)} Retrying in ${formatElapsed(delay)}...`));
450
513
  await wait(delay);
451
514
  if (now() - startMark >= maxWaitMs) {
452
- throw new OracleTransportError('client-timeout', 'Timed out waiting for OpenAI background response to finish.');
515
+ throw new OracleTransportError('client-timeout', 'Timed out waiting for API background response to finish.');
453
516
  }
454
517
  }
455
518
  }
@@ -7,3 +7,4 @@ export { getFileTokenStats, printFileTokenStats } from './oracle/tokenStats.js';
7
7
  export { OracleResponseError, OracleTransportError, OracleUserError, FileValidationError, BrowserAutomationError, PromptValidationError, describeTransportError, extractResponseMetadata, asOracleUserError, toTransportError, } from './oracle/errors.js';
8
8
  export { createDefaultClientFactory } from './oracle/client.js';
9
9
  export { runOracle, extractTextOutput } from './oracle/run.js';
10
+ export { resolveGeminiModelId } from './oracle/gemini.js';
@@ -104,6 +104,8 @@ export async function initializeSession(options, cwd, notifications) {
104
104
  browserBundleFiles: options.browserBundleFiles,
105
105
  background: options.background,
106
106
  search: options.search,
107
+ baseUrl: options.baseUrl,
108
+ azure: options.azure,
107
109
  },
108
110
  };
109
111
  await fs.writeFile(metaPath(sessionId), JSON.stringify(metadata, null, 2), 'utf8');
File without changes