@steipete/oracle 0.4.2 → 0.4.3

This diff shows the changes between publicly released versions of this package as published to a supported registry. It is provided for informational purposes only.
package/README.md CHANGED
@@ -55,7 +55,7 @@ Engine auto-picks API when `OPENAI_API_KEY` is set, otherwise browser; browser i
  - Oracle bundles a prompt plus the right files so another AI (GPT 5 Pro + more) can answer. Use when stuck/bugs/reviewing.
  - Run `npx -y @steipete/oracle --help` once per session before first use.
  ```
- - Tip: set `chatgptUrl` in config (or `--chatgpt-url`) to a dedicated ChatGPT project folder so work runs don’t clutter your main history.
+ - Tip: set `browser.chatgptUrl` in config (or `--chatgpt-url`) to a dedicated ChatGPT project folder so browser runs don’t clutter your main history.

  **MCP**
  - Run the stdio server via `oracle-mcp`.
@@ -116,9 +116,12 @@ Put defaults in `~/.oracle/config.json` (JSON5). Example:
  model: "gpt-5.1-pro",
  engine: "api",
  filesReport: true,
- chatgptUrl: "https://chatgpt.com/g/your-project-folder"
+ browser: {
+   chatgptUrl: "https://chatgpt.com/g/g-p-691edc9fec088191b553a35093da1ea8-oracle/project"
+ }
  }
  ```
+ Use `browser.chatgptUrl` (or the legacy alias `browser.url`) to target a specific ChatGPT workspace/folder for browser automation.
  See [docs/configuration.md](docs/configuration.md) for precedence and full schema.

  Advanced flags
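The README change above moves the ChatGPT project URL under a nested `browser` block. A minimal `~/.oracle/config.json` in the new shape might look like the sketch below; the URL is a placeholder and the other keys mirror the README example in this hunk:

```json5
{
  model: "gpt-5.1-pro",
  engine: "api",
  filesReport: true,
  browser: {
    // 0.4.3 reads the nested key; `browser.url` is kept as a legacy alias.
    chatgptUrl: "https://chatgpt.com/g/your-project-folder"
  }
}
```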
@@ -498,6 +498,13 @@ async function runRootCommand(options) {
      const isCodex = primaryModelCandidate.startsWith('gpt-5.1-codex');
      const isClaude = primaryModelCandidate.startsWith('claude');
      const userForcedBrowser = options.browser || options.engine === 'browser';
+     const hasNonGptBrowserTarget = (engine === 'browser' || userForcedBrowser) &&
+         (normalizedMultiModels.length > 0
+             ? normalizedMultiModels.some((model) => !model.startsWith('gpt-'))
+             : !resolvedModelCandidate.startsWith('gpt-'));
+     if (hasNonGptBrowserTarget) {
+         throw new Error('Browser engine only supports GPT-series ChatGPT models. Re-run with --engine api for Grok, Claude, Gemini, or other non-GPT models.');
+     }
      if (isGemini && userForcedBrowser) {
          throw new Error('Gemini is only supported via API. Use --engine api.');
      }
@@ -116,6 +116,9 @@ export function resolveApiModel(modelValue) {
      if (normalized in MODEL_CONFIGS) {
          return normalized;
      }
+     if (normalized.includes('grok')) {
+         return 'grok-4.1';
+     }
      if (normalized.includes('claude') && normalized.includes('sonnet')) {
          return 'claude-4.5-sonnet';
      }
@@ -156,6 +159,9 @@ export function inferModelFromLabel(modelValue) {
      if (normalized in MODEL_CONFIGS) {
          return normalized;
      }
+     if (normalized.includes('grok')) {
+         return 'grok-4.1';
+     }
      if (normalized.includes('claude') && normalized.includes('sonnet')) {
          return 'claude-4.5-sonnet';
      }
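Both `resolveApiModel` and `inferModelFromLabel` gain the same catch-all branch. Assuming normalization leaves these strings untouched (the behavior of the normalization step is not shown in this diff), the effect is roughly:

```js
import { resolveApiModel } from './options.js'; // path as used inside the package per the diff's imports

// Exact MODEL_CONFIGS keys are returned as-is; anything else containing "grok" now maps to 'grok-4.1'.
console.log(resolveApiModel('grok-4.1'));                // 'grok-4.1'
console.log(resolveApiModel('grok-4-1-fast-reasoning')); // 'grok-4.1' via the includes('grok') branch
```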
@@ -2,9 +2,11 @@ import { DEFAULT_MODEL, MODEL_CONFIGS } from '../oracle.js';
  import { resolveEngine } from './engine.js';
  import { normalizeModelOption, inferModelFromLabel, resolveApiModel, normalizeBaseUrl } from './options.js';
  import { resolveGeminiModelId } from '../oracle/gemini.js';
+ import { PromptValidationError } from '../oracle/errors.js';
  export function resolveRunOptionsFromConfig({ prompt, files = [], model, models, engine, userConfig, env = process.env, }) {
      const resolvedEngine = resolveEngineWithConfig({ engine, configEngine: userConfig?.engine, env });
      const browserRequested = engine === 'browser';
+     const browserConfigured = userConfig?.engine === 'browser';
      const requestedModelList = Array.isArray(models) ? models : [];
      const normalizedRequestedModels = requestedModelList.map((entry) => normalizeModelOption(entry)).filter(Boolean);
      const cliModelArg = normalizeModelOption(model ?? userConfig?.model) || DEFAULT_MODEL;
@@ -14,18 +16,26 @@ export function resolveRunOptionsFromConfig({ prompt, files = [], model, models,
      const isGemini = resolvedModel.startsWith('gemini');
      const isCodex = resolvedModel.startsWith('gpt-5.1-codex');
      const isClaude = resolvedModel.startsWith('claude');
-     const engineCoercedToApi = (isGemini || isCodex || isClaude) && browserRequested;
-     // When Gemini, Claude, or Codex is selected, always force API engine (overrides config/env auto browser).
-     const fixedEngine = isGemini || isCodex || isClaude || normalizedRequestedModels.length > 0 ? 'api' : resolvedEngine;
+     const isGrok = resolvedModel.startsWith('grok');
+     const engineWasBrowser = resolvedEngine === 'browser';
+     const allModels = normalizedRequestedModels.length > 0
+         ? Array.from(new Set(normalizedRequestedModels.map((entry) => resolveApiModel(entry))))
+         : [resolvedModel];
+     const hasNonGptBrowserTarget = (browserRequested || browserConfigured) && allModels.some((m) => !m.startsWith('gpt-'));
+     if (hasNonGptBrowserTarget) {
+         throw new PromptValidationError('Browser engine only supports GPT-series ChatGPT models. Re-run with --engine api for Grok, Claude, Gemini, or other non-GPT models.', { engine: 'browser', models: allModels });
+     }
+     const engineCoercedToApi = engineWasBrowser && (isGemini || isCodex || isClaude || isGrok);
+     // When Gemini, Claude, Codex, or Grok is selected (or multiple models are requested), force the API engine (overrides config/env auto-browser).
+     const fixedEngine = isGemini || isCodex || isClaude || isGrok || normalizedRequestedModels.length > 0 ? 'api' : resolvedEngine;
      const promptWithSuffix = userConfig?.promptSuffix && userConfig.promptSuffix.trim().length > 0
          ? `${prompt.trim()}\n${userConfig.promptSuffix}`
          : prompt;
      const search = userConfig?.search !== 'off';
      const heartbeatIntervalMs = userConfig?.heartbeatSeconds !== undefined ? userConfig.heartbeatSeconds * 1000 : 30_000;
-     const baseUrl = normalizeBaseUrl(userConfig?.apiBaseUrl ?? (isClaude ? env.ANTHROPIC_BASE_URL : env.OPENAI_BASE_URL));
-     const uniqueMultiModels = normalizedRequestedModels.length > 0
-         ? Array.from(new Set(normalizedRequestedModels.map((entry) => resolveApiModel(entry))))
-         : [];
+     const baseUrl = normalizeBaseUrl(userConfig?.apiBaseUrl ??
+         (isClaude ? env.ANTHROPIC_BASE_URL : isGrok ? env.XAI_BASE_URL : env.OPENAI_BASE_URL));
+     const uniqueMultiModels = normalizedRequestedModels.length > 0 ? allModels : [];
      const includesCodexMultiModel = uniqueMultiModels.some((entry) => entry.startsWith('gpt-5.1-codex'));
      if (includesCodexMultiModel && browserRequested) {
          // Silent coerce; multi-model still forces API.
@@ -23,10 +23,14 @@ export async function startMcpServer() {
      transport.onerror = (error) => {
          console.error('MCP transport error:', error);
      };
-     transport.onclose = () => {
-         // Keep quiet on normal close; caller owns lifecycle.
-     };
+     const closed = new Promise((resolve) => {
+         transport.onclose = () => {
+             resolve();
+         };
+     });
+     // Keep the process alive until the client closes the transport.
      await server.connect(transport);
+     await closed;
  }
  if (import.meta.url === `file://${process.argv[1]}` || process.argv[1]?.endsWith('oracle-mcp')) {
      startMcpServer().catch((error) => {
@@ -84,6 +84,20 @@ export const MODEL_CONFIGS = {
          supportsBackground: false,
          supportsSearch: false,
      },
+     'grok-4.1': {
+         model: 'grok-4.1',
+         apiModel: 'grok-4-1-fast-reasoning',
+         tokenizer: countTokensGpt5Pro,
+         inputLimit: 2_000_000,
+         pricing: {
+             inputPerToken: 0.2 / 1_000_000,
+             outputPerToken: 0.5 / 1_000_000,
+         },
+         reasoning: null,
+         supportsBackground: false,
+         supportsSearch: true,
+         searchToolType: 'web_search',
+     },
  };
  export const DEFAULT_SYSTEM_PROMPT = [
      'You are Oracle, a focused one-shot problem solver.',
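For context on the new pricing entry, the rates above correspond to $0.20 per million input tokens and $0.50 per million output tokens. A quick sketch of the arithmetic for a hypothetical request (token counts invented for illustration):

```js
// Rates taken from the grok-4.1 MODEL_CONFIGS entry above.
const inputPerToken = 0.2 / 1_000_000;   // $0.20 per 1M input tokens
const outputPerToken = 0.5 / 1_000_000;  // $0.50 per 1M output tokens

// Hypothetical request: 120k input tokens, 4k output tokens.
const cost = 120_000 * inputPerToken + 4_000 * outputPerToken;
console.log(cost.toFixed(4)); // "0.0260" → roughly 2.6 cents
```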
@@ -7,6 +7,7 @@ const MODEL_ID_MAP = {
      'gpt-5.1-codex': 'gpt-5.1-codex',
      'claude-4.5-sonnet': 'claude-4.5-sonnet',
      'claude-4.1-opus': 'claude-4.1-opus',
+     'grok-4.1': 'grok-4.1',
  };
  export function resolveGeminiModelId(modelName) {
      // Map our logical Gemini names to the exact model ids expected by the SDK.
@@ -12,6 +12,7 @@ export function buildPrompt(basePrompt, files, cwd = process.cwd()) {
      return `${basePrompt.trim()}\n\n${sectionText}`;
  }
  export function buildRequestBody({ modelConfig, systemPrompt, userPrompt, searchEnabled, maxOutputTokens, background, storeResponse, }) {
+     const searchToolType = modelConfig.searchToolType ?? 'web_search_preview';
      return {
          model: modelConfig.apiModel ?? modelConfig.model,
          instructions: systemPrompt,
@@ -26,7 +27,7 @@ export function buildRequestBody({ modelConfig, systemPrompt, userPrompt, search
                  ],
              },
          ],
-         tools: searchEnabled ? [{ type: 'web_search_preview' }] : undefined,
+         tools: searchEnabled ? [{ type: searchToolType }] : undefined,
          reasoning: modelConfig.reasoning || undefined,
          max_output_tokens: maxOutputTokens,
          background: background ? true : undefined,
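The `searchToolType` fallback above means GPT-series configs, which do not set the field, keep the previous `web_search_preview` tool, while the new `grok-4.1` entry opts into `web_search`. A minimal sketch of the resulting `tools` value (config objects abridged):

```js
const gptConfig = {};                                 // no searchToolType → default
const grokConfig = { searchToolType: 'web_search' };  // from the grok-4.1 entry above

const toolsFor = (modelConfig, searchEnabled) =>
  searchEnabled ? [{ type: modelConfig.searchToolType ?? 'web_search_preview' }] : undefined;

console.log(toolsFor(gptConfig, true));  // [ { type: 'web_search_preview' } ]
console.log(toolsFor(grokConfig, true)); // [ { type: 'web_search' } ]
```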
@@ -35,7 +35,16 @@ export async function runOracle(options, deps = {}) {
          ? stdoutWriteDep ?? process.stdout.write.bind(process.stdout)
          : () => true;
      const isTty = allowStdout && isStdoutTty;
-     const baseUrl = options.baseUrl?.trim() || process.env.OPENAI_BASE_URL?.trim();
+     const resolvedXaiBaseUrl = process.env.XAI_BASE_URL?.trim() || 'https://api.x.ai/v1';
+     let baseUrl = options.baseUrl?.trim();
+     if (!baseUrl) {
+         if (options.model.startsWith('grok')) {
+             baseUrl = resolvedXaiBaseUrl;
+         }
+         else {
+             baseUrl = process.env.OPENAI_BASE_URL?.trim();
+         }
+     }
      const logVerbose = (message) => {
          if (options.verbose) {
              log(dim(`[verbose] ${message}`));
  log(dim(`[verbose] ${message}`));
@@ -59,6 +68,9 @@ export async function runOracle(options, deps = {}) {
59
68
  if (model.startsWith('claude')) {
60
69
  return optionsApiKey ?? process.env.ANTHROPIC_API_KEY;
61
70
  }
71
+ if (model.startsWith('grok')) {
72
+ return optionsApiKey ?? process.env.XAI_API_KEY;
73
+ }
62
74
  return undefined;
63
75
  };
64
76
  const envVar = options.model.startsWith('gpt')
@@ -67,7 +79,9 @@ export async function runOracle(options, deps = {}) {
              : 'OPENAI_API_KEY'
          : options.model.startsWith('gemini')
              ? 'GEMINI_API_KEY'
-             : 'ANTHROPIC_API_KEY';
+             : options.model.startsWith('claude')
+                 ? 'ANTHROPIC_API_KEY'
+                 : 'XAI_API_KEY';
      const apiKey = getApiKeyForModel(options.model);
      if (!apiKey) {
          throw new PromptValidationError(`Missing ${envVar}. Set it via the environment or a .env file.`, {
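Combined with the base-URL change earlier in `runOracle`, credential resolution after this release roughly follows the mapping sketched below. This is a summary of what the diff shows rather than code from the package; the GPT entry is inferred from the README, and the Gemini base URL is untouched by this diff:

```js
// Which environment variables each model family reads (summary sketch).
const providerEnv = {
  gpt:    { apiKey: 'OPENAI_API_KEY',    baseUrl: 'OPENAI_BASE_URL' },
  gemini: { apiKey: 'GEMINI_API_KEY' },
  claude: { apiKey: 'ANTHROPIC_API_KEY', baseUrl: 'ANTHROPIC_BASE_URL' },
  // New in 0.4.3: grok-* models read XAI_* and fall back to the x.ai endpoint.
  grok:   { apiKey: 'XAI_API_KEY',       baseUrl: 'XAI_BASE_URL', fallbackBaseUrl: 'https://api.x.ai/v1' },
};
```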
@@ -86,7 +100,8 @@ export async function runOracle(options, deps = {}) {
          throw new PromptValidationError(`Unsupported model "${options.model}". Choose one of: ${Object.keys(MODEL_CONFIGS).join(', ')}`, { model: options.model });
      }
      const isLongRunningModel = isProTierModel;
-     const useBackground = options.background ?? isLongRunningModel;
+     const supportsBackground = modelConfig.supportsBackground !== false;
+     const useBackground = supportsBackground ? options.background ?? isLongRunningModel : false;
      const inputTokenBudget = options.maxInput ?? modelConfig.inputLimit;
      const files = await readFiles(options.file ?? [], { cwd, fsModule });
      const searchEnabled = options.search !== false;
@@ -165,6 +180,9 @@ export async function runOracle(options, deps = {}) {
      if (baseUrl) {
          log(dim(`Base URL: ${formatBaseUrlForLog(baseUrl)}`));
      }
+     if (options.background && !supportsBackground) {
+         log(dim('Background runs are not supported for this model; streaming in foreground instead.'));
+     }
      if (!options.suppressTips) {
          if (pendingNoFilesTip) {
              log(dim(pendingNoFilesTip));
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@steipete/oracle",
-   "version": "0.4.2",
+   "version": "0.4.3",
    "description": "CLI wrapper around OpenAI Responses API with GPT-5.1 Pro, GPT-5.1, and GPT-5.1 Codex high reasoning modes.",
    "type": "module",
    "main": "dist/bin/oracle-cli.js",
@@ -18,6 +18,9 @@
      "typecheck": "tsc --noEmit",
      "lint": "pnpm run typecheck && biome lint .",
      "test": "vitest run",
+     "test:mcp": "pnpm run build && pnpm run test:mcp:unit && pnpm run test:mcp:mcporter",
+     "test:mcp:unit": "vitest run tests/mcp*.test.ts tests/mcp/**/*.test.ts",
+     "test:mcp:mcporter": "npx -y mcporter list oracle-local --schema --config config/mcporter.json && npx -y mcporter call oracle-local.sessions limit:1 --config config/mcporter.json",
      "test:browser": "pnpm run build && ./scripts/browser-smoke.sh",
      "test:live": "ORACLE_LIVE_TEST=1 vitest run tests/live --exclude tests/live/openai-live.test.ts",
      "test:pro": "ORACLE_LIVE_TEST=1 vitest run tests/live/openai-live.test.ts",
@@ -73,6 +76,7 @@
    "devDependencies": {
      "@anthropic-ai/tokenizer": "^0.0.4",
      "@biomejs/biome": "^2.3.5",
+     "@cdktf/node-pty-prebuilt-multiarch": "0.10.2",
      "@types/chrome-remote-interface": "^0.31.14",
      "@types/inquirer": "^9.0.9",
      "@types/json5": "^0.0.30",
@@ -80,6 +84,7 @@
      "@vitest/coverage-v8": "4.0.9",
      "devtools-protocol": "^0.0.1545402",
      "es-toolkit": "^1.42.0",
+     "esbuild": "^0.27.0",
      "puppeteer-core": "^24.30.0",
      "tsx": "^4.20.6",
      "typescript": "^5.9.3",
@@ -94,6 +99,7 @@
      "win-dpapi": "npm:@primno/dpapi@2.0.1"
    },
    "onlyBuiltDependencies": [
+     "@cdktf/node-pty-prebuilt-multiarch",
      "keytar",
      "sqlite3",
      "win-dpapi"