claude-code-openai 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +138 -22
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -186172,6 +186172,9 @@ function getContextWindowForModel(model, betas) {
186172
186172
  return override;
186173
186173
  }
186174
186174
  }
186175
+ if (isOpenAIProvider() && model.startsWith("gpt-")) {
186176
+ return 128000;
186177
+ }
186175
186178
  if (has1mContext(model)) {
186176
186179
  return 1e6;
186177
186180
  }
@@ -186280,6 +186283,7 @@ var init_context = __esm(() => {
186280
186283
  init_envUtils();
186281
186284
  init_model();
186282
186285
  init_modelCapabilities();
186286
+ init_providers();
186283
186287
  });
186284
186288
 
186285
186289
  // src/utils/model/modelSupportOverrides.ts
@@ -204204,7 +204208,7 @@ var init_metadata = __esm(() => {
204204
204208
  isClaudeAiAuth: isClaudeAISubscriber(),
204205
204209
  version: "2.1.88-rebuild",
204206
204210
  versionBase: getVersionBase(),
204207
- buildTime: "2026-04-01T08:06:57.637Z",
204211
+ buildTime: "2026-04-01T08:38:53.515Z",
204208
204212
  deploymentEnvironment: env4.detectDeploymentEnvironment(),
204209
204213
  ...isEnvTruthy(process.env.GITHUB_ACTIONS) && {
204210
204214
  githubEventName: process.env.GITHUB_EVENT_NAME,
@@ -256480,6 +256484,9 @@ function modelSupportsThinking(model) {
256480
256484
  }
256481
256485
  const canonical = getCanonicalName(model);
256482
256486
  const provider = getAPIProvider();
256487
+ if (provider === "openai") {
256488
+ return /^(gpt-5|o[1-9]|o3)/.test(canonical);
256489
+ }
256483
256490
  if (provider === "foundry" || provider === "firstParty") {
256484
256491
  return !canonical.includes("claude-3-");
256485
256492
  }
@@ -256498,6 +256505,9 @@ function modelSupportsAdaptiveThinking(model) {
256498
256505
  return false;
256499
256506
  }
256500
256507
  const provider = getAPIProvider();
256508
+ if (provider === "openai") {
256509
+ return /^(gpt-5|o[1-9]|o3)/.test(canonical);
256510
+ }
256501
256511
  return provider === "firstParty" || provider === "foundry";
256502
256512
  }
256503
256513
  function shouldEnableThinkingByDefault() {
@@ -592566,7 +592576,7 @@ function getAnthropicEnvMetadata() {
592566
592576
  function getBuildAgeMinutes() {
592567
592577
  if (false)
592568
592578
  ;
592569
- const buildTime = new Date("2026-04-01T08:06:57.637Z").getTime();
592579
+ const buildTime = new Date("2026-04-01T08:38:53.515Z").getTime();
592570
592580
  if (isNaN(buildTime))
592571
592581
  return;
592572
592582
  return Math.floor((Date.now() - buildTime) / 60000);
@@ -594657,17 +594667,53 @@ function convertAssistantMessage(msg, items) {
594657
594667
  `) });
594658
594668
  }
594659
594669
  }
594670
+ function enforceStrictSchema(schema) {
594671
+ const out = { ...schema };
594672
+ for (const keyword of ["anyOf", "oneOf", "allOf"]) {
594673
+ if (Array.isArray(out[keyword])) {
594674
+ out[keyword] = out[keyword].map((s2) => enforceStrictSchema(s2));
594675
+ }
594676
+ }
594677
+ if (out.type === "object") {
594678
+ out.additionalProperties = false;
594679
+ if (out.properties && typeof out.properties === "object") {
594680
+ const props = out.properties;
594681
+ const newProps = {};
594682
+ for (const [key2, val] of Object.entries(props)) {
594683
+ newProps[key2] = enforceStrictSchema(val);
594684
+ }
594685
+ out.properties = newProps;
594686
+ const allKeys = Object.keys(newProps);
594687
+ if (allKeys.length > 0) {
594688
+ out.required = allKeys;
594689
+ }
594690
+ }
594691
+ }
594692
+ if (out.type === "array" && out.items && typeof out.items === "object") {
594693
+ out.items = enforceStrictSchema(out.items);
594694
+ }
594695
+ return out;
594696
+ }
594660
594697
  function convertToolSchemas(tools) {
594661
- return tools.filter((t2) => t2.type === "custom" || !("type" in t2) || t2.type === undefined).map((t2) => {
594662
- const tool = t2;
594663
- return {
594664
- type: "function",
594665
- name: tool.name,
594666
- description: tool.description ?? "",
594667
- parameters: tool.input_schema,
594668
- strict: tool.strict === true
594669
- };
594670
- });
594698
+ const oaiTools = [];
594699
+ for (const t2 of tools) {
594700
+ if ("type" in t2 && typeof t2.type === "string" && t2.type.startsWith("web_search")) {
594701
+ oaiTools.push({ type: "web_search_preview", search_context_size: "medium" });
594702
+ continue;
594703
+ }
594704
+ if (t2.type === "custom" || !("type" in t2) || t2.type === undefined) {
594705
+ const tool = t2;
594706
+ const strictParams = enforceStrictSchema(tool.input_schema);
594707
+ oaiTools.push({
594708
+ type: "function",
594709
+ name: tool.name,
594710
+ description: tool.description ?? "",
594711
+ parameters: strictParams,
594712
+ strict: true
594713
+ });
594714
+ }
594715
+ }
594716
+ return oaiTools;
594671
594717
  }
594672
594718
  function convertToolChoice(toolChoice) {
594673
594719
  if (!toolChoice)
@@ -594732,6 +594778,7 @@ function convertThinkingConfig(thinkingConfig) {
594732
594778
  // src/services/api/openai-query.ts
594733
594779
  var exports_openai_query = {};
594734
594780
  __export(exports_openai_query, {
594781
+ resolveOpenAIModel: () => resolveOpenAIModel,
594735
594782
  queryModelOpenAINonStreaming: () => queryModelOpenAINonStreaming,
594736
594783
  queryModelOpenAI: () => queryModelOpenAI,
594737
594784
  getLastOpenAIResponseId: () => getLastOpenAIResponseId,
@@ -594756,6 +594803,13 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
594756
594803
  model: options.model,
594757
594804
  source: options.querySource
594758
594805
  });
594806
+ if (!client3.apiKey) {
594807
+ yield createAssistantAPIErrorMessage({
594808
+ content: "OPENAI_API_KEY is not set. Please set the OPENAI_API_KEY environment variable.",
594809
+ error: "authentication_failed"
594810
+ });
594811
+ return;
594812
+ }
594759
594813
  const openaiModel = resolveOpenAIModel(options.model);
594760
594814
  const instructions = convertSystemPrompt(systemPrompt.filter(Boolean).map((text2) => ({ type: "text", text: text2 })));
594761
594815
  const oaiTools = convertToolSchemas(tools);
@@ -594777,19 +594831,21 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
594777
594831
  } else {
594778
594832
  input = convertMessages(messages);
594779
594833
  }
594780
- const maxOutputTokens = options.maxOutputTokensOverride || 16384;
594834
+ const maxOutputTokens = options.maxOutputTokensOverride || MAX_OUTPUT_TOKENS[openaiModel] || 16384;
594835
+ const hasFunctionTools = oaiTools.some((t2) => t2.type === "function");
594781
594836
  const params = {
594782
594837
  model: openaiModel,
594783
594838
  instructions: instructions || undefined,
594784
594839
  input,
594785
594840
  tools: oaiTools.length > 0 ? oaiTools : undefined,
594786
- tool_choice: oaiTools.length > 0 ? toolChoice : undefined,
594787
- parallel_tool_calls: false,
594841
+ tool_choice: hasFunctionTools ? toolChoice : undefined,
594842
+ parallel_tool_calls: hasFunctionTools ? false : undefined,
594788
594843
  stream: true,
594789
594844
  max_output_tokens: maxOutputTokens,
594790
594845
  temperature: options.temperatureOverride ?? 1,
594791
594846
  reasoning,
594792
- previous_response_id: usePreviousResponseId ? _lastResponseId : undefined
594847
+ previous_response_id: usePreviousResponseId ? _lastResponseId : undefined,
594848
+ store: true
594793
594849
  };
594794
594850
  logForDebugging(`[OpenAI] Request: model=${openaiModel} input=${input.length} items (${usePreviousResponseId ? "incremental, chain=" + _lastResponseId : "full"}) tools=${oaiTools.length}`);
594795
594851
  const start = Date.now();
@@ -594896,6 +594952,8 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
594896
594952
  const functionCallState = new Map;
594897
594953
  const textState = new Map;
594898
594954
  const reasoningState = new Map;
594955
+ const webSearchState = new Map;
594956
+ const pendingAnnotations = new Map;
594899
594957
  const reader = response.body.getReader();
594900
594958
  const decoder = new TextDecoder;
594901
594959
  let buffer = "";
@@ -595008,6 +595066,9 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595008
595066
  content_block: { type: "thinking", thinking: "", signature: "openai-reasoning" }
595009
595067
  }
595010
595068
  };
595069
+ } else if (item.type === "web_search_call") {
595070
+ webSearchState.set(event.output_index, item.id);
595071
+ logForDebugging(`[OpenAI] web_search_call started: ${item.id}`);
595011
595072
  }
595012
595073
  break;
595013
595074
  }
@@ -595117,10 +595178,16 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595117
595178
  responseStatus = "incomplete";
595118
595179
  break;
595119
595180
  }
595181
+ case "response.output_text.done": {
595182
+ if (event.annotations && event.annotations.length > 0) {
595183
+ pendingAnnotations.set(event.output_index, event.annotations);
595184
+ logForDebugging(`[OpenAI] Got ${event.annotations.length} annotations for output_index ${event.output_index}`);
595185
+ }
595186
+ break;
595187
+ }
595120
595188
  case "response.in_progress":
595121
595189
  case "response.content_part.added":
595122
595190
  case "response.content_part.done":
595123
- case "response.output_text.done":
595124
595191
  break;
595125
595192
  case "error": {
595126
595193
  logForDebugging(`[OpenAI] Stream error: ${event.error.message}`);
@@ -595136,6 +595203,36 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595136
595203
  } finally {
595137
595204
  reader.releaseLock();
595138
595205
  }
595206
+ if (pendingAnnotations.size > 0) {
595207
+ for (const [outputIdx, annotations] of pendingAnnotations) {
595208
+ const blockIdx = findBlockIndex(contentBlocks, outputIdx, "text", textState, functionCallState, reasoningState);
595209
+ if (blockIdx >= 0) {
595210
+ const block = contentBlocks[blockIdx];
595211
+ const uniqueUrls = new Map;
595212
+ for (const ann of annotations) {
595213
+ if (ann.type === "url_citation" && ann.url && !uniqueUrls.has(ann.url)) {
595214
+ uniqueUrls.set(ann.url, ann.title || ann.url);
595215
+ }
595216
+ }
595217
+ if (uniqueUrls.size > 0) {
595218
+ const sourcesText = `
595219
+
595220
+ Sources:
595221
+ ` + [...uniqueUrls].map(([url3, title]) => `- [${title}](${url3})`).join(`
595222
+ `);
595223
+ block.text += sourcesText;
595224
+ yield {
595225
+ type: "stream_event",
595226
+ event: {
595227
+ type: "content_block_delta",
595228
+ index: blockIdx,
595229
+ delta: { type: "text_delta", text: sourcesText }
595230
+ }
595231
+ };
595232
+ }
595233
+ }
595234
+ }
595235
+ }
595139
595236
  const stopReason = convertStopReason(responseStatus);
595140
595237
  yield {
595141
595238
  type: "stream_event",
@@ -595214,18 +595311,36 @@ async function queryModelOpenAINonStreaming(messages, systemPrompt, thinkingConf
595214
595311
  }
595215
595312
  return result;
595216
595313
  }
595217
- var OPENAI_MODEL_MAP, _lastResponseId = null;
595314
+ var OPENAI_MODEL_MAP, MAX_OUTPUT_TOKENS, _lastResponseId = null;
595218
595315
  var init_openai_query = __esm(() => {
595219
595316
  init_messages7();
595220
595317
  init_debug();
595221
595318
  init_client5();
595222
595319
  OPENAI_MODEL_MAP = {
595223
595320
  "claude-opus-4-6-20260401": "gpt-5.4",
595321
+ "claude-opus-4-5-20250918": "gpt-5.4",
595322
+ "claude-opus-4-1-20250415": "gpt-5.4",
595323
+ "claude-opus-4-20250115": "gpt-5.4",
595324
+ "claude-sonnet-4-6-20260401": "gpt-5.4-mini",
595224
595325
  "claude-sonnet-4-5-20250929": "gpt-5.4-mini",
595326
+ "claude-sonnet-4-20250514": "gpt-5.4-mini",
595327
+ "claude-3-7-sonnet-20250219": "gpt-5.4-mini",
595328
+ "claude-3-5-sonnet-20241022": "gpt-5.4-mini",
595225
595329
  "claude-haiku-4-5-20251001": "gpt-4.1-mini",
595330
+ "claude-3-5-haiku-20241022": "gpt-4.1-mini",
595226
595331
  opus: "gpt-5.4",
595227
595332
  sonnet: "gpt-5.4-mini",
595228
- haiku: "gpt-4.1-mini"
595333
+ haiku: "gpt-4.1-mini",
595334
+ "gpt-5.4": "gpt-5.4",
595335
+ "gpt-5.4-mini": "gpt-5.4-mini",
595336
+ "gpt-4.1": "gpt-4.1",
595337
+ "gpt-4.1-mini": "gpt-4.1-mini"
595338
+ };
595339
+ MAX_OUTPUT_TOKENS = {
595340
+ "gpt-5.4": 32768,
595341
+ "gpt-5.4-mini": 16384,
595342
+ "gpt-4.1": 16384,
595343
+ "gpt-4.1-mini": 16384
595229
595344
  };
595230
595345
  });
595231
595346
 
@@ -678920,7 +679035,7 @@ var init_bridge_kick = __esm(() => {
678920
679035
  var call56 = async () => {
678921
679036
  return {
678922
679037
  type: "text",
678923
- value: `${"2.1.88-rebuild"} (built ${"2026-04-01T08:06:57.637Z"})`
679038
+ value: `${"2.1.88-rebuild"} (built ${"2026-04-01T08:38:53.515Z"})`
678924
679039
  };
678925
679040
  }, version6, version_default;
678926
679041
  var init_version = __esm(() => {
@@ -700423,7 +700538,7 @@ async function sideQueryOpenAI(opts) {
700423
700538
  stop_sequences
700424
700539
  } = opts;
700425
700540
  const client3 = await getOpenAIClient({ model, source: "side_query" });
700426
- const openaiModel = process.env.OPENAI_MODEL || "gpt-4.1-mini";
700541
+ const openaiModel = resolveOpenAIModel(model);
700427
700542
  const instructions = Array.isArray(system) ? system.map((b5) => b5.text).join(`
700428
700543
 
700429
700544
  `) : system || "";
@@ -700530,6 +700645,7 @@ var init_sideQuery = __esm(() => {
700530
700645
  init_fingerprint();
700531
700646
  init_model();
700532
700647
  init_providers();
700648
+ init_openai_query();
700533
700649
  });
700534
700650
 
700535
700651
  // src/utils/claudeInChrome/mcpServer.ts
@@ -776876,4 +776992,4 @@ async function main2() {
776876
776992
  }
776877
776993
  main2();
776878
776994
 
776879
- //# debugId=0CF638629D23D41464756E2164756E21
776995
+ //# debugId=B6963C77F1D1E9E764756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-code-openai",
3
- "version": "0.1.2",
3
+ "version": "0.1.4",
4
4
  "description": "Claude Code CLI with OpenAI GPT-5.4 backend support",
5
5
  "type": "module",
6
6
  "bin": {