claude-code-openai 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +90 -10
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -186172,6 +186172,9 @@ function getContextWindowForModel(model, betas) {
186172
186172
  return override;
186173
186173
  }
186174
186174
  }
186175
+ if (isOpenAIProvider() && model.startsWith("gpt-")) {
186176
+ return 128000;
186177
+ }
186175
186178
  if (has1mContext(model)) {
186176
186179
  return 1e6;
186177
186180
  }
@@ -186280,6 +186283,7 @@ var init_context = __esm(() => {
186280
186283
  init_envUtils();
186281
186284
  init_model();
186282
186285
  init_modelCapabilities();
186286
+ init_providers();
186283
186287
  });
186284
186288
 
186285
186289
  // src/utils/model/modelSupportOverrides.ts
@@ -188479,7 +188483,7 @@ function isAnthropicAuthEnabled() {
188479
188483
  if (process.env.ANTHROPIC_UNIX_SOCKET) {
188480
188484
  return !!process.env.CLAUDE_CODE_OAUTH_TOKEN;
188481
188485
  }
188482
- const is3P = isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) || isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) || isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY);
188486
+ const is3P = isEnvTruthy(process.env.CLAUDE_CODE_USE_BEDROCK) || isEnvTruthy(process.env.CLAUDE_CODE_USE_VERTEX) || isEnvTruthy(process.env.CLAUDE_CODE_USE_FOUNDRY) || isEnvTruthy(process.env.CLAUDE_CODE_USE_OPENAI);
188483
188487
  const settings = getSettings_DEPRECATED() || {};
188484
188488
  const apiKeyHelper = settings.apiKeyHelper;
188485
188489
  const hasExternalAuthToken = process.env.ANTHROPIC_AUTH_TOKEN || apiKeyHelper || process.env.CLAUDE_CODE_API_KEY_FILE_DESCRIPTOR;
@@ -204204,7 +204208,7 @@ var init_metadata = __esm(() => {
204204
204208
  isClaudeAiAuth: isClaudeAISubscriber(),
204205
204209
  version: "2.1.88-rebuild",
204206
204210
  versionBase: getVersionBase(),
204207
- buildTime: "2026-04-01T08:22:00.019Z",
204211
+ buildTime: "2026-04-01T08:46:34.702Z",
204208
204212
  deploymentEnvironment: env4.detectDeploymentEnvironment(),
204209
204213
  ...isEnvTruthy(process.env.GITHUB_ACTIONS) && {
204210
204214
  githubEventName: process.env.GITHUB_EVENT_NAME,
@@ -256480,6 +256484,9 @@ function modelSupportsThinking(model) {
256480
256484
  }
256481
256485
  const canonical = getCanonicalName(model);
256482
256486
  const provider = getAPIProvider();
256487
+ if (provider === "openai") {
256488
+ return /^(gpt-5|o[1-9]|o3)/.test(canonical);
256489
+ }
256483
256490
  if (provider === "foundry" || provider === "firstParty") {
256484
256491
  return !canonical.includes("claude-3-");
256485
256492
  }
@@ -256498,6 +256505,9 @@ function modelSupportsAdaptiveThinking(model) {
256498
256505
  return false;
256499
256506
  }
256500
256507
  const provider = getAPIProvider();
256508
+ if (provider === "openai") {
256509
+ return /^(gpt-5|o[1-9]|o3)/.test(canonical);
256510
+ }
256501
256511
  return provider === "firstParty" || provider === "foundry";
256502
256512
  }
256503
256513
  function shouldEnableThinkingByDefault() {
@@ -592566,7 +592576,7 @@ function getAnthropicEnvMetadata() {
592566
592576
  function getBuildAgeMinutes() {
592567
592577
  if (false)
592568
592578
  ;
592569
- const buildTime = new Date("2026-04-01T08:22:00.019Z").getTime();
592579
+ const buildTime = new Date("2026-04-01T08:46:34.702Z").getTime();
592570
592580
  if (isNaN(buildTime))
592571
592581
  return;
592572
592582
  return Math.floor((Date.now() - buildTime) / 60000);
@@ -594657,6 +594667,33 @@ function convertAssistantMessage(msg, items) {
594657
594667
  `) });
594658
594668
  }
594659
594669
  }
594670
+ function enforceStrictSchema(schema) {
594671
+ const out = { ...schema };
594672
+ for (const keyword of ["anyOf", "oneOf", "allOf"]) {
594673
+ if (Array.isArray(out[keyword])) {
594674
+ out[keyword] = out[keyword].map((s2) => enforceStrictSchema(s2));
594675
+ }
594676
+ }
594677
+ if (out.type === "object") {
594678
+ out.additionalProperties = false;
594679
+ if (out.properties && typeof out.properties === "object") {
594680
+ const props = out.properties;
594681
+ const newProps = {};
594682
+ for (const [key2, val] of Object.entries(props)) {
594683
+ newProps[key2] = enforceStrictSchema(val);
594684
+ }
594685
+ out.properties = newProps;
594686
+ const allKeys = Object.keys(newProps);
594687
+ if (allKeys.length > 0) {
594688
+ out.required = allKeys;
594689
+ }
594690
+ }
594691
+ }
594692
+ if (out.type === "array" && out.items && typeof out.items === "object") {
594693
+ out.items = enforceStrictSchema(out.items);
594694
+ }
594695
+ return out;
594696
+ }
594660
594697
  function convertToolSchemas(tools) {
594661
594698
  const oaiTools = [];
594662
594699
  for (const t2 of tools) {
@@ -594666,12 +594703,13 @@ function convertToolSchemas(tools) {
594666
594703
  }
594667
594704
  if (t2.type === "custom" || !("type" in t2) || t2.type === undefined) {
594668
594705
  const tool = t2;
594706
+ const strictParams = enforceStrictSchema(tool.input_schema);
594669
594707
  oaiTools.push({
594670
594708
  type: "function",
594671
594709
  name: tool.name,
594672
594710
  description: tool.description ?? "",
594673
- parameters: tool.input_schema,
594674
- strict: tool.strict === true
594711
+ parameters: strictParams,
594712
+ strict: true
594675
594713
  });
594676
594714
  }
594677
594715
  }
@@ -594740,6 +594778,7 @@ function convertThinkingConfig(thinkingConfig) {
594740
594778
  // src/services/api/openai-query.ts
594741
594779
  var exports_openai_query = {};
594742
594780
  __export(exports_openai_query, {
594781
+ resolveOpenAIModel: () => resolveOpenAIModel,
594743
594782
  queryModelOpenAINonStreaming: () => queryModelOpenAINonStreaming,
594744
594783
  queryModelOpenAI: () => queryModelOpenAI,
594745
594784
  getLastOpenAIResponseId: () => getLastOpenAIResponseId,
@@ -594764,6 +594803,13 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
594764
594803
  model: options.model,
594765
594804
  source: options.querySource
594766
594805
  });
594806
+ if (!client3.apiKey) {
594807
+ yield createAssistantAPIErrorMessage({
594808
+ content: "OPENAI_API_KEY is not set. Please set the OPENAI_API_KEY environment variable.",
594809
+ error: "authentication_failed"
594810
+ });
594811
+ return;
594812
+ }
594767
594813
  const openaiModel = resolveOpenAIModel(options.model);
594768
594814
  const instructions = convertSystemPrompt(systemPrompt.filter(Boolean).map((text2) => ({ type: "text", text: text2 })));
594769
594815
  const oaiTools = convertToolSchemas(tools);
@@ -594842,7 +594888,18 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
594842
594888
  delayMs = BASE_DELAY_MS4 * Math.pow(2, attempt);
594843
594889
  }
594844
594890
  logForDebugging(`[OpenAI] Retrying in ${delayMs}ms...`);
594845
- await new Promise((resolve35) => setTimeout(resolve35, delayMs));
594891
+ await new Promise((resolve35, reject2) => {
594892
+ if (signal.aborted) {
594893
+ resolve35();
594894
+ return;
594895
+ }
594896
+ const timer = setTimeout(resolve35, delayMs);
594897
+ const onAbort = () => {
594898
+ clearTimeout(timer);
594899
+ resolve35();
594900
+ };
594901
+ signal.addEventListener("abort", onAbort, { once: true });
594902
+ });
594846
594903
  }
594847
594904
  if (!response || !response.ok) {
594848
594905
  const status = lastStatus;
@@ -595228,6 +595285,7 @@ Sources:
595228
595285
  logForDebugging(`[OpenAI] Complete: ${contentBlocks.length} blocks, ${usage.input_tokens}in/${usage.output_tokens}out tokens, ${Date.now() - start}ms`);
595229
595286
  } catch (err2) {
595230
595287
  if (err2 instanceof Error && err2.name === "AbortError") {
595288
+ _lastResponseId = null;
595231
595289
  return;
595232
595290
  }
595233
595291
  const errorMsg = err2 instanceof Error ? err2.message : String(err2);
@@ -678989,7 +679047,7 @@ var init_bridge_kick = __esm(() => {
678989
679047
  var call56 = async () => {
678990
679048
  return {
678991
679049
  type: "text",
678992
- value: `${"2.1.88-rebuild"} (built ${"2026-04-01T08:22:00.019Z"})`
679050
+ value: `${"2.1.88-rebuild"} (built ${"2026-04-01T08:46:34.702Z"})`
678993
679051
  };
678994
679052
  }, version6, version_default;
678995
679053
  var init_version = __esm(() => {
@@ -698765,6 +698823,7 @@ async function verifyApiKey(apiKey, isNonInteractiveSession) {
698765
698823
  }
698766
698824
  if (getAPIProvider() === "openai") {
698767
698825
  try {
698826
+ const { resolveOpenAIModel: resolveOpenAIModel2 } = await Promise.resolve().then(() => (init_openai_query(), exports_openai_query));
698768
698827
  const client3 = await getOpenAIClient({ apiKey, maxRetries: 0 });
698769
698828
  const resp = await client3.fetch(`${client3.baseURL}/responses`, {
698770
698829
  method: "POST",
@@ -698773,7 +698832,7 @@ async function verifyApiKey(apiKey, isNonInteractiveSession) {
698773
698832
  Authorization: `Bearer ${client3.apiKey}`
698774
698833
  },
698775
698834
  body: JSON.stringify({
698776
- model: process.env.OPENAI_MODEL || "gpt-4.1-mini",
698835
+ model: resolveOpenAIModel2(getSmallFastModel()),
698777
698836
  input: "test",
698778
698837
  max_output_tokens: 1
698779
698838
  })
@@ -700492,7 +700551,7 @@ async function sideQueryOpenAI(opts) {
700492
700551
  stop_sequences
700493
700552
  } = opts;
700494
700553
  const client3 = await getOpenAIClient({ model, source: "side_query" });
700495
- const openaiModel = process.env.OPENAI_MODEL || "gpt-4.1-mini";
700554
+ const openaiModel = resolveOpenAIModel(model);
700496
700555
  const instructions = Array.isArray(system) ? system.map((b5) => b5.text).join(`
700497
700556
 
700498
700557
  `) : system || "";
@@ -700599,6 +700658,7 @@ var init_sideQuery = __esm(() => {
700599
700658
  init_fingerprint();
700600
700659
  init_model();
700601
700660
  init_providers();
700661
+ init_openai_query();
700602
700662
  });
700603
700663
 
700604
700664
  // src/utils/claudeInChrome/mcpServer.ts
@@ -756287,6 +756347,25 @@ var init_useTimeout = __esm(() => {
756287
756347
 
756288
756348
  // src/utils/preflightChecks.tsx
756289
756349
  async function checkEndpoints() {
756350
+ if (getAPIProvider() === "openai") {
756351
+ try {
756352
+ const resp = await axios_default.get("https://api.openai.com/v1/models", {
756353
+ headers: { "User-Agent": getUserAgent(), Authorization: `Bearer ${process.env.OPENAI_API_KEY || "test"}` },
756354
+ validateStatus: () => true
756355
+ });
756356
+ if (resp.status > 0) {
756357
+ return { success: true };
756358
+ }
756359
+ return { success: false, error: "Failed to connect to api.openai.com" };
756360
+ } catch (error42) {
756361
+ const sslHint = getSSLErrorHint(error42);
756362
+ return {
756363
+ success: false,
756364
+ error: `Failed to connect to api.openai.com: ${error42 instanceof Error ? error42.code || error42.message : String(error42)}`,
756365
+ sslHint: sslHint ?? undefined
756366
+ };
756367
+ }
756368
+ }
756290
756369
  try {
756291
756370
  const oauthConfig = getOauthConfig();
756292
756371
  const tokenUrl = new URL(oauthConfig.TOKEN_URL);
@@ -756447,6 +756526,7 @@ var init_preflightChecks = __esm(() => {
756447
756526
  init_ink2();
756448
756527
  init_errorUtils();
756449
756528
  init_product();
756529
+ init_providers();
756450
756530
  init_http2();
756451
756531
  init_log2();
756452
756532
  import_react_compiler_runtime372 = __toESM(require_dist7(), 1);
@@ -776945,4 +777025,4 @@ async function main2() {
776945
777025
  }
776946
777026
  main2();
776947
777027
 
776948
- //# debugId=EA0F0C0F950E52A464756E2164756E21
777028
+ //# debugId=EF296FB1D976B08864756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-code-openai",
3
- "version": "0.1.3",
3
+ "version": "0.1.5",
4
4
  "description": "Claude Code CLI with OpenAI GPT-5.4 backend support",
5
5
  "type": "module",
6
6
  "bin": {