claude-code-openai 0.1.9 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +54 -6
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -186389,6 +186389,9 @@ function getCustomHeaders() {
186389
186389
  }
186390
186390
  return customHeaders;
186391
186391
  }
186392
+ function clearOpenAIClientCache() {
186393
+ _cachedOpenAIClient = null;
186394
+ }
186392
186395
  async function getOpenAIClient({
186393
186396
  apiKey
186394
186397
  }) {
@@ -204603,7 +204606,7 @@ var init_metadata = __esm(() => {
204603
204606
  isClaudeAiAuth: isClaudeAISubscriber(),
204604
204607
  version: "2.1.88-rebuild",
204605
204608
  versionBase: getVersionBase(),
204606
- buildTime: "2026-04-01T09:46:44.490Z",
204609
+ buildTime: "2026-04-01T09:59:26.964Z",
204607
204610
  deploymentEnvironment: env4.detectDeploymentEnvironment(),
204608
204611
  ...isEnvTruthy(process.env.GITHUB_ACTIONS) && {
204609
204612
  githubEventName: process.env.GITHUB_EVENT_NAME,
@@ -592889,7 +592892,7 @@ function getAnthropicEnvMetadata() {
592889
592892
  function getBuildAgeMinutes() {
592890
592893
  if (false)
592891
592894
  ;
592892
- const buildTime = new Date("2026-04-01T09:46:44.490Z").getTime();
592895
+ const buildTime = new Date("2026-04-01T09:59:26.964Z").getTime();
592893
592896
  if (isNaN(buildTime))
592894
592897
  return;
592895
592898
  return Math.floor((Date.now() - buildTime) / 60000);
@@ -595165,6 +595168,15 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595165
595168
  const MAX_RETRIES4 = 3;
595166
595169
  const BASE_DELAY_MS4 = 500;
595167
595170
  try {
595171
+ let resetWatchdog = function() {
595172
+ if (watchdogTimer)
595173
+ clearTimeout(watchdogTimer);
595174
+ watchdogTimer = setTimeout(() => {
595175
+ logForDebugging(`[OpenAI] Stream watchdog triggered — no data for ${STREAM_WATCHDOG_MS / 1000}s, aborting`);
595176
+ watchdogController.abort();
595177
+ reader.cancel().catch(() => {});
595178
+ }, STREAM_WATCHDOG_MS);
595179
+ };
595168
595180
  let response;
595169
595181
  let lastErrorMessage = "";
595170
595182
  let lastStatus = 0;
@@ -595222,8 +595234,30 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595222
595234
  error: "rate_limit"
595223
595235
  });
595224
595236
  } else if (status === 401 || status === 403) {
595237
+ clearOpenAIClientCache();
595238
+ if (!_auth401RetryInProgress) {
595239
+ try {
595240
+ const { loadOpenAITokens: loadOpenAITokens2, refreshOpenAIToken: refreshOpenAIToken2 } = await Promise.resolve().then(() => (init_openai_oauth(), exports_openai_oauth));
595241
+ const tokens = loadOpenAITokens2();
595242
+ if (tokens?.refresh_token) {
595243
+ logForDebugging("[OpenAI] 401 received — attempting OAuth token refresh...");
595244
+ _auth401RetryInProgress = true;
595245
+ try {
595246
+ await refreshOpenAIToken2(tokens.refresh_token);
595247
+ logForDebugging("[OpenAI] Token refreshed, retrying request...");
595248
+ yield* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools, signal, options);
595249
+ return;
595250
+ } finally {
595251
+ _auth401RetryInProgress = false;
595252
+ }
595253
+ }
595254
+ } catch (refreshErr) {
595255
+ _auth401RetryInProgress = false;
595256
+ logForDebugging(`[OpenAI] OAuth token refresh failed: ${refreshErr instanceof Error ? refreshErr.message : String(refreshErr)}`);
595257
+ }
595258
+ }
595225
595259
  yield createAssistantAPIErrorMessage({
595226
- content: `Authentication error: ${lastErrorMessage}. Check your OPENAI_API_KEY.`,
595260
+ content: `Authentication error: ${lastErrorMessage}. Run /login to re-authenticate or check your OPENAI_API_KEY.`,
595227
595261
  error: "authentication_failed"
595228
595262
  });
595229
595263
  } else if (status === 400 && lastErrorMessage.includes("context_length_exceeded")) {
@@ -595281,11 +595315,16 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595281
595315
  const reader = response.body.getReader();
595282
595316
  const decoder = new TextDecoder;
595283
595317
  let buffer = "";
595318
+ const STREAM_WATCHDOG_MS = 90000;
595319
+ let watchdogTimer = null;
595320
+ const watchdogController = new AbortController;
595321
+ resetWatchdog();
595284
595322
  try {
595285
595323
  while (true) {
595286
595324
  const { done, value } = await reader.read();
595287
595325
  if (done)
595288
595326
  break;
595327
+ resetWatchdog();
595289
595328
  buffer += decoder.decode(value, { stream: true });
595290
595329
  const lines2 = buffer.split(`
595291
595330
  `);
@@ -595525,8 +595564,17 @@ async function* queryModelOpenAI(messages, systemPrompt, thinkingConfig, tools,
595525
595564
  }
595526
595565
  }
595527
595566
  } finally {
595567
+ if (watchdogTimer)
595568
+ clearTimeout(watchdogTimer);
595528
595569
  reader.releaseLock();
595529
595570
  }
595571
+ if (watchdogController.signal.aborted) {
595572
+ yield createAssistantAPIErrorMessage({
595573
+ content: `Stream timed out — no data received for ${STREAM_WATCHDOG_MS / 1000} seconds. The server may be overloaded. Please try again.`,
595574
+ error: "unknown"
595575
+ });
595576
+ return;
595577
+ }
595530
595578
  if (pendingAnnotations.size > 0) {
595531
595579
  for (const [outputIdx, annotations] of pendingAnnotations) {
595532
595580
  const blockIdx = findBlockIndex(contentBlocks, outputIdx, "text", textState, functionCallState, reasoningState);
@@ -595636,7 +595684,7 @@ async function queryModelOpenAINonStreaming(messages, systemPrompt, thinkingConf
595636
595684
  }
595637
595685
  return result;
595638
595686
  }
595639
- var OPENAI_MODEL_MAP, MAX_OUTPUT_TOKENS, _lastResponseId = null;
595687
+ var OPENAI_MODEL_MAP, MAX_OUTPUT_TOKENS, _lastResponseId = null, _auth401RetryInProgress = false;
595640
595688
  var init_openai_query = __esm(() => {
595641
595689
  init_messages7();
595642
595690
  init_debug();
@@ -679360,7 +679408,7 @@ var init_bridge_kick = __esm(() => {
679360
679408
  var call56 = async () => {
679361
679409
  return {
679362
679410
  type: "text",
679363
- value: `${"2.1.88-rebuild"} (built ${"2026-04-01T09:46:44.490Z"})`
679411
+ value: `${"2.1.88-rebuild"} (built ${"2026-04-01T09:59:26.964Z"})`
679364
679412
  };
679365
679413
  }, version6, version_default;
679366
679414
  var init_version = __esm(() => {
@@ -777365,4 +777413,4 @@ async function main2() {
777365
777413
  }
777366
777414
  main2();
777367
777415
 
777368
- //# debugId=DACEE88BBB4178C864756E2164756E21
777416
+ //# debugId=F1DED8B8A2869D0864756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "claude-code-openai",
3
- "version": "0.1.9",
3
+ "version": "0.1.10",
4
4
  "description": "Claude Code CLI with OpenAI GPT-5.4 backend support",
5
5
  "type": "module",
6
6
  "bin": {