call-ai 0.7.0-dev-preview-1 → 0.7.0-dev-preview-3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/api.js +45 -2
  2. package/package.json +1 -1
package/dist/api.js CHANGED
@@ -2,6 +2,9 @@
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.callAI = callAI;
4
4
  const strategies_1 = require("./strategies");
5
+ // Import package version for debugging
6
+ // eslint-disable-next-line @typescript-eslint/no-var-requires
7
+ const PACKAGE_VERSION = require('../package.json').version;
5
8
  // Default fallback model when the primary model fails or is unavailable
6
9
  const FALLBACK_MODEL = "openrouter/auto";
7
10
  /**
@@ -278,6 +281,10 @@ async function extractClaudeResponse(response) {
278
281
  * Internal implementation for streaming API calls
279
282
  */
280
283
  async function* callAIStreaming(prompt, options = {}, isRetry = false) {
284
+ // Track errors to ensure consistent propagation across environments
285
+ let streamingError = null;
286
+ // Browser-specific detection to help with environment-specific handling
287
+ const isBrowser = typeof window !== 'undefined';
281
288
  try {
282
289
  const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
283
290
  const response = await fetch(endpoint, requestOptions);
@@ -289,7 +296,18 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
289
296
  }
290
297
  const errorText = await response.text();
291
298
  console.error(`API Error: ${response.status} ${response.statusText}`, errorText);
292
- throw new Error(`API returned error ${response.status}: ${response.statusText}`);
299
+ // Create the error object
300
+ streamingError = new Error(`API returned error ${response.status}: ${response.statusText}`);
301
+ // For browsers, we need to handle errors differently
302
+ if (isBrowser) {
303
+ // In browsers, return an error string that can be detected and handled
304
+ // The string format helps our client detect this is an error condition
305
+ return `{"error":true,"message":"API returned error ${response.status}: ${response.statusText}"}`;
306
+ }
307
+ else {
308
+ // In Node.js, throw directly as it propagates correctly
309
+ throw streamingError;
310
+ }
293
311
  }
294
312
  // Handle streaming response
295
313
  if (!response.body) {
@@ -304,7 +322,7 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
304
322
  const { done, value } = await reader.read();
305
323
  if (done) {
306
324
  if (options.debug) {
307
- console.log(`[callAI-streaming:complete] Stream finished after ${chunkCount} chunks`);
325
+ console.log(`[callAI-streaming:complete v${PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`);
308
326
  }
309
327
  break;
310
328
  }
@@ -442,6 +460,30 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
442
460
  }
443
461
  }
444
462
  }
463
+ // Check if we encountered an error earlier but didn't throw it yet
464
+ // This ensures browser environments will get the error during iteration
465
+ if (streamingError) {
466
+ handleApiError(streamingError, "Streaming API call", options.debug);
467
+ }
468
+ // Final check for errors before returning
469
+ if (streamingError) {
470
+ throw streamingError;
471
+ }
472
+ // Check if the completeText looks like our error marker format
473
+ if (completeText && completeText.includes('"error":true') && completeText.includes('"message":')) {
474
+ try {
475
+ // Try to parse the error info
476
+ const errorInfo = JSON.parse(completeText);
477
+ if (errorInfo.error === true && errorInfo.message) {
478
+ // Create and throw proper error
479
+ const detectedError = new Error(errorInfo.message);
480
+ handleApiError(detectedError, "Streaming API call", options.debug);
481
+ }
482
+ }
483
+ catch (parseError) {
484
+ // If we can't parse, continue with normal processing
485
+ }
486
+ }
445
487
  // If we have assembled tool calls but haven't yielded them yet
446
488
  if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
447
489
  return toolCallsAssembled;
@@ -450,6 +492,7 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
450
492
  return schemaStrategy.processResponse(completeText);
451
493
  }
452
494
  catch (error) {
495
+ // Standardize error handling
453
496
  handleApiError(error, "Streaming API call", options.debug);
454
497
  }
455
498
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "call-ai",
3
- "version": "0.7.0-dev-preview-1",
3
+ "version": "0.7.0-dev-preview-3",
4
4
  "description": "Lightweight library for making AI API calls with streaming support",
5
5
  "main": "dist/index.js",
6
6
  "browser": "dist/index.js",