call-ai 0.6.4 → 0.7.0-dev-preview-2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/api.js +26 -15
  2. package/package.json +1 -1
package/dist/api.js CHANGED
@@ -49,18 +49,17 @@ async function bufferStreamingResults(prompt, options) {
49
49
  return finalResult;
50
50
  }
51
51
  catch (error) {
52
- return handleApiError(error, "Streaming buffer error");
52
+ handleApiError(error, "Streaming buffer error", options.debug);
53
53
  }
54
54
  }
55
55
  /**
56
56
  * Standardized API error handler
57
57
  */
58
- function handleApiError(error, context) {
59
- console.error(`[callAI:${context}]:`, error);
60
- return JSON.stringify({
61
- error: String(error),
62
- message: `Sorry, I couldn't process that request: ${String(error)}`,
63
- });
58
+ function handleApiError(error, context, debug = false) {
59
+ if (debug) {
60
+ console.error(`[callAI:${context}]:`, error);
61
+ }
62
+ throw new Error(`${context}: ${String(error)}`);
64
63
  }
65
64
  /**
66
65
  * Helper to check if an error indicates invalid model and handle fallback
@@ -176,7 +175,7 @@ async function callAINonStreaming(prompt, options = {}, isRetry = false) {
176
175
  result = await extractClaudeResponse(response);
177
176
  }
178
177
  catch (error) {
179
- return handleApiError(error, "Claude API response processing failed");
178
+ handleApiError(error, "Claude API response processing failed", options.debug);
180
179
  }
181
180
  }
182
181
  else {
@@ -208,7 +207,7 @@ async function callAINonStreaming(prompt, options = {}, isRetry = false) {
208
207
  return schemaStrategy.processResponse(content);
209
208
  }
210
209
  catch (error) {
211
- return handleApiError(error, "Non-streaming API call");
210
+ handleApiError(error, "Non-streaming API call", options.debug);
212
211
  }
213
212
  }
214
213
  /**
@@ -279,6 +278,8 @@ async function extractClaudeResponse(response) {
279
278
  * Internal implementation for streaming API calls
280
279
  */
281
280
  async function* callAIStreaming(prompt, options = {}, isRetry = false) {
281
+ // Track errors to ensure consistent propagation across environments
282
+ let streamingError = null;
282
283
  try {
283
284
  const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
284
285
  const response = await fetch(endpoint, requestOptions);
@@ -290,7 +291,8 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
290
291
  }
291
292
  const errorText = await response.text();
292
293
  console.error(`API Error: ${response.status} ${response.statusText}`, errorText);
293
- throw new Error(`API returned error ${response.status}: ${response.statusText}`);
294
+ streamingError = new Error(`API returned error ${response.status}: ${response.statusText}`);
295
+ throw streamingError;
294
296
  }
295
297
  // Handle streaming response
296
298
  if (!response.body) {
@@ -333,10 +335,9 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
333
335
  // Check for error in the parsed JSON response
334
336
  if (json.error) {
335
337
  // Use the standard error format as the rest of the library
336
- const errorResponse = handleApiError(new Error(`API returned error: ${JSON.stringify(json.error)}`), "Streaming API call error");
337
- yield errorResponse;
338
- // After yielding the error, break out of the loop to end streaming
339
- break;
338
+ // We need to throw the error properly
339
+ handleApiError(new Error(`API returned error: ${JSON.stringify(json.error)}`), "Streaming API call error", options.debug);
340
+ // This code is unreachable as handleApiError throws
340
341
  }
341
342
  // Handle tool use response - Claude with schema cases
342
343
  const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
@@ -444,6 +445,15 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
444
445
  }
445
446
  }
446
447
  }
448
+ // Check if we encountered an error earlier but didn't throw it yet
449
+ // This ensures browser environments will get the error during iteration
450
+ if (streamingError) {
451
+ handleApiError(streamingError, "Streaming API call", options.debug);
452
+ }
453
+ // Final check for errors before returning
454
+ if (streamingError) {
455
+ throw streamingError;
456
+ }
447
457
  // If we have assembled tool calls but haven't yielded them yet
448
458
  if (toolCallsAssembled && (!completeText || completeText.length === 0)) {
449
459
  return toolCallsAssembled;
@@ -452,6 +462,7 @@ async function* callAIStreaming(prompt, options = {}, isRetry = false) {
452
462
  return schemaStrategy.processResponse(completeText);
453
463
  }
454
464
  catch (error) {
455
- return handleApiError(error, "Streaming API call");
465
+ // Standardize error handling
466
+ handleApiError(error, "Streaming API call", options.debug);
456
467
  }
457
468
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "call-ai",
3
- "version": "0.6.4",
3
+ "version": "0.7.0-dev-preview-2",
4
4
  "description": "Lightweight library for making AI API calls with streaming support",
5
5
  "main": "dist/index.js",
6
6
  "browser": "dist/index.js",