call-ai 0.7.0-dev-preview-7 → 0.7.0-dev-preview-9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/api.js +68 -17
  2. package/package.json +1 -1
package/dist/api.js CHANGED
@@ -33,15 +33,40 @@ function callAI(prompt, options = {}) {
33
33
  // Do setup and validation before returning the generator
34
34
  const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
35
35
  // Make the fetch request and handle errors before creating the generator
36
- const response = await fetch(endpoint, requestOptions);
37
- // Enhanced error handling with more debugging
36
+ console.log(`[callAI:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
37
+ console.log(`[callAI:${PACKAGE_VERSION}] With model: ${model}`);
38
+ let response;
39
+ try {
40
+ response = await fetch(endpoint, requestOptions);
41
+ console.log(`[callAI:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText);
42
+ }
43
+ catch (fetchError) {
44
+ console.error(`[callAI:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
45
+ throw fetchError; // Re-throw network errors
46
+ }
47
+ // Explicitly check for HTTP error status and log extensively
48
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.ok =`, response.ok);
49
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.status =`, response.status);
50
+ // Enhanced error handling with more debugging - MUST check !response.ok
38
51
  if (!response.ok) {
52
+ console.log(`[callAI:${PACKAGE_VERSION}] Detected error response with status:`, response.status);
39
53
  if (options.debug) {
40
54
  console.error(`[callAI:${PACKAGE_VERSION}] HTTP Error:`, response.status, response.statusText, response.url);
41
55
  }
42
56
  // Check if this is an invalid model error
43
- const { isInvalidModel, errorData } = await checkForInvalidModelError(response.clone(), // Clone response since we'll need to read body twice
44
- model, false, options.skipRetry);
57
+ console.log(`[callAI:${PACKAGE_VERSION}] Checking for invalid model error...`);
58
+ let isInvalidModel = false;
59
+ let errorData = null;
60
+ try {
61
+ const result = await checkForInvalidModelError(response.clone(), // Clone response since we'll need to read body twice
62
+ model, false, options.skipRetry);
63
+ isInvalidModel = result.isInvalidModel;
64
+ errorData = result.errorData;
65
+ console.log(`[callAI:${PACKAGE_VERSION}] Invalid model check result:`, isInvalidModel);
66
+ }
67
+ catch (checkError) {
68
+ console.error(`[callAI:${PACKAGE_VERSION}] Error during invalid model check:`, checkError);
69
+ }
45
70
  if (isInvalidModel && !options.skipRetry) {
46
71
  if (options.debug) {
47
72
  console.log(`[callAI:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`);
@@ -51,11 +76,19 @@ function callAI(prompt, options = {}) {
51
76
  return result;
52
77
  }
53
78
  // Get full error text from body
54
- const errorText = await response.text();
55
- if (options.debug) {
56
- console.error(`[callAI:${PACKAGE_VERSION}] Error response body:`, errorText);
79
+ console.log(`[callAI:${PACKAGE_VERSION}] Reading error response body...`);
80
+ let errorText = "";
81
+ try {
82
+ errorText = await response.text();
83
+ console.log(`[callAI:${PACKAGE_VERSION}] Error response body:`, errorText);
84
+ }
85
+ catch (error) {
86
+ const textError = error;
87
+ console.error(`[callAI:${PACKAGE_VERSION}] Error reading response body:`, textError);
88
+ errorText = `Failed to read error details: ${textError.message || 'Unknown error'}`;
57
89
  }
58
90
  // Create a detailed error with status information
91
+ console.log(`[callAI:${PACKAGE_VERSION}] Creating error object with status ${response.status}`);
59
92
  const errorMessage = `API returned error ${response.status}: ${response.statusText}`;
60
93
  const error = new Error(errorMessage);
61
94
  // Add extra properties for more context
@@ -63,12 +96,17 @@ function callAI(prompt, options = {}) {
63
96
  error.statusText = response.statusText;
64
97
  error.details = errorText;
65
98
  // Ensure this error is thrown and caught properly in the Promise chain
66
- if (options.debug) {
67
- console.error(`[callAI:${PACKAGE_VERSION}] Throwing error:`, error);
68
- }
69
- throw error;
99
+ console.error(`[callAI:${PACKAGE_VERSION}] THROWING API ERROR:`, {
100
+ message: errorMessage,
101
+ status: response.status,
102
+ statusText: response.statusText,
103
+ details: errorText
104
+ });
105
+ // This MUST throw the error from the promise
106
+ return Promise.reject(error);
70
107
  }
71
108
  // Only if response is OK, create and return the streaming generator
109
+ console.log(`[callAI:${PACKAGE_VERSION}] Response OK, creating streaming generator`);
72
110
  return createStreamingGenerator(response, options, schemaStrategy, model);
73
111
  })();
74
112
  // For backward compatibility with v0.6.x where users didn't await the result
@@ -424,6 +462,10 @@ async function extractClaudeResponse(response) {
424
462
  /**
425
463
  * Generator factory function for streaming API calls
426
464
  * This is called after the fetch is made and response is validated
465
+ *
466
+ * Note: Even though we checked response.ok before creating this generator,
467
+ * we need to be prepared for errors that may occur during streaming. Some APIs
468
+ * return a 200 OK initially but then deliver error information in the stream.
427
469
  */
428
470
  async function* createStreamingGenerator(response, options, schemaStrategy, model) {
429
471
  try {
@@ -465,12 +507,21 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
465
507
  chunkCount++;
466
508
  // Parse the JSON chunk
467
509
  const json = JSON.parse(jsonLine);
468
- // Check for error in the parsed JSON response
469
- if (json.error) {
470
- // Use the standard error format as the rest of the library
471
- // We need to throw the error properly
472
- handleApiError(new Error(`API returned error: ${JSON.stringify(json.error)}`), "Streaming API call error", options.debug);
473
- // This code is unreachable as handleApiError throws
510
+ // Enhanced error detection - check for BOTH error and json.error
511
+ // Some APIs return 200 OK but then deliver errors in the stream
512
+ if (json.error || (typeof json === 'object' && 'error' in json)) {
513
+ console.error(`[callAI:${PACKAGE_VERSION}] Detected error in streaming response:`, json);
514
+ // Create a detailed error object similar to our HTTP error handling
515
+ const errorMessage = json.error?.message ||
516
+ json.error?.toString() ||
517
+ JSON.stringify(json.error || json);
518
+ const detailedError = new Error(`API streaming error: ${errorMessage}`);
519
+ // Add error metadata
520
+ detailedError.status = json.error?.status || 400;
521
+ detailedError.statusText = json.error?.type || 'Bad Request';
522
+ detailedError.details = JSON.stringify(json.error || json);
523
+ console.error(`[callAI:${PACKAGE_VERSION}] Throwing stream error:`, detailedError);
524
+ throw detailedError;
474
525
  }
475
526
  // Handle tool use response - Claude with schema cases
476
527
  const isClaudeWithSchema = /claude/i.test(model) && schemaStrategy.strategy === "tool_mode";
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "call-ai",
3
- "version": "0.7.0-dev-preview-7",
3
+ "version": "0.7.0-dev-preview-9",
4
4
  "description": "Lightweight library for making AI API calls with streaming support",
5
5
  "main": "dist/index.js",
6
6
  "browser": "dist/index.js",