call-ai 0.7.0-dev-preview-12 → 0.7.0-dev-preview-14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/api.js +144 -65
  2. package/package.json +2 -2
package/dist/api.js CHANGED
@@ -4,7 +4,7 @@ exports.callAI = callAI;
  const strategies_1 = require("./strategies");
  // Import package version for debugging
  // eslint-disable-next-line @typescript-eslint/no-var-requires
- const PACKAGE_VERSION = require('../package.json').version;
+ const PACKAGE_VERSION = require("../package.json").version;
  // Default fallback model when the primary model fails or is unavailable
  const FALLBACK_MODEL = "openrouter/auto";
  /**
@@ -33,47 +33,56 @@ function callAI(prompt, options = {}) {
  // Do setup and validation before returning the generator
  const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
  // Make the fetch request and handle errors before creating the generator
- console.log(`[callAI:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
- console.log(`[callAI:${PACKAGE_VERSION}] With model: ${model}`);
- console.log(`[callAI:${PACKAGE_VERSION}] Request headers:`, JSON.stringify(requestOptions.headers));
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
+ console.log(`[callAI:${PACKAGE_VERSION}] With model: ${model}`);
+ console.log(`[callAI:${PACKAGE_VERSION}] Request headers:`, JSON.stringify(requestOptions.headers));
+ }
  let response;
  try {
  response = await fetch(endpoint, requestOptions);
- console.log(`[callAI:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText);
- // Log all headers
- console.log(`[callAI:${PACKAGE_VERSION}] Response headers:`);
- response.headers.forEach((value, name) => {
- console.log(`[callAI:${PACKAGE_VERSION}] ${name}: ${value}`);
- });
- // Clone response for diagnostic purposes only
- const diagnosticResponse = response.clone();
- try {
- // Try to get the response as text for debugging
- const responseText = await diagnosticResponse.text();
- console.log(`[callAI:${PACKAGE_VERSION}] First 500 chars of response body:`, responseText.substring(0, 500) + (responseText.length > 500 ? '...' : ''));
- }
- catch (e) {
- console.log(`[callAI:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, e);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Fetch completed with status:`, response.status, response.statusText);
+ // Log all headers
+ console.log(`[callAI:${PACKAGE_VERSION}] Response headers:`);
+ response.headers.forEach((value, name) => {
+ console.log(`[callAI:${PACKAGE_VERSION}] ${name}: ${value}`);
+ });
+ // Clone response for diagnostic purposes only
+ const diagnosticResponse = response.clone();
+ try {
+ // Try to get the response as text for debugging
+ const responseText = await diagnosticResponse.text();
+ console.log(`[callAI:${PACKAGE_VERSION}] First 500 chars of response body:`, responseText.substring(0, 500) +
+ (responseText.length > 500 ? "..." : ""));
+ }
+ catch (e) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Could not read response body for diagnostics:`, e);
+ }
  }
  }
  catch (fetchError) {
  console.error(`[callAI:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
  throw fetchError; // Re-throw network errors
  }
- // Explicitly check for HTTP error status and log extensively
- console.log(`[callAI:${PACKAGE_VERSION}] Response.ok =`, response.ok);
- console.log(`[callAI:${PACKAGE_VERSION}] Response.status =`, response.status);
- console.log(`[callAI:${PACKAGE_VERSION}] Response.statusText =`, response.statusText);
- console.log(`[callAI:${PACKAGE_VERSION}] Response.type =`, response.type);
- // Double check for content-type to see if there's a mismatch in error response handling
- const contentType = response.headers.get('content-type') || '';
- console.log(`[callAI:${PACKAGE_VERSION}] Content-Type =`, contentType);
+ // Explicitly check for HTTP error status and log extensively if debug is enabled
+ // Safe access to headers in case of mock environments
+ const contentType = response?.headers?.get?.("content-type") || "";
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.ok =`, response.ok);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.status =`, response.status);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.statusText =`, response.statusText);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response.type =`, response.type);
+ console.log(`[callAI:${PACKAGE_VERSION}] Content-Type =`, contentType);
+ }
  // Browser-compatible error handling - must check BOTH status code AND content-type
  // Some browsers will report status 200 for SSE streams even when server returns 400
  const hasHttpError = !response.ok || response.status >= 400;
- const hasJsonError = contentType.includes('application/json');
+ const hasJsonError = contentType.includes("application/json");
  if (hasHttpError || hasJsonError) {
- console.log(`[callAI:${PACKAGE_VERSION}] ⚠️ Error detected - HTTP Status: ${response.status}, Content-Type: ${contentType}`);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] ⚠️ Error detected - HTTP Status: ${response.status}, Content-Type: ${contentType}`);
+ }
  // Handle the error with fallback model if appropriate
  if (!options.skipRetry) {
  const clonedResponse = response.clone();
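Taken together, this hunk makes the request/response diagnostics opt-in rather than always-on. A minimal sketch of how a caller would enable them (the `callAI` export and the `stream` and `debug` options all appear in this diff; the prompt and output handling are illustrative):

    const { callAI } = require("call-ai");

    (async () => {
        // `debug: true` turns on the console.log calls gated above; without it
        // the request is silent and failures still surface as thrown errors.
        const generator = await callAI("Say hello", { stream: true, debug: true });
        for await (const chunk of generator) {
            process.stdout.write(chunk);
        }
    })();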
@@ -87,7 +96,10 @@ function callAI(prompt, options = {}) {
  console.log(`[callAI:${PACKAGE_VERSION}] Retrying with fallback model: ${FALLBACK_MODEL}`);
  }
  // Retry with fallback model
- return await callAI(prompt, { ...options, model: FALLBACK_MODEL });
+ return (await callAI(prompt, {
+ ...options,
+ model: FALLBACK_MODEL,
+ }));
  }
  }
  catch (modelCheckError) {
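Behavior is unchanged here (one retry against `FALLBACK_MODEL`, i.e. "openrouter/auto"); only the call formatting differs. Callers who prefer seeing the original failure over a silent model swap can set the `skipRetry` option that guards this branch. A sketch, continuing the require from the previous example:

    (async () => {
        try {
            // With skipRetry, an invalid model rejects instead of being retried
            // against the fallback model.
            await callAI("Say hello", { stream: true, model: "fake-model", skipRetry: true });
        } catch (err) {
            // err.status / err.message are populated by the error path below.
            console.error(err.status, err.message);
        }
    })();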
@@ -99,16 +111,43 @@ function callAI(prompt, options = {}) {
  try {
  // Try to get error details from the response body
  const errorBody = await response.text();
- console.log(`[callAI:${PACKAGE_VERSION}] Error body:`, errorBody);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Error body:`, errorBody);
+ }
  try {
  // Try to parse JSON error
  const errorJson = JSON.parse(errorBody);
- console.log(`[callAI:${PACKAGE_VERSION}] Parsed error:`, errorJson);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Parsed error:`, errorJson);
+ }
  // Extract message from OpenRouter error format
- const errorMessage = ((errorJson.error && typeof errorJson.error === 'object') ?
- errorJson.error.message :
- errorJson.error || errorJson.message ||
- `API returned ${response.status}: ${response.statusText}`);
+ let errorMessage = "";
+ // Handle common error formats
+ if (errorJson.error &&
+ typeof errorJson.error === "object" &&
+ errorJson.error.message) {
+ // OpenRouter/OpenAI format: { error: { message: "..." } }
+ errorMessage = errorJson.error.message;
+ }
+ else if (errorJson.error && typeof errorJson.error === "string") {
+ // Simple error format: { error: "..." }
+ errorMessage = errorJson.error;
+ }
+ else if (errorJson.message) {
+ // Generic format: { message: "..." }
+ errorMessage = errorJson.message;
+ }
+ else {
+ // Fallback with status details
+ errorMessage = `API returned ${response.status}: ${response.statusText}`;
+ }
+ // Add status details to error message if not already included
+ if (!errorMessage.includes(response.status.toString())) {
+ errorMessage = `${errorMessage} (Status: ${response.status})`;
+ }
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Extracted error message:`, errorMessage);
+ }
  // Create error with standard format
  const error = new Error(errorMessage);
  // Add useful metadata
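The dense ternary is replaced by an explicit cascade over the three error payload shapes the library recognizes, plus a status suffix. Illustratively, for a 400 response (hypothetical payloads; the mapping follows the branches above):

    // { "error": { "message": "Invalid model" } } -> "Invalid model (Status: 400)"
    // { "error": "Invalid model" }                -> "Invalid model (Status: 400)"
    // { "message": "Invalid model" }              -> "Invalid model (Status: 400)"
    // anything else                               -> "API returned 400: Bad Request"
    // The "(Status: ...)" suffix is appended only when the message does not
    // already contain the status code, so the fallback case stays untouched.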
@@ -119,9 +158,31 @@ function callAI(prompt, options = {}) {
  throw error;
  }
  catch (jsonError) {
- // If JSON parsing fails, throw a simpler error
- console.log(`[callAI:${PACKAGE_VERSION}] JSON parse error:`, jsonError);
- const error = new Error(`API error: ${response.status} ${response.statusText}`);
+ // If JSON parsing fails, extract a useful message from the raw error body
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] JSON parse error:`, jsonError);
+ }
+ // Try to extract a useful message even from non-JSON text
+ let errorMessage = "";
+ // Check if it's a plain text error message
+ if (errorBody && errorBody.trim().length > 0) {
+ // Limit length for readability
+ errorMessage =
+ errorBody.length > 100
+ ? errorBody.substring(0, 100) + "..."
+ : errorBody;
+ }
+ else {
+ errorMessage = `API error: ${response.status} ${response.statusText}`;
+ }
+ // Add status details if not already included
+ if (!errorMessage.includes(response.status.toString())) {
+ errorMessage = `${errorMessage} (Status: ${response.status})`;
+ }
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Extracted text error message:`, errorMessage);
+ }
+ const error = new Error(errorMessage);
  error.status = response.status;
  error.statusText = response.statusText;
  error.details = errorBody;
@@ -143,11 +204,13 @@ function callAI(prompt, options = {}) {
  }
  }
  // Only if response is OK, create and return the streaming generator
- console.log(`[callAI:${PACKAGE_VERSION}] Response OK, creating streaming generator`);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Response OK, creating streaming generator`);
+ }
  return createStreamingGenerator(response, options, schemaStrategy, model);
  })();
  // For backward compatibility with v0.6.x where users didn't await the result
- if (process.env.NODE_ENV !== 'production') {
+ if (process.env.NODE_ENV !== "production") {
  console.warn(`[callAI:${PACKAGE_VERSION}] WARNING: Using callAI with streaming without await is deprecated. ` +
  `Please use 'const generator = await callAI(...)' instead of 'const generator = callAI(...)'. ` +
  `This backward compatibility will be removed in a future version.`);
@@ -168,7 +231,7 @@ async function bufferStreamingResults(prompt, options) {
  };
  try {
  // Get streaming generator
- const generator = await callAI(prompt, streamingOptions);
+ const generator = (await callAI(prompt, streamingOptions));
  // Buffer all chunks
  let finalResult = "";
  let chunkCount = 0;
@@ -194,7 +257,10 @@ function createBackwardCompatStreamingProxy(promise) {
  return new Proxy({}, {
  get(target, prop) {
  // First check if it's an AsyncGenerator method (needed for for-await)
- if (prop === 'next' || prop === 'throw' || prop === 'return' || prop === Symbol.asyncIterator) {
+ if (prop === "next" ||
+ prop === "throw" ||
+ prop === "return" ||
+ prop === Symbol.asyncIterator) {
  // Create wrapper functions that await the Promise first
  if (prop === Symbol.asyncIterator) {
  return function () {
@@ -209,7 +275,7 @@ function createBackwardCompatStreamingProxy(promise) {
  // Turn Promise rejection into iterator result with error thrown
  return Promise.reject(error);
  }
- }
+ },
  };
  };
  }
@@ -220,11 +286,11 @@ function createBackwardCompatStreamingProxy(promise) {
  };
  }
  // Then check if it's a Promise method
- if (prop === 'then' || prop === 'catch' || prop === 'finally') {
+ if (prop === "then" || prop === "catch" || prop === "finally") {
  return promise[prop].bind(promise);
  }
  return undefined;
- }
+ },
  });
  }
  /**
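The trailing-comma and quote changes here are cosmetic, but this proxy is what keeps the v0.6.x calling convention alive: the returned object is both awaitable (`then`/`catch`/`finally` forward to the promise) and async-iterable (`Symbol.asyncIterator`, `next`, `throw`, and `return` are wrapped to await the promise first). Both styles below therefore work, though only the first is supported going forward (a sketch; the prompt is illustrative):

    (async () => {
        // v0.7.x style: await first, then iterate the real generator.
        const generator = await callAI("Say hello", { stream: true });
        for await (const chunk of generator) { /* ... */ }

        // Deprecated v0.6.x style: iterate the un-awaited proxy directly; in
        // non-production builds this also triggers the deprecation warning
        // added earlier in this diff.
        for await (const chunk of callAI("Say hello", { stream: true })) { /* ... */ }
    })();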
@@ -257,7 +323,7 @@ async function checkForInvalidModelError(response, model, isRetry, skipRetry = f
  console.log(`[callAI:${PACKAGE_VERSION}] Checking for invalid model error:`, {
  model,
  statusCode: response.status,
- errorData
+ errorData,
  });
  }
  // Common patterns for invalid model errors across different providers
@@ -268,10 +334,10 @@ async function checkForInvalidModelError(response, model, isRetry, skipRetry = f
  "unknown model",
  "no provider was found",
  "fake-model", // For our test case
- "does-not-exist" // For our test case
+ "does-not-exist", // For our test case
  ];
  // Check if error message contains any of our patterns
- let errorMessage = '';
+ let errorMessage = "";
  if (errorData.error && errorData.error.message) {
  errorMessage = errorData.error.message.toLowerCase();
  }
@@ -282,7 +348,7 @@ async function checkForInvalidModelError(response, model, isRetry, skipRetry = f
  errorMessage = JSON.stringify(errorData).toLowerCase();
  }
  // Test the error message against each pattern
- const isInvalidModel = invalidModelPatterns.some(pattern => errorMessage.includes(pattern.toLowerCase()));
+ const isInvalidModel = invalidModelPatterns.some((pattern) => errorMessage.includes(pattern.toLowerCase()));
  if (isInvalidModel && debugEnabled) {
  console.warn(`[callAI:${PACKAGE_VERSION}] Model ${model} not valid, will retry with ${FALLBACK_MODEL}`);
  }
@@ -505,10 +571,12 @@ async function extractClaudeResponse(response) {
  * return a 200 OK initially but then deliver error information in the stream.
  */
  async function* createStreamingGenerator(response, options, schemaStrategy, model) {
- console.log(`[callAI:${PACKAGE_VERSION}] Starting streaming generator with model: ${model}`);
- console.log(`[callAI:${PACKAGE_VERSION}] Response status:`, response.status);
- console.log(`[callAI:${PACKAGE_VERSION}] Response type:`, response.type);
- console.log(`[callAI:${PACKAGE_VERSION}] Response Content-Type:`, response.headers.get('content-type'));
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Starting streaming generator with model: ${model}`);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response status:`, response.status);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response type:`, response.type);
+ console.log(`[callAI:${PACKAGE_VERSION}] Response Content-Type:`, response.headers.get("content-type"));
+ }
  try {
  // Handle streaming response
  if (!response.body) {
@@ -522,8 +590,8 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
  while (true) {
  const { done, value } = await reader.read();
  if (done) {
- console.log(`[callAI:${PACKAGE_VERSION}] Stream done=true after ${chunkCount} chunks`);
  if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Stream done=true after ${chunkCount} chunks`);
  console.log(`[callAI-streaming:complete v${PACKAGE_VERSION}] Stream finished after ${chunkCount} chunks`);
  }
  break;
@@ -531,15 +599,23 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
  // Increment chunk counter before processing
  chunkCount++;
  const chunk = decoder.decode(value);
- console.log(`[callAI:${PACKAGE_VERSION}] Raw chunk #${chunkCount} (${chunk.length} bytes):`, chunk.length > 200 ? chunk.substring(0, 200) + '...' : chunk);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Raw chunk #${chunkCount} (${chunk.length} bytes):`, chunk.length > 200 ? chunk.substring(0, 200) + "..." : chunk);
+ }
  const lines = chunk.split("\n").filter((line) => line.trim() !== "");
- console.log(`[callAI:${PACKAGE_VERSION}] Chunk #${chunkCount} contains ${lines.length} non-empty lines`);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Chunk #${chunkCount} contains ${lines.length} non-empty lines`);
+ }
  for (const line of lines) {
- console.log(`[callAI:${PACKAGE_VERSION}] Processing line:`, line.length > 100 ? line.substring(0, 100) + '...' : line);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Processing line:`, line.length > 100 ? line.substring(0, 100) + "..." : line);
+ }
  if (line.startsWith("data: ")) {
  let data = line.slice(6);
  if (data === "[DONE]") {
- console.log(`[callAI:${PACKAGE_VERSION}] Received [DONE] marker`);
+ if (options.debug) {
+ console.log(`[callAI:${PACKAGE_VERSION}] Received [DONE] marker`);
+ }
  break;
  }
  if (options.debug) {
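For context, the parser above assumes standard server-sent-events framing: payload lines carry a `data: ` prefix and the stream ends with a `[DONE]` sentinel. A representative wire excerpt (the JSON body is a hypothetical OpenAI-style delta, not taken from this diff):

    data: {"choices":[{"delta":{"content":"Hel"}}]}
    data: {"choices":[{"delta":{"content":"lo"}}]}
    data: [DONE]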
@@ -556,14 +632,16 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
  console.log(`[callAI:${PACKAGE_VERSION}] Empty JSON line after data: prefix`);
  continue;
  }
- console.log(`[callAI:${PACKAGE_VERSION}] JSON line (first 100 chars):`, jsonLine.length > 100 ? jsonLine.substring(0, 100) + '...' : jsonLine);
+ console.log(`[callAI:${PACKAGE_VERSION}] JSON line (first 100 chars):`, jsonLine.length > 100
+ ? jsonLine.substring(0, 100) + "..."
+ : jsonLine);
  // Parse the JSON chunk
  let json;
  try {
  json = JSON.parse(jsonLine);
- console.log(`[callAI:${PACKAGE_VERSION}] Parsed JSON:`, JSON.stringify(json).length > 100 ?
- JSON.stringify(json).substring(0, 100) + '...' :
- JSON.stringify(json));
+ console.log(`[callAI:${PACKAGE_VERSION}] Parsed JSON:`, JSON.stringify(json).length > 100
+ ? JSON.stringify(json).substring(0, 100) + "..."
+ : JSON.stringify(json));
  }
  catch (parseError) {
  console.error(`[callAI:${PACKAGE_VERSION}] JSON parse error:`, parseError);
@@ -572,7 +650,7 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
  }
  // Enhanced error detection - check for BOTH error and json.error
  // Some APIs return 200 OK but then deliver errors in the stream
- if (json.error || (typeof json === 'object' && 'error' in json)) {
+ if (json.error || (typeof json === "object" && "error" in json)) {
  console.error(`[callAI:${PACKAGE_VERSION}] Detected error in streaming response:`, json);
  // Create a detailed error object similar to our HTTP error handling
  const errorMessage = json.error?.message ||
@@ -581,7 +659,8 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
  const detailedError = new Error(`API streaming error: ${errorMessage}`);
  // Add error metadata
  detailedError.status = json.error?.status || 400;
- detailedError.statusText = json.error?.type || 'Bad Request';
+ detailedError.statusText =
+ json.error?.type || "Bad Request";
  detailedError.details = JSON.stringify(json.error || json);
  console.error(`[callAI:${PACKAGE_VERSION}] Throwing stream error:`, detailedError);
  throw detailedError;
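Because some providers return 200 OK and only then report failure inside the stream, consumers should wrap the iteration, not just the initial call. A sketch (the `status`, `statusText`, and `details` fields are the ones assigned in the hunk above):

    (async () => {
        try {
            for await (const chunk of await callAI("Say hello", { stream: true })) {
                process.stdout.write(chunk);
            }
        } catch (err) {
            // e.g. 400 "Bad Request" "API streaming error: <provider message>"
            console.error(err.status, err.statusText, err.message);
        }
    })();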
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "call-ai",
- "version": "0.7.0-dev-preview-12",
+ "version": "0.7.0-dev-preview-14",
  "description": "Lightweight library for making AI API calls with streaming support",
  "main": "dist/index.js",
  "browser": "dist/index.js",
@@ -43,7 +43,7 @@
  },
  "scripts": {
  "build": "tsc",
- "test": "jest --testPathIgnorePatterns=\".*\\.integration\\.test\\.ts$\"",
+ "test": "jest --testPathIgnorePatterns=\".*\\.integration\\.(no-await\\.)?test\\.ts$\"",
  "test:integration": "jest --testMatch=\"**/*\\.integration\\.test\\.ts\"",
  "test:all": "jest",
  "typecheck": "tsc --noEmit",