call-ai 0.7.0 → 0.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/api.js +51 -28
- package/package.json +13 -12
package/dist/api.js
CHANGED
|
@@ -32,7 +32,6 @@ function callAI(prompt, options = {}) {
|
|
|
32
32
|
const streamPromise = (async () => {
|
|
33
33
|
// Do setup and validation before returning the generator
|
|
34
34
|
const { endpoint, requestOptions, model, schemaStrategy } = prepareRequestParams(prompt, { ...options, stream: true });
|
|
35
|
-
// Make the fetch request and handle errors before creating the generator
|
|
36
35
|
if (options.debug) {
|
|
37
36
|
console.log(`[callAI:${PACKAGE_VERSION}] Making fetch request to: ${endpoint}`);
|
|
38
37
|
console.log(`[callAI:${PACKAGE_VERSION}] With model: ${model}`);
|
|
@@ -62,7 +61,9 @@ function callAI(prompt, options = {}) {
|
|
|
62
61
|
}
|
|
63
62
|
}
|
|
64
63
|
catch (fetchError) {
|
|
65
|
-
|
|
64
|
+
if (options.debug) {
|
|
65
|
+
console.error(`[callAI:${PACKAGE_VERSION}] Network error during fetch:`, fetchError);
|
|
66
|
+
}
|
|
66
67
|
throw fetchError; // Re-throw network errors
|
|
67
68
|
}
|
|
68
69
|
// Explicitly check for HTTP error status and log extensively if debug is enabled
|
|
@@ -89,7 +90,7 @@ function callAI(prompt, options = {}) {
|
|
|
89
90
|
let isInvalidModel = false;
|
|
90
91
|
try {
|
|
91
92
|
// Check if this is an invalid model error
|
|
92
|
-
const modelCheckResult = await checkForInvalidModelError(clonedResponse, model, false, options.skipRetry);
|
|
93
|
+
const modelCheckResult = await checkForInvalidModelError(clonedResponse, model, false, options.skipRetry, options.debug);
|
|
93
94
|
isInvalidModel = modelCheckResult.isInvalidModel;
|
|
94
95
|
if (isInvalidModel) {
|
|
95
96
|
if (options.debug) {
|
|
@@ -211,9 +212,9 @@ function callAI(prompt, options = {}) {
|
|
|
211
212
|
})();
|
|
212
213
|
// For backward compatibility with v0.6.x where users didn't await the result
|
|
213
214
|
if (process.env.NODE_ENV !== "production") {
|
|
214
|
-
|
|
215
|
-
`
|
|
216
|
-
|
|
215
|
+
if (options.debug) {
|
|
216
|
+
console.warn(`[callAI:${PACKAGE_VERSION}] No await found - using legacy streaming pattern. This will be removed in a future version and may cause issues with certain models.`);
|
|
217
|
+
}
|
|
217
218
|
}
|
|
218
219
|
// Create a proxy object that acts both as a Promise and an AsyncGenerator for backward compatibility
|
|
219
220
|
// @ts-ignore - We're deliberately implementing a proxy with dual behavior
|
|
@@ -305,7 +306,7 @@ function handleApiError(error, context, debug = false) {
|
|
|
305
306
|
/**
|
|
306
307
|
* Helper to check if an error indicates invalid model and handle fallback
|
|
307
308
|
*/
|
|
308
|
-
async function checkForInvalidModelError(response, model, isRetry, skipRetry = false) {
|
|
309
|
+
async function checkForInvalidModelError(response, model, isRetry, skipRetry = false, debug = false) {
|
|
309
310
|
// Skip retry immediately if skipRetry is true or if we're already retrying
|
|
310
311
|
if (skipRetry || isRetry) {
|
|
311
312
|
return { isInvalidModel: false };
|
|
@@ -318,8 +319,7 @@ async function checkForInvalidModelError(response, model, isRetry, skipRetry = f
|
|
|
318
319
|
const clonedResponse = response.clone();
|
|
319
320
|
try {
|
|
320
321
|
const errorData = await clonedResponse.json();
|
|
321
|
-
|
|
322
|
-
if (debugEnabled) {
|
|
322
|
+
if (debug) {
|
|
323
323
|
console.log(`[callAI:${PACKAGE_VERSION}] Checking for invalid model error:`, {
|
|
324
324
|
model,
|
|
325
325
|
statusCode: response.status,
|
|
@@ -349,29 +349,37 @@ async function checkForInvalidModelError(response, model, isRetry, skipRetry = f
|
|
|
349
349
|
}
|
|
350
350
|
// Test the error message against each pattern
|
|
351
351
|
const isInvalidModel = invalidModelPatterns.some((pattern) => errorMessage.includes(pattern.toLowerCase()));
|
|
352
|
-
if (isInvalidModel && debugEnabled) {
|
|
352
|
+
if (isInvalidModel && debug) {
|
|
353
353
|
console.warn(`[callAI:${PACKAGE_VERSION}] Model ${model} not valid, will retry with ${FALLBACK_MODEL}`);
|
|
354
354
|
}
|
|
355
355
|
return { isInvalidModel, errorData };
|
|
356
356
|
}
|
|
357
357
|
catch (parseError) {
|
|
358
358
|
// If we can't parse the response as JSON, try to read it as text
|
|
359
|
-
|
|
359
|
+
if (debug) {
|
|
360
|
+
console.error("Failed to parse error response as JSON:", parseError);
|
|
361
|
+
}
|
|
360
362
|
try {
|
|
361
363
|
const textResponse = await response.clone().text();
|
|
362
|
-
|
|
364
|
+
if (debug) {
|
|
365
|
+
console.log("Error response as text:", textResponse);
|
|
366
|
+
}
|
|
363
367
|
// Even if it's not JSON, check if it contains any of our known patterns
|
|
364
368
|
const lowerText = textResponse.toLowerCase();
|
|
365
369
|
const isInvalidModel = lowerText.includes("invalid model") ||
|
|
366
370
|
lowerText.includes("not exist") ||
|
|
367
371
|
lowerText.includes("fake-model");
|
|
368
372
|
if (isInvalidModel) {
|
|
369
|
-
|
|
373
|
+
if (debug) {
|
|
374
|
+
console.warn(`[callAI:${PACKAGE_VERSION}] Detected invalid model in text response for ${model}`);
|
|
375
|
+
}
|
|
370
376
|
}
|
|
371
377
|
return { isInvalidModel, errorData: { text: textResponse } };
|
|
372
378
|
}
|
|
373
379
|
catch (textError) {
|
|
374
|
-
|
|
380
|
+
if (debug) {
|
|
381
|
+
console.error("Failed to read error response as text:", textError);
|
|
382
|
+
}
|
|
375
383
|
return { isInvalidModel: false };
|
|
376
384
|
}
|
|
377
385
|
}
|
|
@@ -449,7 +457,7 @@ async function callAINonStreaming(prompt, options = {}, isRetry = false) {
|
|
|
449
457
|
const response = await fetch(endpoint, requestOptions);
|
|
450
458
|
// Handle HTTP errors, with potential fallback for invalid model
|
|
451
459
|
if (!response.ok || response.status >= 400) {
|
|
452
|
-
const { isInvalidModel } = await checkForInvalidModelError(response, model, isRetry, options.skipRetry);
|
|
460
|
+
const { isInvalidModel } = await checkForInvalidModelError(response, model, isRetry, options.skipRetry, options.debug);
|
|
453
461
|
if (isInvalidModel) {
|
|
454
462
|
// Retry with fallback model
|
|
455
463
|
return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
|
|
@@ -475,13 +483,17 @@ async function callAINonStreaming(prompt, options = {}, isRetry = false) {
|
|
|
475
483
|
}
|
|
476
484
|
// Handle error responses
|
|
477
485
|
if (result.error) {
|
|
478
|
-
|
|
486
|
+
if (options.debug) {
|
|
487
|
+
console.error("API returned an error:", result.error);
|
|
488
|
+
}
|
|
479
489
|
// If it's a model error and not already a retry, try with fallback
|
|
480
490
|
if (!isRetry &&
|
|
481
491
|
!options.skipRetry &&
|
|
482
492
|
result.error.message &&
|
|
483
493
|
result.error.message.toLowerCase().includes("not a valid model")) {
|
|
484
|
-
|
|
494
|
+
if (options.debug) {
|
|
495
|
+
console.warn(`Model ${model} error, retrying with ${FALLBACK_MODEL}`);
|
|
496
|
+
}
|
|
485
497
|
return callAINonStreaming(prompt, { ...options, model: FALLBACK_MODEL }, true);
|
|
486
498
|
}
|
|
487
499
|
return JSON.stringify({
|
|
@@ -551,6 +563,7 @@ async function extractClaudeResponse(response) {
|
|
|
551
563
|
]));
|
|
552
564
|
}
|
|
553
565
|
catch (textError) {
|
|
566
|
+
// Always log timeout errors
|
|
554
567
|
console.error(`Text extraction timed out or failed:`, textError);
|
|
555
568
|
throw new Error("Claude response text extraction timed out. This is likely an issue with the Claude API's response format.");
|
|
556
569
|
}
|
|
@@ -558,6 +571,7 @@ async function extractClaudeResponse(response) {
|
|
|
558
571
|
return JSON.parse(textResponse);
|
|
559
572
|
}
|
|
560
573
|
catch (err) {
|
|
574
|
+
// Always log JSON parsing errors
|
|
561
575
|
console.error(`Failed to parse Claude response as JSON:`, err);
|
|
562
576
|
throw new Error(`Failed to parse Claude response as JSON: ${err}`);
|
|
563
577
|
}
|
|
@@ -629,29 +643,36 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
|
|
|
629
643
|
try {
|
|
630
644
|
const jsonLine = line.replace("data: ", "");
|
|
631
645
|
if (!jsonLine.trim()) {
|
|
632
|
-
|
|
646
|
+
if (options.debug) {
|
|
647
|
+
console.log(`[callAI:${PACKAGE_VERSION}] Empty JSON line after data: prefix`);
|
|
648
|
+
}
|
|
633
649
|
continue;
|
|
634
650
|
}
|
|
635
|
-
|
|
636
|
-
|
|
637
|
-
|
|
651
|
+
if (options.debug) {
|
|
652
|
+
console.log(`[callAI:${PACKAGE_VERSION}] JSON line (first 100 chars):`, jsonLine.length > 100
|
|
653
|
+
? jsonLine.substring(0, 100) + "..."
|
|
654
|
+
: jsonLine);
|
|
655
|
+
}
|
|
638
656
|
// Parse the JSON chunk
|
|
639
657
|
let json;
|
|
640
658
|
try {
|
|
641
659
|
json = JSON.parse(jsonLine);
|
|
642
|
-
|
|
643
|
-
|
|
644
|
-
|
|
660
|
+
if (options.debug) {
|
|
661
|
+
console.log(`[callAI:${PACKAGE_VERSION}] Parsed JSON:`, JSON.stringify(json).substring(0, 1000));
|
|
662
|
+
}
|
|
645
663
|
}
|
|
646
664
|
catch (parseError) {
|
|
647
|
-
|
|
648
|
-
|
|
665
|
+
if (options.debug) {
|
|
666
|
+
console.error(`[callAI:${PACKAGE_VERSION}] JSON parse error:`, parseError);
|
|
667
|
+
}
|
|
649
668
|
continue;
|
|
650
669
|
}
|
|
651
670
|
// Enhanced error detection - check for BOTH error and json.error
|
|
652
671
|
// Some APIs return 200 OK but then deliver errors in the stream
|
|
653
672
|
if (json.error || (typeof json === "object" && "error" in json)) {
|
|
654
|
-
|
|
673
|
+
if (options.debug) {
|
|
674
|
+
console.error(`[callAI:${PACKAGE_VERSION}] Detected error in streaming response:`, json);
|
|
675
|
+
}
|
|
655
676
|
// Create a detailed error object similar to our HTTP error handling
|
|
656
677
|
const errorMessage = json.error?.message ||
|
|
657
678
|
json.error?.toString() ||
|
|
@@ -766,7 +787,9 @@ async function* createStreamingGenerator(response, options, schemaStrategy, mode
|
|
|
766
787
|
}
|
|
767
788
|
}
|
|
768
789
|
catch (e) {
|
|
769
|
-
|
|
790
|
+
if (options.debug) {
|
|
791
|
+
console.error(`[callAIStreaming] Error parsing JSON chunk:`, e);
|
|
792
|
+
}
|
|
770
793
|
}
|
|
771
794
|
}
|
|
772
795
|
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "call-ai",
|
|
3
|
-
"version": "0.7.0",
|
|
3
|
+
"version": "0.7.2",
|
|
4
4
|
"description": "Lightweight library for making AI API calls with streaming support",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"browser": "dist/index.js",
|
|
@@ -16,6 +16,17 @@
|
|
|
16
16
|
"bugs": {
|
|
17
17
|
"url": "https://github.com/fireproof-storage/call-ai/issues"
|
|
18
18
|
},
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "tsc",
|
|
21
|
+
"test": "jest --testPathIgnorePatterns=\".*\\.integration\\.(no-await\\.)?test\\.ts$\"",
|
|
22
|
+
"test:integration": "jest --testMatch=\"**/*\\.integration\\.test\\.ts\"",
|
|
23
|
+
"test:all": "jest",
|
|
24
|
+
"prepublishOnly": "npm run build",
|
|
25
|
+
"typecheck": "tsc --noEmit",
|
|
26
|
+
"format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
|
|
27
|
+
"coverage": "jest --coverage",
|
|
28
|
+
"check": "npm run typecheck && npm run format && npm run test"
|
|
29
|
+
},
|
|
19
30
|
"keywords": [
|
|
20
31
|
"ai",
|
|
21
32
|
"llm",
|
|
@@ -40,15 +51,5 @@
|
|
|
40
51
|
},
|
|
41
52
|
"engines": {
|
|
42
53
|
"node": ">=14.0.0"
|
|
43
|
-
},
|
|
44
|
-
"scripts": {
|
|
45
|
-
"build": "tsc",
|
|
46
|
-
"test": "jest --testPathIgnorePatterns=\".*\\.integration\\.(no-await\\.)?test\\.ts$\"",
|
|
47
|
-
"test:integration": "jest --testMatch=\"**/*\\.integration\\.test\\.ts\"",
|
|
48
|
-
"test:all": "jest",
|
|
49
|
-
"typecheck": "tsc --noEmit",
|
|
50
|
-
"format": "prettier --write \"src/**/*.ts\" \"test/**/*.ts\"",
|
|
51
|
-
"coverage": "jest --coverage",
|
|
52
|
-
"check": "npm run typecheck && npm run format && npm run test"
|
|
53
54
|
}
|
|
54
|
-
}
|
|
55
|
+
}
|