190proof 1.0.68 → 1.0.70
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +4 -2
- package/dist/index.d.ts +4 -2
- package/dist/index.js +96 -156
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +96 -156
- package/dist/index.mjs.map +1 -1
- package/package.json +3 -2
package/dist/index.d.mts
CHANGED
```diff
@@ -5,7 +5,8 @@ declare enum ClaudeModel {
 SONNET_3_5 = "claude-3-5-sonnet-20241022",
 SONNET_4 = "claude-sonnet-4-20250514",
 OPUS_4 = "claude-opus-4-20250514",
-SONNET_4_5 = "claude-sonnet-4-5"
+SONNET_4_5 = "claude-sonnet-4-5",
+OPUS_4_5 = "claude-opus-4-5"
 }
 declare enum GPTModel {
 GPT35_0613 = "gpt-3.5-turbo-0613",
@@ -36,7 +37,8 @@ declare enum GeminiModel {
 GEMINI_2_0_FLASH_EXP_IMAGE_GENERATION = "gemini-2.0-flash-exp-image-generation",
 GEMINI_2_0_FLASH_THINKING_EXP = "gemini-2.0-flash-thinking-exp",
 GEMINI_2_0_FLASH_THINKING_EXP_01_21 = "gemini-2.0-flash-thinking-exp-01-21",
-GEMINI_2_5_FLASH_PREVIEW_04_17 = "gemini-2.5-flash-preview-04-17"
+GEMINI_2_5_FLASH_PREVIEW_04_17 = "gemini-2.5-flash-preview-04-17",
+GEMINI_3_FLASH_PREVIEW = "gemini-3-flash-preview"
 }
 interface GenericMessage {
 role: "user" | "assistant" | "system";
```
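The new enum members resolve to plain model-identifier strings. A minimal consumption sketch, assuming `ClaudeModel` and `GeminiModel` are exported from the package root as the declaration file suggests:

```ts
// Sketch only: assumes ClaudeModel and GeminiModel are exported from the "190proof" package root.
import { ClaudeModel, GeminiModel } from "190proof";

console.log(ClaudeModel.OPUS_4_5); // "claude-opus-4-5"
console.log(GeminiModel.GEMINI_3_FLASH_PREVIEW); // "gemini-3-flash-preview"
```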
package/dist/index.d.ts
CHANGED
```diff
@@ -5,7 +5,8 @@ declare enum ClaudeModel {
 SONNET_3_5 = "claude-3-5-sonnet-20241022",
 SONNET_4 = "claude-sonnet-4-20250514",
 OPUS_4 = "claude-opus-4-20250514",
-SONNET_4_5 = "claude-sonnet-4-5"
+SONNET_4_5 = "claude-sonnet-4-5",
+OPUS_4_5 = "claude-opus-4-5"
 }
 declare enum GPTModel {
 GPT35_0613 = "gpt-3.5-turbo-0613",
@@ -36,7 +37,8 @@ declare enum GeminiModel {
 GEMINI_2_0_FLASH_EXP_IMAGE_GENERATION = "gemini-2.0-flash-exp-image-generation",
 GEMINI_2_0_FLASH_THINKING_EXP = "gemini-2.0-flash-thinking-exp",
 GEMINI_2_0_FLASH_THINKING_EXP_01_21 = "gemini-2.0-flash-thinking-exp-01-21",
-GEMINI_2_5_FLASH_PREVIEW_04_17 = "gemini-2.5-flash-preview-04-17"
+GEMINI_2_5_FLASH_PREVIEW_04_17 = "gemini-2.5-flash-preview-04-17",
+GEMINI_3_FLASH_PREVIEW = "gemini-3-flash-preview"
 }
 interface GenericMessage {
 role: "user" | "assistant" | "system";
```
package/dist/index.js
CHANGED
```diff
@@ -47,6 +47,7 @@ var ClaudeModel = /* @__PURE__ */ ((ClaudeModel2) => {
 ClaudeModel2["SONNET_4"] = "claude-sonnet-4-20250514";
 ClaudeModel2["OPUS_4"] = "claude-opus-4-20250514";
 ClaudeModel2["SONNET_4_5"] = "claude-sonnet-4-5";
+ClaudeModel2["OPUS_4_5"] = "claude-opus-4-5";
 return ClaudeModel2;
 })(ClaudeModel || {});
 var GPTModel = /* @__PURE__ */ ((GPTModel2) => {
@@ -81,9 +82,29 @@ var GeminiModel = /* @__PURE__ */ ((GeminiModel2) => {
 GeminiModel2["GEMINI_2_0_FLASH_THINKING_EXP"] = "gemini-2.0-flash-thinking-exp";
 GeminiModel2["GEMINI_2_0_FLASH_THINKING_EXP_01_21"] = "gemini-2.0-flash-thinking-exp-01-21";
 GeminiModel2["GEMINI_2_5_FLASH_PREVIEW_04_17"] = "gemini-2.5-flash-preview-04-17";
+GeminiModel2["GEMINI_3_FLASH_PREVIEW"] = "gemini-3-flash-preview";
 return GeminiModel2;
 })(GeminiModel || {});
 
+// logger.ts
+function formatMessage(level, identifier, message) {
+return `[${level}] [${identifier}] ${message}`;
+}
+function log(identifier, message, ...args) {
+console.log(formatMessage("LOG", identifier, message), ...args);
+}
+function warn(identifier, message, ...args) {
+console.warn(formatMessage("WARN", identifier, message), ...args);
+}
+function error(identifier, message, ...args) {
+console.error(formatMessage("ERROR", identifier, message), ...args);
+}
+var logger_default = {
+log,
+warn,
+error
+};
+
 // index.ts
 var import_client_bedrock_runtime = require("@aws-sdk/client-bedrock-runtime");
 var import_axios = __toESM(require("axios"));
```
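The hunk above inlines a new `logger.ts` module whose default export prefixes every line with a level and a caller-supplied identifier; the remaining hunks replace bare `console.*` calls with it. A usage sketch (the import path is illustrative, since the module is bundled into `index.js` rather than published separately):

```ts
// Illustrative usage of the logger added in this release; the relative path is hypothetical.
import logger from "./logger";

logger.log("req-42", "Using OpenAI service", "gpt-3.5-turbo-0613");
// prints: [LOG] [req-42] Using OpenAI service gpt-3.5-turbo-0613

logger.error("req-42", "Stream error: parsing response");
// prints: [ERROR] [req-42] Stream error: parsing response
```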
```diff
@@ -116,13 +137,13 @@ function parseStreamedResponse(identifier, paragraph, functionCallName, function
 name: functionCallName,
 arguments: JSON.parse(functionCallArgs)
 };
-} catch (
-
-throw
+} catch (error2) {
+logger_default.error(identifier, "Error parsing functionCallArgs:", functionCallArgs);
+throw error2;
 }
 }
 if (!paragraph && !functionCall) {
-
+logger_default.error(
 identifier,
 "Stream error: received message without content or function_call, raw:",
 JSON.stringify({ paragraph, functionCallName, functionCallArgs })
@@ -140,7 +161,7 @@ function parseStreamedResponse(identifier, paragraph, functionCallName, function
 }
 async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, retries = 5, chunkTimeoutMs = 15e3) {
 var _a, _b;
-
+logger_default.log(
 identifier,
 "Calling OpenAI API with retries:",
 openAiConfig == null ? void 0 : openAiConfig.service,
@@ -160,27 +181,15 @@ async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, re
 chunkTimeoutMs
 );
 }
-} catch (
-
-console.error(
+} catch (error2) {
+logger_default.error(
 identifier,
-`
+`Retry #${i} error: ${error2.message}`,
+((_a = error2.response) == null ? void 0 : _a.data) || error2.data || error2
 );
-const errorCode = (_b =
-if (errorCode) {
-console.error(
-identifier,
-`Retry #${i} failed with API error: ${errorCode}`,
-JSON.stringify({
-data: error.data
-})
-);
-}
+const errorCode = (_b = error2.data) == null ? void 0 : _b.code;
 if (errorCode === "content_policy_violation") {
-
-identifier,
-`Removing images due to content policy violation error`
-);
+logger_default.log(identifier, "Removing images due to content policy violation error");
 openAiPayload.messages.forEach((message) => {
 if (Array.isArray(message.content)) {
 message.content = message.content.filter(
@@ -190,34 +199,25 @@ async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, re
 });
 }
 if (i >= 2 && (openAiConfig == null ? void 0 : openAiConfig.service) === "azure" && errorCode === "content_filter") {
-
-identifier,
-`Switching to OpenAI service due to content filter error`
-);
+logger_default.log(identifier, "Switching to OpenAI service due to content filter error");
 openAiConfig.service = "openai";
 }
 if (i === 3) {
 if ((openAiConfig == null ? void 0 : openAiConfig.service) === "azure") {
-
-identifier,
-`Switching to OpenAI service due to Azure service error`
-);
+logger_default.log(identifier, "Switching to OpenAI service due to Azure service error");
 openAiConfig.service = "openai";
 }
 }
 if (i === 4) {
 if (openAiPayload.tools) {
-
-identifier,
-`Switching to no tool choice due to persistent error`
-);
+logger_default.log(identifier, "Switching to no tool choice due to persistent error");
 openAiPayload.tool_choice = "none";
 }
 }
 await timeout(250);
 }
 }
-
+logger_default.error(
 identifier,
 `Failed to call OpenAI API after ${retries} attempts. Please lookup OpenAI status for active issues.`,
 errorObj
@@ -239,7 +239,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 let response;
 const controller = new AbortController();
 if (openAiConfig.service === "azure") {
-
+logger_default.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
 const model = openAiPayload.model;
 if (!openAiConfig.modelConfigMap) {
 throw new Error(
@@ -253,19 +253,15 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 } else {
 throw new Error("Azure OpenAI endpoint is required in modelConfigMap.");
 }
-
+logger_default.log(identifier, "Using endpoint", endpoint);
 try {
 const stringifiedPayload = JSON.stringify({
 ...openAiPayload,
 stream: true
 });
 const parsedPayload = JSON.parse(stringifiedPayload);
-} catch (
-
-identifier,
-"Stream error: Azure OpenAI JSON parsing error:",
-JSON.stringify(error)
-);
+} catch (error2) {
+logger_default.error(identifier, "Stream error: Azure OpenAI JSON parsing error:", error2);
 }
 response = await fetch(endpoint, {
 method: "POST",
@@ -280,10 +276,10 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 signal: controller.signal
 });
 } else {
-
+logger_default.log(identifier, "Using OpenAI service", openAiPayload.model);
 const endpoint = `https://api.openai.com/v1/chat/completions`;
 if (openAiConfig.orgId) {
-
+logger_default.log(identifier, "Using orgId", openAiConfig.orgId);
 }
 response = await fetch(endpoint, {
 method: "POST",
@@ -310,11 +306,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 const startAbortTimeout = () => {
 abortTimeout && clearTimeout(abortTimeout);
 return setTimeout(() => {
-
-identifier,
-`Stream error: aborted due to timeout after ${chunkTimeoutMs} ms.`,
-JSON.stringify({ paragraph })
-);
+logger_default.error(identifier, `Stream timeout after ${chunkTimeoutMs}ms`);
 controller.abort();
 }, chunkTimeoutMs);
 };
@@ -325,11 +317,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 const { done, value } = await reader.read();
 clearTimeout(abortTimeout2);
 if (done) {
-
-identifier,
-`Stream error: ended after ${chunkIndex + 1} chunks via reader done flag.`,
-rawStreamedBody
-);
+logger_default.error(identifier, `Stream ended prematurely after ${chunkIndex + 1} chunks`);
 throw new Error("Stream error: ended prematurely");
 }
 let chunk = new TextDecoder().decode(value);
@@ -344,10 +332,6 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 continue;
 }
 if (jsonString.includes("[DONE]")) {
-console.log(
-identifier,
-`Stream explicitly marked as done after ${chunkIndex + 1} chunks.`
-);
 try {
 return parseStreamedResponse(
 identifier,
@@ -356,40 +340,29 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
 functionCallArgs,
 functionNames
 );
-} catch (
-
-
-"Stream error: parsing response:",
-rawStreamedBody
-);
-throw error;
+} catch (error2) {
+logger_default.error(identifier, "Stream error: parsing response");
+throw error2;
 }
 }
 let json;
 try {
 json = JSON.parse(jsonString.trim());
-} catch (
+} catch (error2) {
 partialChunk = jsonString;
 continue;
 }
 if (!json.choices || !json.choices.length) {
 if (json.error) {
-
-
-
-
-
-
-
-error
-throw error;
+logger_default.error(identifier, "Stream error: OpenAI error:", json.error);
+const error2 = new Error("Stream error: OpenAI error");
+error2.data = json.error;
+error2.requestBody = truncatePayload(openAiPayload);
+throw error2;
+}
+if (chunkIndex !== 0) {
+logger_default.error(identifier, "Stream error: no choices in JSON:", json);
 }
-if (chunkIndex !== 0)
-console.error(
-identifier,
-"Stream error: no choices in JSON:",
-json
-);
 continue;
 }
 const dToolCall = (_d = (_c = (_b = (_a = json.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null ? void 0 : _c.tool_calls) == null ? void 0 : _d[0];
@@ -424,7 +397,7 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
 }
 let response;
 if (openAiConfig.service === "azure") {
-
+logger_default.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
 const model = openAiPayload.model;
 if (!openAiConfig.modelConfigMap) {
 throw new Error(
@@ -438,20 +411,16 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
 } else {
 throw new Error("Azure OpenAI endpoint is required in modelConfigMap.");
 }
-
+logger_default.log(identifier, "Using endpoint", endpoint);
 try {
 const stringifiedPayload = JSON.stringify({
 ...openAiPayload,
 stream: false
 });
 const parsedPayload = JSON.parse(stringifiedPayload);
-} catch (
-
-
-"OpenAI JSON parsing error:",
-JSON.stringify(error)
-);
-throw error;
+} catch (error2) {
+logger_default.error(identifier, "OpenAI JSON parsing error:", error2);
+throw error2;
 }
 response = await fetch(endpoint, {
 method: "POST",
@@ -465,10 +434,10 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
 })
 });
 } else {
-
+logger_default.log(identifier, "Using OpenAI service", openAiPayload.model);
 const endpoint = `https://api.openai.com/v1/chat/completions`;
 if (openAiConfig.orgId) {
-
+logger_default.log(identifier, "Using orgId", openAiConfig.orgId);
 }
 response = await fetch(endpoint, {
 method: "POST",
@@ -485,13 +454,13 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
 }
 if (!response.ok) {
 const errorData = await response.json();
-
+logger_default.error(identifier, "OpenAI API error:", errorData);
 throw new Error(`OpenAI API Error: ${errorData.error.message}`);
 }
 const data = await response.json();
 if (!data.choices || !data.choices.length) {
 if (data.error) {
-
+logger_default.error(identifier, "OpenAI error:", data.error);
 throw new Error("OpenAI error: " + data.error.message);
 }
 throw new Error("OpenAI error: No choices returned.");
@@ -532,29 +501,28 @@ function truncatePayload(payload) {
 }
 async function callAnthropicWithRetries(identifier, AiPayload, AiConfig, attempts = 5) {
 var _a, _b, _c, _d;
-
+logger_default.log(identifier, "Calling Anthropic API with retries");
 let lastResponse;
 for (let i = 0; i < attempts; i++) {
 try {
 lastResponse = await callAnthropic(identifier, AiPayload, AiConfig);
 return lastResponse;
 } catch (e) {
-
-console.error(
+logger_default.error(
 identifier,
-`
-
+`Retry #${i} error: ${e.message}`,
+((_a = e.response) == null ? void 0 : _a.data) || e
 );
 if (((_d = (_c = (_b = e.response) == null ? void 0 : _b.data) == null ? void 0 : _c.error) == null ? void 0 : _d.type) === "rate_limit_error") {
 }
 await timeout(125 * i);
 }
 }
-const
+const error2 = new Error(
 `Failed to call Anthropic API after ${attempts} attempts`
 );
-
-throw
+error2.response = lastResponse;
+throw error2;
 }
 async function callAnthropic(identifier, AiPayload, AiConfig) {
 var _a, _b;
@@ -613,14 +581,14 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
 }
 const answers = data.content;
 if (!answers[0]) {
-
+logger_default.error(identifier, "Missing answer in Anthropic API:", data);
 throw new Error("Missing answer in Anthropic API");
 }
 let textResponse = "";
 let functionCalls = [];
 for (const answer of answers) {
 if (!answer.type) {
-
+logger_default.error(identifier, "Missing answer type in Anthropic API:", data);
 throw new Error("Missing answer type in Anthropic API");
 }
 let text = "";
@@ -631,7 +599,7 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
 /<thinking>|<\/thinking>|<answer>|<\/answer>/gs,
 ""
 );
-
+logger_default.log(identifier, "No text in answer, returning text within tags:", text);
 }
 if (textResponse) {
 textResponse += `
@@ -649,11 +617,7 @@ ${text}`;
 }
 }
 if (!textResponse && !functionCalls.length) {
-
-identifier,
-"Missing text & fns in Anthropic API response:",
-JSON.stringify(data)
-);
+logger_default.error(identifier, "Missing text & fns in Anthropic API response:", data);
 throw new Error("Missing text & fns in Anthropic API response");
 }
 return {
@@ -763,9 +727,7 @@ async function prepareGoogleAIPayload(payload) {
 }
 for (const file of message.files || []) {
 if (!((_a = file.mimeType) == null ? void 0 : _a.startsWith("image"))) {
-
-"Google AI API does not support non-image file types. Skipping file."
-);
+logger_default.warn("payload", "Google AI API does not support non-image file types. Skipping file.");
 continue;
 }
 if (file.url) {
@@ -803,9 +765,8 @@ async function prepareGoogleAIPayload(payload) {
 }
 async function callGoogleAI(identifier, payload) {
 var _a, _b, _c;
-
+logger_default.log(identifier, "Calling Google AI API");
 const googleMessages = jigGoogleMessages(payload.messages);
-console.log(identifier, "Google AI API messages:", googleMessages);
 const history = googleMessages.slice(0, -1);
 const lastMessage = googleMessages.slice(-1)[0];
 const genAI = new import_genai.GoogleGenAI({
@@ -848,11 +809,7 @@ async function callGoogleAI(identifier, payload) {
 };
 });
 if (!text && !(parsedFunctionCalls == null ? void 0 : parsedFunctionCalls.length) && !files.length) {
-
-identifier,
-"Missing text & fns in Google AI API response:",
-response
-);
+logger_default.error(identifier, "Missing text & fns in Google AI API response:", response);
 throw new Error("Missing text & fns in Google AI API response");
 }
 return {
@@ -863,32 +820,26 @@ async function callGoogleAI(identifier, payload) {
 };
 }
 async function callGoogleAIWithRetries(identifier, payload, retries = 5) {
-
+logger_default.log(identifier, "Calling Google AI API with retries");
 let lastError;
 for (let i = 0; i < retries; i++) {
 try {
 return await callGoogleAI(identifier, payload);
 } catch (e) {
 lastError = e;
-
-console.error(
-identifier,
-`Retrying due to error: received bad response from Google AI API: ${e.message}`,
-JSON.stringify(e)
-// Google AI errors might not have a response.data structure like others
-);
+logger_default.error(identifier, `Retry #${i} error: ${e.message}`, e);
 await timeout(125 * i);
 }
 }
-const
+const error2 = new Error(
 `Failed to call Google AI API after ${retries} attempts`
 );
-
-throw
+error2.cause = lastError;
+throw error2;
 }
 async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chunkTimeoutMs = 15e3) {
 if (isAnthropicPayload(aiPayload)) {
-
+logger_default.log(identifier, "Delegating call to Anthropic API");
 return await callAnthropicWithRetries(
 identifier,
 await prepareAnthropicPayload(aiPayload),
@@ -896,7 +847,7 @@ async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chu
 retries
 );
 } else if (isOpenAiPayload(aiPayload)) {
-
+logger_default.log(identifier, "Delegating call to OpenAI API");
 return await callOpenAiWithRetries(
 identifier,
 await prepareOpenAIPayload(aiPayload),
@@ -905,13 +856,13 @@ async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chu
 chunkTimeoutMs
 );
 } else if (isGroqPayload(aiPayload)) {
-
+logger_default.log(identifier, "Delegating call to Groq API");
 return await callGroqWithRetries(
 identifier,
 await prepareGroqPayload(aiPayload)
 );
 } else if (isGoogleAIPayload(aiPayload)) {
-
+logger_default.log(identifier, "Delegating call to Google AI API");
 return await callGoogleAIWithRetries(
 identifier,
 await prepareGoogleAIPayload(aiPayload),
@@ -946,9 +897,7 @@ async function prepareAnthropicPayload(payload) {
 }
 for (const file of message.files || []) {
 if (!((_a = file.mimeType) == null ? void 0 : _a.startsWith("image"))) {
-
-"Anthropic API does not support non-image file types. Skipping file."
-);
+logger_default.warn("payload", "Anthropic API does not support non-image file types. Skipping file.");
 continue;
 }
 if (file.url) {
@@ -1038,10 +987,7 @@ async function prepareOpenAIPayload(payload) {
 });
 }
 } else {
-
-"Skipping file in message. File or image type not supported by OpenAI API:",
-file.mimeType
-);
+logger_default.warn("payload", "Skipping file in message. File or image type not supported by OpenAI API:", file.mimeType);
 }
 }
 preparedPayload.messages.push({
@@ -1093,7 +1039,7 @@ async function callGroq(identifier, payload) {
 const data = response.data;
 const answer = data.choices[0].message;
 if (!answer) {
-
+logger_default.error(identifier, "Missing answer in Groq API:", data);
 throw new Error("Missing answer in Groq API");
 }
 const textResponse = answer.content || null;
@@ -1114,30 +1060,24 @@ async function callGroq(identifier, payload) {
 }
 async function callGroqWithRetries(identifier, payload, retries = 5) {
 var _a;
-
+logger_default.log(identifier, "Calling Groq API with retries");
 let lastResponse;
 for (let i = 0; i < retries; i++) {
 try {
 lastResponse = await callGroq(identifier, payload);
 return lastResponse;
 } catch (e) {
-
-console.error(
-identifier,
-`Retrying due to error: received bad response from Groq API: ${e.message}`,
-JSON.stringify((_a = e.response) == null ? void 0 : _a.data)
-);
+logger_default.error(identifier, `Retry #${i} error: ${e.message}`, ((_a = e.response) == null ? void 0 : _a.data) || e);
 await timeout(125 * i);
 }
 }
-const
+const error2 = new Error(
 `Failed to call Groq API after ${retries} attempts`
 );
-
-throw
+error2.response = lastResponse;
+throw error2;
 }
 async function getNormalizedBase64PNG(url, mime) {
-console.log("Normalizing image", url);
 const response = await import_axios.default.get(url, { responseType: "arraybuffer" });
 let imageBuffer = Buffer.from(response.data);
 let sharpOptions = {};
```
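The provider wrappers touched in this hunk and in `callAnthropicWithRetries` / `callGoogleAIWithRetries` above now share one retry shape: log `Retry #${i} error: ...` through the new logger, back off linearly with `timeout(125 * i)`, and after the last attempt throw a single error carrying the last response or cause. A standalone sketch of that shape, with an illustrative generic wrapper and `sleep` helper that are not part of the package:

```ts
// Generic re-statement of the retry pattern used in this release; names here are illustrative.
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function withRetries<T>(identifier: string, attempt: () => Promise<T>, attempts = 5): Promise<T> {
  let lastError: unknown;
  for (let i = 0; i < attempts; i++) {
    try {
      return await attempt();
    } catch (e) {
      lastError = e;
      // Mirrors logger_default.error(identifier, `Retry #${i} error: ...`) in the bundle.
      console.error(`[ERROR] [${identifier}] Retry #${i} error: ${(e as Error).message}`);
      await sleep(125 * i); // linear backoff, as in timeout(125 * i)
    }
  }
  const error = new Error(`Failed after ${attempts} attempts`);
  // The bundled wrappers attach lastResponse or set error.cause similarly before throwing.
  (error as Error & { cause?: unknown }).cause = lastError;
  throw error;
}
```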
|