190proof 1.0.69 → 1.0.71

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
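For reference, the main change in this release is a new internal logger module that prefixes every message with a level tag and a caller-supplied identifier. The minimal sketch below simply reuses the two formatting helpers from the added logger.ts block in the diff; the identifier values ("req-42", "gpt-4o") are hypothetical examples, not part of the package.

// Formatting helpers reproduced from the logger.ts code added in 1.0.71;
// the identifier values are made up for illustration.
function formatIdentifier(identifier) {
  if (Array.isArray(identifier)) {
    return identifier.map((id) => `[${id}]`).join(" ");
  }
  return `[${identifier}]`;
}
function formatMessage(level, identifier, message) {
  return `[${level}] ${formatIdentifier(identifier)} ${message}`;
}
console.log(formatMessage("LOG", ["req-42", "gpt-4o"], "Calling OpenAI API with retries:"));
// -> [LOG] [req-42] [gpt-4o] Calling OpenAI API with retries: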
package/dist/index.js CHANGED
@@ -86,6 +86,31 @@ var GeminiModel = /* @__PURE__ */ ((GeminiModel2) => {
  return GeminiModel2;
  })(GeminiModel || {});
 
+ // logger.ts
+ function formatIdentifier(identifier) {
+ if (Array.isArray(identifier)) {
+ return identifier.map((id) => `[${id}]`).join(" ");
+ }
+ return `[${identifier}]`;
+ }
+ function formatMessage(level, identifier, message) {
+ return `[${level}] ${formatIdentifier(identifier)} ${message}`;
+ }
+ function log(identifier, message, ...args) {
+ console.log(formatMessage("LOG", identifier, message), ...args);
+ }
+ function warn(identifier, message, ...args) {
+ console.warn(formatMessage("WARN", identifier, message), ...args);
+ }
+ function error(identifier, message, ...args) {
+ console.error(formatMessage("ERROR", identifier, message), ...args);
+ }
+ var logger_default = {
+ log,
+ warn,
+ error
+ };
+
  // index.ts
  var import_client_bedrock_runtime = require("@aws-sdk/client-bedrock-runtime");
  var import_axios = __toESM(require("axios"));
@@ -118,13 +143,13 @@ function parseStreamedResponse(identifier, paragraph, functionCallName, function
  name: functionCallName,
  arguments: JSON.parse(functionCallArgs)
  };
- } catch (error) {
- console.error("Error parsing functionCallArgs:", functionCallArgs);
- throw error;
+ } catch (error2) {
+ logger_default.error(identifier, "Error parsing functionCallArgs:", functionCallArgs);
+ throw error2;
  }
  }
  if (!paragraph && !functionCall) {
- console.error(
+ logger_default.error(
  identifier,
  "Stream error: received message without content or function_call, raw:",
  JSON.stringify({ paragraph, functionCallName, functionCallArgs })
@@ -142,7 +167,7 @@ function parseStreamedResponse(identifier, paragraph, functionCallName, function
  }
  async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, retries = 5, chunkTimeoutMs = 15e3) {
  var _a, _b;
- console.log(
+ logger_default.log(
  identifier,
  "Calling OpenAI API with retries:",
  openAiConfig == null ? void 0 : openAiConfig.service,
@@ -162,27 +187,15 @@ async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, re
  chunkTimeoutMs
  );
  }
- } catch (error) {
- console.error(error);
- console.error(
+ } catch (error2) {
+ logger_default.error(
  identifier,
- `Retrying due to error: received bad response from OpenAI API [${openAiConfig == null ? void 0 : openAiConfig.service}-${openAiPayload.model}-${openAiConfig == null ? void 0 : openAiConfig.orgId}]: ${error.message} - ${JSON.stringify((_a = error.response) == null ? void 0 : _a.data)}`
+ `Retry #${i} error: ${error2.message}`,
+ ((_a = error2.response) == null ? void 0 : _a.data) || error2.data || error2
  );
- const errorCode = (_b = error.data) == null ? void 0 : _b.code;
- if (errorCode) {
- console.error(
- identifier,
- `Retry #${i} failed with API error: ${errorCode}`,
- JSON.stringify({
- data: error.data
- })
- );
- }
+ const errorCode = (_b = error2.data) == null ? void 0 : _b.code;
  if (errorCode === "content_policy_violation") {
- console.log(
- identifier,
- `Removing images due to content policy violation error`
- );
+ logger_default.log(identifier, "Removing images due to content policy violation error");
  openAiPayload.messages.forEach((message) => {
  if (Array.isArray(message.content)) {
  message.content = message.content.filter(
@@ -192,34 +205,25 @@ async function callOpenAiWithRetries(identifier, openAiPayload, openAiConfig, re
  });
  }
  if (i >= 2 && (openAiConfig == null ? void 0 : openAiConfig.service) === "azure" && errorCode === "content_filter") {
- console.log(
- identifier,
- `Switching to OpenAI service due to content filter error`
- );
+ logger_default.log(identifier, "Switching to OpenAI service due to content filter error");
  openAiConfig.service = "openai";
  }
  if (i === 3) {
  if ((openAiConfig == null ? void 0 : openAiConfig.service) === "azure") {
- console.log(
- identifier,
- `Switching to OpenAI service due to Azure service error`
- );
+ logger_default.log(identifier, "Switching to OpenAI service due to Azure service error");
  openAiConfig.service = "openai";
  }
  }
  if (i === 4) {
  if (openAiPayload.tools) {
- console.log(
- identifier,
- `Switching to no tool choice due to persistent error`
- );
+ logger_default.log(identifier, "Switching to no tool choice due to persistent error");
  openAiPayload.tool_choice = "none";
  }
  }
  await timeout(250);
  }
  }
- console.error(
+ logger_default.error(
  identifier,
  `Failed to call OpenAI API after ${retries} attempts. Please lookup OpenAI status for active issues.`,
  errorObj
@@ -241,7 +245,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  let response;
  const controller = new AbortController();
  if (openAiConfig.service === "azure") {
- console.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
+ logger_default.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
  const model = openAiPayload.model;
  if (!openAiConfig.modelConfigMap) {
  throw new Error(
@@ -255,19 +259,15 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  } else {
  throw new Error("Azure OpenAI endpoint is required in modelConfigMap.");
  }
- console.log(identifier, "Using endpoint", endpoint);
+ logger_default.log(identifier, "Using endpoint", endpoint);
  try {
  const stringifiedPayload = JSON.stringify({
  ...openAiPayload,
  stream: true
  });
  const parsedPayload = JSON.parse(stringifiedPayload);
- } catch (error) {
- console.error(
- identifier,
- "Stream error: Azure OpenAI JSON parsing error:",
- JSON.stringify(error)
- );
+ } catch (error2) {
+ logger_default.error(identifier, "Stream error: Azure OpenAI JSON parsing error:", error2);
  }
  response = await fetch(endpoint, {
  method: "POST",
@@ -282,10 +282,10 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  signal: controller.signal
  });
  } else {
- console.log(identifier, "Using OpenAI service", openAiPayload.model);
+ logger_default.log(identifier, "Using OpenAI service", openAiPayload.model);
  const endpoint = `https://api.openai.com/v1/chat/completions`;
  if (openAiConfig.orgId) {
- console.log(identifier, "Using orgId", openAiConfig.orgId);
+ logger_default.log(identifier, "Using orgId", openAiConfig.orgId);
  }
  response = await fetch(endpoint, {
  method: "POST",
@@ -312,11 +312,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  const startAbortTimeout = () => {
  abortTimeout && clearTimeout(abortTimeout);
  return setTimeout(() => {
- console.log(
- identifier,
- `Stream error: aborted due to timeout after ${chunkTimeoutMs} ms.`,
- JSON.stringify({ paragraph })
- );
+ logger_default.error(identifier, `Stream timeout after ${chunkTimeoutMs}ms`);
  controller.abort();
  }, chunkTimeoutMs);
  };
@@ -327,11 +323,7 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  const { done, value } = await reader.read();
  clearTimeout(abortTimeout2);
  if (done) {
- console.log(
- identifier,
- `Stream error: ended after ${chunkIndex + 1} chunks via reader done flag.`,
- rawStreamedBody
- );
+ logger_default.error(identifier, `Stream ended prematurely after ${chunkIndex + 1} chunks`);
  throw new Error("Stream error: ended prematurely");
  }
  let chunk = new TextDecoder().decode(value);
@@ -346,10 +338,6 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  continue;
  }
  if (jsonString.includes("[DONE]")) {
- console.log(
- identifier,
- `Stream explicitly marked as done after ${chunkIndex + 1} chunks.`
- );
  try {
  return parseStreamedResponse(
  identifier,
@@ -358,40 +346,29 @@ async function callOpenAIStream(identifier, openAiPayload, openAiConfig, chunkTi
  functionCallArgs,
  functionNames
  );
- } catch (error) {
- console.error(
- identifier,
- "Stream error: parsing response:",
- rawStreamedBody
- );
- throw error;
+ } catch (error2) {
+ logger_default.error(identifier, "Stream error: parsing response");
+ throw error2;
  }
  }
  let json;
  try {
  json = JSON.parse(jsonString.trim());
- } catch (error) {
+ } catch (error2) {
  partialChunk = jsonString;
  continue;
  }
  if (!json.choices || !json.choices.length) {
  if (json.error) {
- console.error(
- identifier,
- "Stream error: OpenAI error:",
- json.error && JSON.stringify(json.error)
- );
- const error = new Error("Stream error: OpenAI error");
- error.data = json.error;
- error.requestBody = truncatePayload(openAiPayload);
- throw error;
+ logger_default.error(identifier, "Stream error: OpenAI error:", json.error);
+ const error2 = new Error("Stream error: OpenAI error");
+ error2.data = json.error;
+ error2.requestBody = truncatePayload(openAiPayload);
+ throw error2;
+ }
+ if (chunkIndex !== 0) {
+ logger_default.error(identifier, "Stream error: no choices in JSON:", json);
  }
- if (chunkIndex !== 0)
- console.error(
- identifier,
- "Stream error: no choices in JSON:",
- json
- );
  continue;
  }
  const dToolCall = (_d = (_c = (_b = (_a = json.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.delta) == null ? void 0 : _c.tool_calls) == null ? void 0 : _d[0];
@@ -426,7 +403,7 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
  }
  let response;
  if (openAiConfig.service === "azure") {
- console.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
+ logger_default.log(identifier, "Using Azure OpenAI service", openAiPayload.model);
  const model = openAiPayload.model;
  if (!openAiConfig.modelConfigMap) {
  throw new Error(
@@ -440,20 +417,16 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
  } else {
  throw new Error("Azure OpenAI endpoint is required in modelConfigMap.");
  }
- console.log(identifier, "Using endpoint", endpoint);
+ logger_default.log(identifier, "Using endpoint", endpoint);
  try {
  const stringifiedPayload = JSON.stringify({
  ...openAiPayload,
  stream: false
  });
  const parsedPayload = JSON.parse(stringifiedPayload);
- } catch (error) {
- console.error(
- identifier,
- "OpenAI JSON parsing error:",
- JSON.stringify(error)
- );
- throw error;
+ } catch (error2) {
+ logger_default.error(identifier, "OpenAI JSON parsing error:", error2);
+ throw error2;
  }
  response = await fetch(endpoint, {
  method: "POST",
@@ -467,10 +440,10 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
  })
  });
  } else {
- console.log(identifier, "Using OpenAI service", openAiPayload.model);
+ logger_default.log(identifier, "Using OpenAI service", openAiPayload.model);
  const endpoint = `https://api.openai.com/v1/chat/completions`;
  if (openAiConfig.orgId) {
- console.log(identifier, "Using orgId", openAiConfig.orgId);
+ logger_default.log(identifier, "Using orgId", openAiConfig.orgId);
  }
  response = await fetch(endpoint, {
  method: "POST",
@@ -487,13 +460,13 @@ async function callOpenAI(identifier, openAiPayload, openAiConfig) {
  }
  if (!response.ok) {
  const errorData = await response.json();
- console.error(identifier, "OpenAI API error:", JSON.stringify(errorData));
+ logger_default.error(identifier, "OpenAI API error:", errorData);
  throw new Error(`OpenAI API Error: ${errorData.error.message}`);
  }
  const data = await response.json();
  if (!data.choices || !data.choices.length) {
  if (data.error) {
- console.error(identifier, "OpenAI error:", JSON.stringify(data.error));
+ logger_default.error(identifier, "OpenAI error:", data.error);
  throw new Error("OpenAI error: " + data.error.message);
  }
  throw new Error("OpenAI error: No choices returned.");
@@ -534,29 +507,28 @@ function truncatePayload(payload) {
  }
  async function callAnthropicWithRetries(identifier, AiPayload, AiConfig, attempts = 5) {
  var _a, _b, _c, _d;
- console.log(identifier, "Calling Anthropic API with retries");
+ logger_default.log(identifier, "Calling Anthropic API with retries");
  let lastResponse;
  for (let i = 0; i < attempts; i++) {
  try {
  lastResponse = await callAnthropic(identifier, AiPayload, AiConfig);
  return lastResponse;
  } catch (e) {
- console.error(e);
- console.error(
+ logger_default.error(
  identifier,
- `Retrying due to error: received bad response from Anthropic API: ${e.message}`,
- JSON.stringify((_a = e.response) == null ? void 0 : _a.data)
+ `Retry #${i} error: ${e.message}`,
+ ((_a = e.response) == null ? void 0 : _a.data) || e
  );
  if (((_d = (_c = (_b = e.response) == null ? void 0 : _b.data) == null ? void 0 : _c.error) == null ? void 0 : _d.type) === "rate_limit_error") {
  }
  await timeout(125 * i);
  }
  }
- const error = new Error(
+ const error2 = new Error(
  `Failed to call Anthropic API after ${attempts} attempts`
  );
- error.response = lastResponse;
- throw error;
+ error2.response = lastResponse;
+ throw error2;
  }
  async function callAnthropic(identifier, AiPayload, AiConfig) {
  var _a, _b;
@@ -615,14 +587,14 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
  }
  const answers = data.content;
  if (!answers[0]) {
- console.error(identifier, "Missing answer in Anthropic API:", data);
+ logger_default.error(identifier, "Missing answer in Anthropic API:", data);
  throw new Error("Missing answer in Anthropic API");
  }
  let textResponse = "";
  let functionCalls = [];
  for (const answer of answers) {
  if (!answer.type) {
- console.error(identifier, "Missing answer type in Anthropic API:", data);
+ logger_default.error(identifier, "Missing answer type in Anthropic API:", data);
  throw new Error("Missing answer type in Anthropic API");
  }
  let text = "";
@@ -633,7 +605,7 @@ async function callAnthropic(identifier, AiPayload, AiConfig) {
  /<thinking>|<\/thinking>|<answer>|<\/answer>/gs,
  ""
  );
- console.log("No text in answer, returning text within tags:", text);
+ logger_default.log(identifier, "No text in answer, returning text within tags:", text);
  }
  if (textResponse) {
  textResponse += `
@@ -651,11 +623,7 @@ ${text}`;
  }
  }
  if (!textResponse && !functionCalls.length) {
- console.error(
- identifier,
- "Missing text & fns in Anthropic API response:",
- JSON.stringify(data)
- );
+ logger_default.error(identifier, "Missing text & fns in Anthropic API response:", data);
  throw new Error("Missing text & fns in Anthropic API response");
  }
  return {
@@ -765,9 +733,7 @@ async function prepareGoogleAIPayload(payload) {
  }
  for (const file of message.files || []) {
  if (!((_a = file.mimeType) == null ? void 0 : _a.startsWith("image"))) {
- console.warn(
- "Google AI API does not support non-image file types. Skipping file."
- );
+ logger_default.warn("payload", "Google AI API does not support non-image file types. Skipping file.");
  continue;
  }
  if (file.url) {
@@ -805,9 +771,8 @@ async function prepareGoogleAIPayload(payload) {
  }
  async function callGoogleAI(identifier, payload) {
  var _a, _b, _c;
- console.log(identifier, "Calling Google AI API");
+ logger_default.log(identifier, "Calling Google AI API");
  const googleMessages = jigGoogleMessages(payload.messages);
- console.log(identifier, "Google AI API messages:", googleMessages);
  const history = googleMessages.slice(0, -1);
  const lastMessage = googleMessages.slice(-1)[0];
  const genAI = new import_genai.GoogleGenAI({
@@ -850,11 +815,7 @@ async function callGoogleAI(identifier, payload) {
  };
  });
  if (!text && !(parsedFunctionCalls == null ? void 0 : parsedFunctionCalls.length) && !files.length) {
- console.error(
- identifier,
- "Missing text & fns in Google AI API response:",
- response
- );
+ logger_default.error(identifier, "Missing text & fns in Google AI API response:", response);
  throw new Error("Missing text & fns in Google AI API response");
  }
  return {
@@ -865,32 +826,26 @@ async function callGoogleAI(identifier, payload) {
  };
  }
  async function callGoogleAIWithRetries(identifier, payload, retries = 5) {
- console.log(identifier, "Calling Google AI API with retries");
+ logger_default.log(identifier, "Calling Google AI API with retries");
  let lastError;
  for (let i = 0; i < retries; i++) {
  try {
  return await callGoogleAI(identifier, payload);
  } catch (e) {
  lastError = e;
- console.error(e);
- console.error(
- identifier,
- `Retrying due to error: received bad response from Google AI API: ${e.message}`,
- JSON.stringify(e)
- // Google AI errors might not have a response.data structure like others
- );
+ logger_default.error(identifier, `Retry #${i} error: ${e.message}`, e);
  await timeout(125 * i);
  }
  }
- const error = new Error(
+ const error2 = new Error(
  `Failed to call Google AI API after ${retries} attempts`
  );
- error.cause = lastError;
- throw error;
+ error2.cause = lastError;
+ throw error2;
  }
  async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chunkTimeoutMs = 15e3) {
  if (isAnthropicPayload(aiPayload)) {
- console.log(identifier, "Delegating call to Anthropic API");
+ logger_default.log(identifier, "Delegating call to Anthropic API");
  return await callAnthropicWithRetries(
  identifier,
  await prepareAnthropicPayload(aiPayload),
@@ -898,7 +853,7 @@ async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chu
  retries
  );
  } else if (isOpenAiPayload(aiPayload)) {
- console.log(identifier, "Delegating call to OpenAI API");
+ logger_default.log(identifier, "Delegating call to OpenAI API");
  return await callOpenAiWithRetries(
  identifier,
  await prepareOpenAIPayload(aiPayload),
@@ -907,13 +862,13 @@ async function callWithRetries(identifier, aiPayload, aiConfig, retries = 5, chu
  chunkTimeoutMs
  );
  } else if (isGroqPayload(aiPayload)) {
- console.log(identifier, "Delegating call to Groq API");
+ logger_default.log(identifier, "Delegating call to Groq API");
  return await callGroqWithRetries(
  identifier,
  await prepareGroqPayload(aiPayload)
  );
  } else if (isGoogleAIPayload(aiPayload)) {
- console.log(identifier, "Delegating call to Google AI API");
+ logger_default.log(identifier, "Delegating call to Google AI API");
  return await callGoogleAIWithRetries(
  identifier,
  await prepareGoogleAIPayload(aiPayload),
@@ -948,9 +903,7 @@ async function prepareAnthropicPayload(payload) {
  }
  for (const file of message.files || []) {
  if (!((_a = file.mimeType) == null ? void 0 : _a.startsWith("image"))) {
- console.warn(
- "Anthropic API does not support non-image file types. Skipping file."
- );
+ logger_default.warn("payload", "Anthropic API does not support non-image file types. Skipping file.");
  continue;
  }
  if (file.url) {
@@ -1040,10 +993,7 @@ async function prepareOpenAIPayload(payload) {
  });
  }
  } else {
- console.warn(
- "Skipping file in message. File or image type not supported by OpenAI API:",
- file.mimeType
- );
+ logger_default.warn("payload", "Skipping file in message. File or image type not supported by OpenAI API:", file.mimeType);
  }
  }
  preparedPayload.messages.push({
@@ -1095,7 +1045,7 @@ async function callGroq(identifier, payload) {
  const data = response.data;
  const answer = data.choices[0].message;
  if (!answer) {
- console.error(identifier, "Missing answer in Groq API:", data);
+ logger_default.error(identifier, "Missing answer in Groq API:", data);
  throw new Error("Missing answer in Groq API");
  }
  const textResponse = answer.content || null;
@@ -1116,30 +1066,24 @@ async function callGroq(identifier, payload) {
  }
  async function callGroqWithRetries(identifier, payload, retries = 5) {
  var _a;
- console.log(identifier, "Calling Groq API with retries");
+ logger_default.log(identifier, "Calling Groq API with retries");
  let lastResponse;
  for (let i = 0; i < retries; i++) {
  try {
  lastResponse = await callGroq(identifier, payload);
  return lastResponse;
  } catch (e) {
- console.error(e);
- console.error(
- identifier,
- `Retrying due to error: received bad response from Groq API: ${e.message}`,
- JSON.stringify((_a = e.response) == null ? void 0 : _a.data)
- );
+ logger_default.error(identifier, `Retry #${i} error: ${e.message}`, ((_a = e.response) == null ? void 0 : _a.data) || e);
  await timeout(125 * i);
  }
  }
- const error = new Error(
+ const error2 = new Error(
  `Failed to call Groq API after ${retries} attempts`
  );
- error.response = lastResponse;
- throw error;
+ error2.response = lastResponse;
+ throw error2;
  }
  async function getNormalizedBase64PNG(url, mime) {
- console.log("Normalizing image", url);
  const response = await import_axios.default.get(url, { responseType: "arraybuffer" });
  let imageBuffer = Buffer.from(response.data);
  let sharpOptions = {};