@elizaos/plugin-openai 1.0.0-beta.49 → 1.0.0-beta.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
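Read as a whole, the hunks below fall into three groups. First, one functional change: getBaseURL no longer routes the configured URL through getProviderBaseURL from @elizaos/core, and instead returns the OPENAI_BASE_URL setting (or its https://api.openai.com/v1 default) directly; the corresponding import is dropped. Second, the comment paths of the inlined @opentelemetry/api modules change from ../../node_modules/... to node_modules/..., which suggests (an inference, not stated in the package) that the bundle is now built from the package root rather than a monorepo subdirectory. Third, the bulk of the diff is behavior-neutral reformatting: long calls such as startLlmSpan(...) and logger.warn(...) are wrapped onto multiple lines, and the ai, js-tiktoken, and undici imports move below the inlined OpenTelemetry code.

As a reading aid, here is the functional change reassembled from the @@ -541,28 +548,37 @@ hunk below, with indentation restored; the helper names are exactly those in the bundle, and the comment on the old version infers the override behavior from getProviderBaseURL's name (its implementation lives in @elizaos/core and is not part of this diff):

    // 1.0.0-beta.49: the URL was resolved through @elizaos/core,
    // presumably allowing a provider-level override of the setting
    function getBaseURL(runtime) {
      const defaultBaseURL = getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
      logger.debug(`[OpenAI] Default base URL: ${defaultBaseURL}`);
      return getProviderBaseURL(runtime, "openai", defaultBaseURL);
    }

    // 1.0.0-beta.51: the setting (or the default) is returned as-is
    function getBaseURL(runtime) {
      const baseURL = getSetting(
        runtime,
        "OPENAI_BASE_URL",
        "https://api.openai.com/v1"
      );
      logger.debug(`[OpenAI] Default base URL: ${baseURL}`);
      return baseURL;
    }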
package/dist/index.js CHANGED
@@ -1,29 +1,21 @@
  // src/index.ts
  import { createOpenAI } from "@ai-sdk/openai";
- import { getProviderBaseURL } from "@elizaos/core";
  import {
  EventType,
  logger,
  ModelType,
- VECTOR_DIMS,
  safeReplacer,
- ServiceType
+ ServiceType,
+ VECTOR_DIMS
  } from "@elizaos/core";
- import {
- generateObject,
- generateText,
- JSONParseError
- } from "ai";
- import { encodingForModel } from "js-tiktoken";
- import { fetch, FormData } from "undici";

- // ../../node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
+ // node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
  var _globalThis = typeof globalThis === "object" ? globalThis : global;

- // ../../node_modules/@opentelemetry/api/build/esm/version.js
+ // node_modules/@opentelemetry/api/build/esm/version.js
  var VERSION = "1.9.0";

- // ../../node_modules/@opentelemetry/api/build/esm/internal/semver.js
+ // node_modules/@opentelemetry/api/build/esm/internal/semver.js
  var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
  function _makeCompatibilityCheck(ownVersion) {
  var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
@@ -90,7 +82,7 @@ function _makeCompatibilityCheck(ownVersion) {
  }
  var isCompatible = _makeCompatibilityCheck(VERSION);

- // ../../node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
+ // node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
  var major = VERSION.split(".")[0];
  var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
  var _global = _globalThis;
@@ -132,7 +124,7 @@ function unregisterGlobal(type, diag) {
  }
  }

- // ../../node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
+ // node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
  var __read = function(o, n) {
  var m = typeof Symbol === "function" && o[Symbol.iterator];
  if (!m) return o;
@@ -212,7 +204,7 @@ function logProxy(funcName, namespace, args) {
  return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
  }

- // ../../node_modules/@opentelemetry/api/build/esm/diag/types.js
+ // node_modules/@opentelemetry/api/build/esm/diag/types.js
  var DiagLogLevel;
  (function(DiagLogLevel2) {
  DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
@@ -224,7 +216,7 @@ var DiagLogLevel;
  DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
  })(DiagLogLevel || (DiagLogLevel = {}));

- // ../../node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
+ // node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
  function createLogLevelDiagLogger(maxLevel, logger2) {
  if (maxLevel < DiagLogLevel.NONE) {
  maxLevel = DiagLogLevel.NONE;
@@ -249,7 +241,7 @@ function createLogLevelDiagLogger(maxLevel, logger2) {
  };
  }

- // ../../node_modules/@opentelemetry/api/build/esm/api/diag.js
+ // node_modules/@opentelemetry/api/build/esm/api/diag.js
  var __read2 = function(o, n) {
  var m = typeof Symbol === "function" && o[Symbol.iterator];
  if (!m) return o;
@@ -341,7 +333,7 @@ var DiagAPI = (
  }()
  );

- // ../../node_modules/@opentelemetry/api/build/esm/context/context.js
+ // node_modules/@opentelemetry/api/build/esm/context/context.js
  var BaseContext = (
  /** @class */
  /* @__PURE__ */ function() {
@@ -367,7 +359,7 @@ var BaseContext = (
  );
  var ROOT_CONTEXT = new BaseContext();

- // ../../node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
+ // node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
  var __read3 = function(o, n) {
  var m = typeof Symbol === "function" && o[Symbol.iterator];
  if (!m) return o;
@@ -422,7 +414,7 @@ var NoopContextManager = (
  }()
  );

- // ../../node_modules/@opentelemetry/api/build/esm/api/context.js
+ // node_modules/@opentelemetry/api/build/esm/api/context.js
  var __read4 = function(o, n) {
  var m = typeof Symbol === "function" && o[Symbol.iterator];
  if (!m) return o;
@@ -490,7 +482,7 @@ var ContextAPI = (
  }()
  );

- // ../../node_modules/@opentelemetry/api/build/esm/trace/status.js
+ // node_modules/@opentelemetry/api/build/esm/trace/status.js
  var SpanStatusCode;
  (function(SpanStatusCode2) {
  SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
@@ -498,26 +490,41 @@ var SpanStatusCode;
  SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
  })(SpanStatusCode || (SpanStatusCode = {}));

- // ../../node_modules/@opentelemetry/api/build/esm/context-api.js
+ // node_modules/@opentelemetry/api/build/esm/context-api.js
  var context = ContextAPI.getInstance();

  // src/index.ts
+ import {
+ generateObject,
+ generateText,
+ JSONParseError
+ } from "ai";
+ import { encodingForModel } from "js-tiktoken";
+ import { fetch, FormData } from "undici";
  function getTracer(runtime) {
  const availableServices = Array.from(runtime.getAllServices().keys());
- logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
- logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
+ logger.debug(
+ `[getTracer] Available services: ${JSON.stringify(availableServices)}`
+ );
+ logger.debug(
+ `[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`
+ );
  const instrumentationService = runtime.getService(
  ServiceType.INSTRUMENTATION
  );
  if (!instrumentationService) {
- logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
+ logger.warn(
+ `[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`
+ );
  return null;
  }
  if (!instrumentationService.isEnabled()) {
  logger.debug("[getTracer] Instrumentation service found but is disabled.");
  return null;
  }
- logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
+ logger.debug(
+ "[getTracer] Successfully retrieved enabled instrumentation service."
+ );
  return instrumentationService.getTracer("eliza.llm.openai");
  }
  async function startLlmSpan(runtime, spanName, attributes, fn) {
@@ -541,28 +548,37 @@ async function startLlmSpan(runtime, spanName, attributes, fn) {
  return fn(dummySpan);
  }
  const activeContext = context.active();
- return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
- try {
- const result = await fn(span);
- span.setStatus({ code: SpanStatusCode.OK });
- span.end();
- return result;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- span.recordException(error);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- span.end();
- throw error;
+ return tracer.startActiveSpan(
+ spanName,
+ { attributes },
+ activeContext,
+ async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ span.end();
+ return result;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ span.recordException(error);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ span.end();
+ throw error;
+ }
  }
- });
+ );
  }
  function getSetting(runtime, key, defaultValue) {
  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
  }
  function getBaseURL(runtime) {
- const defaultBaseURL = getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
- logger.debug(`[OpenAI] Default base URL: ${defaultBaseURL}`);
- return getProviderBaseURL(runtime, "openai", defaultBaseURL);
+ const baseURL = getSetting(
+ runtime,
+ "OPENAI_BASE_URL",
+ "https://api.openai.com/v1"
+ );
+ logger.debug(`[OpenAI] Default base URL: ${baseURL}`);
+ return baseURL;
  }
  function getEmbeddingBaseURL(runtime) {
  const embeddingURL = getSetting(runtime, "OPENAI_EMBEDDING_URL");
@@ -612,99 +628,113 @@ async function generateObjectByModelType(runtime, params, modelType, getModelFn)
  "llm.request.temperature": temperature,
  "llm.request.schema_present": schemaPresent
  };
- return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": params.prompt });
- if (schemaPresent) {
- span.addEvent("llm.request.schema", {
- schema: JSON.stringify(params.schema, safeReplacer())
- });
- logger.info(
- `Using ${modelType} without schema validation (schema provided but output=no-schema)`
- );
- }
- try {
- const { object, usage } = await generateObject({
- model: openai.languageModel(modelName),
- output: "no-schema",
- prompt: params.prompt,
- temperature,
- experimental_repairText: getJsonRepairFunction()
- });
- span.addEvent("llm.response.processed", {
- "response.object": JSON.stringify(object, safeReplacer())
- });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
+ return startLlmSpan(
+ runtime,
+ "LLM.generateObject",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+ if (schemaPresent) {
+ span.addEvent("llm.request.schema", {
+ schema: JSON.stringify(params.schema, safeReplacer())
  });
- emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+ logger.info(
+ `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+ );
  }
- return object;
- } catch (error) {
- if (error instanceof JSONParseError) {
- logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
- span.recordException(error);
- span.addEvent("llm.error.json_parse", {
- "error.message": error.message,
- "error.text": error.text
+ try {
+ const { object, usage } = await generateObject({
+ model: openai.languageModel(modelName),
+ output: "no-schema",
+ prompt: params.prompt,
+ temperature,
+ experimental_repairText: getJsonRepairFunction()
  });
- span.addEvent("llm.repair.attempt");
- const repairFunction = getJsonRepairFunction();
- const repairedJsonString = await repairFunction({
- text: error.text,
- error
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(object, safeReplacer())
  });
- if (repairedJsonString) {
- try {
- const repairedObject = JSON.parse(repairedJsonString);
- span.addEvent("llm.repair.success", {
- repaired_object: JSON.stringify(repairedObject, safeReplacer())
- });
- logger.info("[generateObject] Successfully repaired JSON.");
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "JSON parsing failed but was repaired"
- });
- return repairedObject;
- } catch (repairParseError) {
- const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
- logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
- const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
- span.recordException(exception);
- span.addEvent("llm.repair.parse_error", {
- "error.message": message
- });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(
+ runtime,
+ modelType,
+ params.prompt,
+ usage
+ );
+ }
+ return object;
+ } catch (error) {
+ if (error instanceof JSONParseError) {
+ logger.error(
+ `[generateObject] Failed to parse JSON: ${error.message}`
+ );
+ span.recordException(error);
+ span.addEvent("llm.error.json_parse", {
+ "error.message": error.message,
+ "error.text": error.text
+ });
+ span.addEvent("llm.repair.attempt");
+ const repairFunction = getJsonRepairFunction();
+ const repairedJsonString = await repairFunction({
+ text: error.text,
+ error
+ });
+ if (repairedJsonString) {
+ try {
+ const repairedObject = JSON.parse(repairedJsonString);
+ span.addEvent("llm.repair.success", {
+ repaired_object: JSON.stringify(repairedObject, safeReplacer())
+ });
+ logger.info("[generateObject] Successfully repaired JSON.");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "JSON parsing failed but was repaired"
+ });
+ return repairedObject;
+ } catch (repairParseError) {
+ const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+ logger.error(
+ `[generateObject] Failed to parse repaired JSON: ${message}`
+ );
+ const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+ span.recordException(exception);
+ span.addEvent("llm.repair.parse_error", {
+ "error.message": message
+ });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${message}`
+ });
+ throw repairParseError;
+ }
+ } else {
+ const errMsg = error instanceof Error ? error.message : String(error);
+ logger.error("[generateObject] JSON repair failed.");
+ span.addEvent("llm.repair.failed");
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `JSON repair failed: ${message}`
+ message: `JSON repair failed: ${errMsg}`
  });
- throw repairParseError;
+ throw error;
  }
  } else {
- const errMsg = error instanceof Error ? error.message : String(error);
- logger.error("[generateObject] JSON repair failed.");
- span.addEvent("llm.repair.failed");
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`[generateObject] Unknown error: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `JSON repair failed: ${errMsg}`
+ message
  });
  throw error;
  }
- } else {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`[generateObject] Unknown error: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message
- });
- throw error;
  }
  }
- });
+ );
  }
  function getJsonRepairFunction() {
  return async ({ text, error }) => {
@@ -792,15 +822,21 @@ var openaiPlugin = {
  headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
  });
  if (!response.ok) {
- logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ logger.warn(
+ `OpenAI API key validation failed: ${response.statusText}`
+ );
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  } else {
  logger.log("OpenAI API key validated successfully");
  }
  } catch (fetchError) {
  const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
  logger.warn(`Error validating OpenAI API key: ${message}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  }
  } catch (error) {
  const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
@@ -858,83 +894,98 @@ var openaiPlugin = {
  "llm.request.embedding.dimensions": embeddingDimension,
  "input.text.length": text.length
  };
- return startLlmSpan(runtime, "LLM.embedding", attributes, async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": text });
- const embeddingBaseURL = getEmbeddingBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
- });
- throw new Error("OpenAI API key not configured");
- }
- try {
- const response = await fetch(`${embeddingBaseURL}/embeddings`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: embeddingModelName,
- input: text
- })
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
- });
- if (!response.ok) {
- logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
- span.setAttributes({ "error.api.status": response.status });
+ return startLlmSpan(
+ runtime,
+ "LLM.embedding",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+ message: "OpenAI API key not configured"
  });
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
+ throw new Error("OpenAI API key not configured");
  }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger.error("API returned invalid structure");
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "API returned invalid structure"
+ try {
+ const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: text
+ })
  });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ logger.error(
+ `OpenAI API error: ${response.status} - ${response.statusText}`
+ );
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.4;
+ return errorVector;
+ }
+ const data = await response.json();
+ if (!data?.data?.[0]?.embedding) {
+ logger.error("API returned invalid structure");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "API returned invalid structure"
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.5;
+ return errorVector;
+ }
+ const embedding = data.data[0].embedding;
+ span.setAttribute(
+ "llm.response.embedding.vector_length",
+ embedding.length
+ );
+ if (data.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+ "llm.usage.total_tokens": data.usage.total_tokens
+ });
+ const usage = {
+ promptTokens: data.usage.prompt_tokens,
+ completionTokens: 0,
+ totalTokens: data.usage.total_tokens
+ };
+ emitModelUsageEvent(
+ runtime,
+ ModelType.TEXT_EMBEDDING,
+ text,
+ usage
+ );
+ }
+ logger.log(`Got valid embedding with length ${embedding.length}`);
+ return embedding;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error generating embedding: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
  const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.5;
+ errorVector[0] = 0.6;
  return errorVector;
  }
- const embedding = data.data[0].embedding;
- span.setAttribute("llm.response.embedding.vector_length", embedding.length);
- if (data.usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": data.usage.prompt_tokens,
- "llm.usage.total_tokens": data.usage.total_tokens
- });
- const usage = {
- promptTokens: data.usage.prompt_tokens,
- completionTokens: 0,
- totalTokens: data.usage.total_tokens
- };
- emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
- }
- logger.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`Error generating embedding: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.6;
- return errorVector;
  }
- });
+ );
  },
  [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
  return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
@@ -961,32 +1012,40 @@ var openaiPlugin = {
  "llm.request.presence_penalty": presence_penalty,
  "llm.request.stop_sequences": JSON.stringify(stopSequences)
  };
- return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const { text: openaiResponse, usage } = await generateText({
- model: openai.languageModel(modelName),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens: max_response_length,
- frequencyPenalty: frequency_penalty,
- presencePenalty: presence_penalty,
- stopSequences
- });
- span.setAttribute("llm.response.processed.length", openaiResponse.length);
- span.addEvent("llm.response.processed", {
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
- });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
+ return startLlmSpan(
+ runtime,
+ "LLM.generateText",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens: max_response_length,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ stopSequences
+ });
+ span.setAttribute(
+ "llm.response.processed.length",
+ openaiResponse.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
  });
- emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+ }
+ return openaiResponse;
  }
- return openaiResponse;
- });
+ );
  },
  [ModelType.TEXT_LARGE]: async (runtime, {
  prompt,
@@ -1010,32 +1069,40 @@ var openaiPlugin = {
  "llm.request.presence_penalty": presencePenalty,
  "llm.request.stop_sequences": JSON.stringify(stopSequences)
  };
- return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const { text: openaiResponse, usage } = await generateText({
- model: openai.languageModel(modelName),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens,
- frequencyPenalty,
- presencePenalty,
- stopSequences
- });
- span.setAttribute("llm.response.processed.length", openaiResponse.length);
- span.addEvent("llm.response.processed", {
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
- });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
+ return startLlmSpan(
+ runtime,
+ "LLM.generateText",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences
+ });
+ span.setAttribute(
+ "llm.response.processed.length",
+ openaiResponse.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
  });
- emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+ }
+ return openaiResponse;
  }
- return openaiResponse;
- });
+ );
  },
  [ModelType.IMAGE]: async (runtime, params) => {
  const n = params.n || 1;
@@ -1049,57 +1116,64 @@ var openaiPlugin = {
  "llm.request.image.size": size,
  "llm.request.image.count": n
  };
- return startLlmSpan(runtime, "LLM.imageGeneration", attributes, async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
- });
- throw new Error("OpenAI API key not configured");
- }
- try {
- const response = await fetch(`${baseURL}/images/generations`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- prompt,
- n,
- size
- })
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
- });
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
+ return startLlmSpan(
+ runtime,
+ "LLM.imageGeneration",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
+ message: "OpenAI API key not configured"
  });
- throw new Error(`Failed to generate image: ${response.statusText}`);
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${baseURL}/images/generations`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ prompt,
+ n,
+ size
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(
+ `Failed to generate image: ${response.statusText}`
+ );
+ }
+ const data = await response.json();
+ const typedData = data;
+ span.addEvent("llm.response.processed", {
+ "response.urls": JSON.stringify(typedData.data)
+ });
+ return typedData.data;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
  }
- const data = await response.json();
- const typedData = data;
- span.addEvent("llm.response.processed", {
- "response.urls": JSON.stringify(typedData.data)
- });
- return typedData.data;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- throw error;
  }
- });
+ );
  },
  [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
  let imageUrl;
@@ -1130,102 +1204,113 @@ var openaiPlugin = {
  ]
  }
  ];
- return startLlmSpan(runtime, "LLM.imageDescription", attributes, async (span) => {
- span.addEvent("llm.prompt", {
- "prompt.content": JSON.stringify(messages, safeReplacer())
- });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- logger.error("OpenAI API key not set");
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
- });
- return {
- title: "Failed to analyze image",
- description: "API key not configured"
- };
- }
- try {
- const response = await fetch(`${baseURL}/chat/completions`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- },
- body: JSON.stringify({
- model: modelName,
- messages,
- max_tokens: maxTokens
- })
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
+ return startLlmSpan(
+ runtime,
+ "LLM.imageDescription",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.content": JSON.stringify(messages, safeReplacer())
  });
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ logger.error("OpenAI API key not set");
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
+ message: "OpenAI API key not configured"
  });
- throw new Error(`OpenAI API error: ${response.status}`);
+ return {
+ title: "Failed to analyze image",
+ description: "API key not configured"
+ };
  }
- const result = await response.json();
- const typedResult = result;
- const content = typedResult.choices?.[0]?.message?.content;
- if (typedResult.usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
- "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
- "llm.usage.total_tokens": typedResult.usage.total_tokens
+ try {
+ const response = await fetch(`${baseURL}/chat/completions`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: modelName,
+ messages,
+ max_tokens: maxTokens
+ })
  });
- emitModelUsageEvent(
- runtime,
- ModelType.IMAGE_DESCRIPTION,
- typeof params === "string" ? params : params.prompt || "",
- {
- promptTokens: typedResult.usage.prompt_tokens,
- completionTokens: typedResult.usage.completion_tokens,
- totalTokens: typedResult.usage.total_tokens
- }
- );
- }
- if (typedResult.choices?.[0]?.finish_reason) {
- span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
- }
- if (!content) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "No content in API response"
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
+ });
+ throw new Error(`OpenAI API error: ${response.status}`);
+ }
+ const result = await response.json();
+ const typedResult = result;
+ const content = typedResult.choices?.[0]?.message?.content;
+ if (typedResult.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
+ "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
+ "llm.usage.total_tokens": typedResult.usage.total_tokens
+ });
+ emitModelUsageEvent(
+ runtime,
+ ModelType.IMAGE_DESCRIPTION,
+ typeof params === "string" ? params : params.prompt || "",
+ {
+ promptTokens: typedResult.usage.prompt_tokens,
+ completionTokens: typedResult.usage.completion_tokens,
+ totalTokens: typedResult.usage.total_tokens
+ }
+ );
+ }
+ if (typedResult.choices?.[0]?.finish_reason) {
+ span.setAttribute(
+ "llm.response.finish_reason",
+ typedResult.choices[0].finish_reason
+ );
+ }
+ if (!content) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "No content in API response"
+ });
+ return {
+ title: "Failed to analyze image",
+ description: "No response from API"
+ };
+ }
+ const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
+ const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+ const processedResult = { title, description };
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(
+ processedResult,
+ safeReplacer()
+ )
  });
+ return processedResult;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error analyzing image: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
  return {
  title: "Failed to analyze image",
- description: "No response from API"
+ description: `Error: ${message}`
  };
  }
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
- const title = titleMatch?.[1]?.trim() || "Image Analysis";
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
- const processedResult = { title, description };
- span.addEvent("llm.response.processed", {
- "response.object": JSON.stringify(processedResult, safeReplacer())
- });
- return processedResult;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`Error analyzing image: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- return {
- title: "Failed to analyze image",
- description: `Error: ${message}`
- };
  }
- });
+ );
  },
  [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
  logger.log("audioBuffer", audioBuffer);
@@ -1237,69 +1322,87 @@ var openaiPlugin = {
  "llm.request.model": modelName,
  "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
  };
- return startLlmSpan(runtime, "LLM.transcription", attributes, async (span) => {
- span.addEvent("llm.prompt", {
- "prompt.info": "Audio buffer for transcription"
- });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
- });
- throw new Error("OpenAI API key not configured - Cannot make request");
- }
- if (!audioBuffer || audioBuffer.length === 0) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "Audio buffer is empty or invalid"
- });
- throw new Error("Audio buffer is empty or invalid for transcription");
- }
- const formData = new FormData();
- formData.append("file", new Blob([audioBuffer]), "recording.mp3");
- formData.append("model", "whisper-1");
- try {
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`
- },
- body: formData
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
+ return startLlmSpan(
+ runtime,
+ "LLM.transcription",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.info": "Audio buffer for transcription"
  });
- logger.log("response", response);
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
+ message: "OpenAI API key not configured"
  });
- throw new Error(`Failed to transcribe audio: ${response.statusText}`);
+ throw new Error(
+ "OpenAI API key not configured - Cannot make request"
+ );
+ }
+ if (!audioBuffer || audioBuffer.length === 0) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "Audio buffer is empty or invalid"
+ });
+ throw new Error(
+ "Audio buffer is empty or invalid for transcription"
+ );
+ }
+ const formData = new FormData();
+ formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+ formData.append("model", "whisper-1");
+ try {
+ const response = await fetch(`${baseURL}/audio/transcriptions`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: formData
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ logger.log("response", response);
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(
+ `Failed to transcribe audio: ${response.statusText}`
+ );
+ }
+ const data = await response.json();
+ const processedText = data.text;
+ span.setAttribute(
+ "llm.response.processed.length",
+ processedText.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.text": processedText
+ });
+ return processedText;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
  }
- const data = await response.json();
- const processedText = data.text;
- span.setAttribute("llm.response.processed.length", processedText.length);
- span.addEvent("llm.response.processed", {
- "response.text": processedText
- });
- return processedText;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- throw error;
  }
- });
+ );
  },
  [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
- const ttsModelName = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+ const ttsModelName = getSetting(
+ runtime,
+ "OPENAI_TTS_MODEL",
+ "gpt-4o-mini-tts"
+ );
  const attributes = {
  "llm.vendor": "OpenAI",
  "llm.request.type": "tts",
@@ -1325,10 +1428,20 @@ var openaiPlugin = {
  });
  },
  [ModelType.OBJECT_SMALL]: async (runtime, params) => {
- return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
+ return generateObjectByModelType(
+ runtime,
+ params,
+ ModelType.OBJECT_SMALL,
+ getSmallModel
+ );
  },
  [ModelType.OBJECT_LARGE]: async (runtime, params) => {
- return generateObjectByModelType(runtime, params, ModelType.OBJECT_LARGE, getLargeModel);
+ return generateObjectByModelType(
+ runtime,
+ params,
+ ModelType.OBJECT_LARGE,
+ getLargeModel
+ );
  }
  },
  tests: [
@@ -1345,9 +1458,14 @@ var openaiPlugin = {
  }
  });
  const data = await response.json();
- logger.log("Models Available:", data?.data?.length ?? "N/A");
+ logger.log(
+ "Models Available:",
+ data?.data?.length ?? "N/A"
+ );
  if (!response.ok) {
- throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
+ throw new Error(
+ `Failed to validate OpenAI API key: ${response.statusText}`
+ );
  }
  }
  },
@@ -1355,9 +1473,12 @@ var openaiPlugin = {
  name: "openai_test_text_embedding",
  fn: async (runtime) => {
  try {
- const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
- text: "Hello, world!"
- });
+ const embedding = await runtime.useModel(
+ ModelType.TEXT_EMBEDDING,
+ {
+ text: "Hello, world!"
+ }
+ );
  logger.log("embedding", embedding);
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
@@ -1433,7 +1554,10 @@ var openaiPlugin = {
  if (result && typeof result === "object" && "title" in result && "description" in result) {
  logger.log("Image description:", result);
  } else {
- logger.error("Invalid image description result format:", result);
+ logger.error(
+ "Invalid image description result format:",
+ result
+ );
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
@@ -1441,7 +1565,9 @@ var openaiPlugin = {
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
- logger.error(`Error in openai_test_image_description: ${message}`);
+ logger.error(
+ `Error in openai_test_image_description: ${message}`
+ );
  }
  }
  },
@@ -1470,9 +1596,14 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_encode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer encode!";
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+ const tokens = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_ENCODE,
+ { prompt }
+ );
  if (!Array.isArray(tokens) || tokens.length === 0) {
- throw new Error("Failed to tokenize text: expected non-empty array of tokens");
+ throw new Error(
+ "Failed to tokenize text: expected non-empty array of tokens"
+ );
  }
  logger.log("Tokenized output:", tokens);
  }
@@ -1481,8 +1612,14 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_decode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer decode!";
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
- const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
+ const tokens = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_ENCODE,
+ { prompt }
+ );
+ const decodedText = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_DECODE,
+ { tokens }
+ );
  if (decodedText !== prompt) {
  throw new Error(
  `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`