@elizaos/plugin-openai 1.0.0-beta.71 → 1.0.0-beta.73

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -503,28 +503,20 @@ import { encodingForModel } from "js-tiktoken";
  import { fetch, FormData } from "undici";
  function getTracer(runtime) {
  const availableServices = Array.from(runtime.getAllServices().keys());
- logger.debug(
- `[getTracer] Available services: ${JSON.stringify(availableServices)}`
- );
- logger.debug(
- `[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`
- );
+ logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
+ logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
  const instrumentationService = runtime.getService(
  ServiceType.INSTRUMENTATION
  );
  if (!instrumentationService) {
- logger.warn(
- `[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`
- );
+ logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
  return null;
  }
  if (!instrumentationService.isEnabled()) {
  logger.debug("[getTracer] Instrumentation service found but is disabled.");
  return null;
  }
- logger.debug(
- "[getTracer] Successfully retrieved enabled instrumentation service."
- );
+ logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
  return instrumentationService.getTracer("eliza.llm.openai");
  }
  async function startLlmSpan(runtime, spanName, attributes, fn) {
@@ -548,35 +540,26 @@ async function startLlmSpan(runtime, spanName, attributes, fn) {
  return fn(dummySpan);
  }
  const activeContext = context.active();
- return tracer.startActiveSpan(
- spanName,
- { attributes },
- activeContext,
- async (span) => {
- try {
- const result = await fn(span);
- span.setStatus({ code: SpanStatusCode.OK });
- span.end();
- return result;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- span.recordException(error);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- span.end();
- throw error;
- }
+ return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ span.end();
+ return result;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ span.recordException(error);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ span.end();
+ throw error;
  }
- );
+ });
  }
  function getSetting(runtime, key, defaultValue) {
  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
  }
  function getBaseURL(runtime) {
- const baseURL = getSetting(
- runtime,
- "OPENAI_BASE_URL",
- "https://api.openai.com/v1"
- );
+ const baseURL = getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
  logger.debug(`[OpenAI] Default base URL: ${baseURL}`);
  return baseURL;
  }
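Functionally both sides of this hunk are identical: startLlmSpan resolves a tracer via getTracer(runtime), invokes the callback with a dummy span when instrumentation is unavailable, and otherwise wraps it in tracer.startActiveSpan, ending the span on both the success and error paths. A minimal caller sketch; the span name, attributes, and callModel helper here are illustrative, not taken from the package:

// Sketch of a caller; no fallback branch is needed because startLlmSpan
// supplies a dummy span when no tracer is available.
const text = await startLlmSpan(
  runtime,
  "LLM.example",
  { "llm.vendor": "OpenAI" },
  async (span) => {
    span.addEvent("llm.prompt", { "prompt.content": "Hello" });
    return callModel("Hello"); // hypothetical model call
  }
);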
@@ -631,113 +614,99 @@ async function generateObjectByModelType(runtime, params, modelType, getModelFn)
  "llm.request.temperature": temperature,
  "llm.request.schema_present": schemaPresent
  };
- return startLlmSpan(
- runtime,
- "LLM.generateObject",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": params.prompt });
- if (schemaPresent) {
- span.addEvent("llm.request.schema", {
- schema: JSON.stringify(params.schema, safeReplacer())
+ return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+ if (schemaPresent) {
+ span.addEvent("llm.request.schema", {
+ schema: JSON.stringify(params.schema, safeReplacer())
+ });
+ logger.info(
+ `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+ );
+ }
+ try {
+ const { object, usage } = await generateObject({
+ model: openai.languageModel(modelName),
+ output: "no-schema",
+ prompt: params.prompt,
+ temperature,
+ experimental_repairText: getJsonRepairFunction()
+ });
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(object, safeReplacer())
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
  });
- logger.info(
- `Using ${modelType} without schema validation (schema provided but output=no-schema)`
- );
+ emitModelUsageEvent(runtime, modelType, params.prompt, usage);
  }
- try {
- const { object, usage } = await generateObject({
- model: openai.languageModel(modelName),
- output: "no-schema",
- prompt: params.prompt,
- temperature,
- experimental_repairText: getJsonRepairFunction()
+ return object;
+ } catch (error) {
+ if (error instanceof JSONParseError) {
+ logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+ span.recordException(error);
+ span.addEvent("llm.error.json_parse", {
+ "error.message": error.message,
+ "error.text": error.text
  });
- span.addEvent("llm.response.processed", {
- "response.object": JSON.stringify(object, safeReplacer())
+ span.addEvent("llm.repair.attempt");
+ const repairFunction = getJsonRepairFunction();
+ const repairedJsonString = await repairFunction({
+ text: error.text,
+ error
  });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
- });
- emitModelUsageEvent(
- runtime,
- modelType,
- params.prompt,
- usage
- );
- }
- return object;
- } catch (error) {
- if (error instanceof JSONParseError) {
- logger.error(
- `[generateObject] Failed to parse JSON: ${error.message}`
- );
- span.recordException(error);
- span.addEvent("llm.error.json_parse", {
- "error.message": error.message,
- "error.text": error.text
- });
- span.addEvent("llm.repair.attempt");
- const repairFunction = getJsonRepairFunction();
- const repairedJsonString = await repairFunction({
- text: error.text,
- error
- });
- if (repairedJsonString) {
- try {
- const repairedObject = JSON.parse(repairedJsonString);
- span.addEvent("llm.repair.success", {
- repaired_object: JSON.stringify(repairedObject, safeReplacer())
- });
- logger.info("[generateObject] Successfully repaired JSON.");
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "JSON parsing failed but was repaired"
- });
- return repairedObject;
- } catch (repairParseError) {
- const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
- logger.error(
- `[generateObject] Failed to parse repaired JSON: ${message}`
- );
- const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
- span.recordException(exception);
- span.addEvent("llm.repair.parse_error", {
- "error.message": message
- });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: `JSON repair failed: ${message}`
- });
- throw repairParseError;
- }
- } else {
- const errMsg = error instanceof Error ? error.message : String(error);
- logger.error("[generateObject] JSON repair failed.");
- span.addEvent("llm.repair.failed");
+ if (repairedJsonString) {
+ try {
+ const repairedObject = JSON.parse(repairedJsonString);
+ span.addEvent("llm.repair.success", {
+ repaired_object: JSON.stringify(repairedObject, safeReplacer())
+ });
+ logger.info("[generateObject] Successfully repaired JSON.");
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: `JSON repair failed: ${errMsg}`
+ message: "JSON parsing failed but was repaired"
+ });
+ return repairedObject;
+ } catch (repairParseError) {
+ const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+ logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+ const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+ span.recordException(exception);
+ span.addEvent("llm.repair.parse_error", {
+ "error.message": message
  });
- throw error;
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${message}`
+ });
+ throw repairParseError;
  }
  } else {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`[generateObject] Unknown error: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
+ const errMsg = error instanceof Error ? error.message : String(error);
+ logger.error("[generateObject] JSON repair failed.");
+ span.addEvent("llm.repair.failed");
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message
+ message: `JSON repair failed: ${errMsg}`
  });
  throw error;
  }
+ } else {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`[generateObject] Unknown error: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message
+ });
+ throw error;
  }
  }
- );
+ });
  }
  function getJsonRepairFunction() {
  return async ({ text, error }) => {
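The error path above depends on the contract of getJsonRepairFunction: it receives the unparseable text plus the JSONParseError and resolves to either a repaired JSON string or null. The function body falls outside this hunk, so the sketch below only illustrates that contract; it is not the package's implementation:

// Illustrative ({ text, error }) => Promise<string|null> repair function;
// the package's real body is not shown in this diff.
function exampleRepairFunction() {
  return async ({ text, error }) => {
    try {
      // error is available for diagnostics; this sketch only cleans the text.
      const cleaned = text.replace(/```(?:json)?/g, "").trim(); // drop stray code fences
      JSON.parse(cleaned); // throws if still invalid
      return cleaned;
    } catch {
      return null; // null tells the caller the repair failed
    }
  };
}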
@@ -827,21 +796,15 @@ var openaiPlugin = {
  headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
  });
  if (!response.ok) {
- logger.warn(
- `OpenAI API key validation failed: ${response.statusText}`
- );
- logger.warn(
- "OpenAI functionality will be limited until a valid API key is provided"
- );
+ logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
+ logger.warn("OpenAI functionality will be limited until a valid API key is provided");
  } else {
  logger.log("OpenAI API key validated successfully");
  }
  } catch (fetchError) {
  const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
  logger.warn(`Error validating OpenAI API key: ${message}`);
- logger.warn(
- "OpenAI functionality will be limited until a valid API key is provided"
- );
+ logger.warn("OpenAI functionality will be limited until a valid API key is provided");
  }
  } catch (error) {
  const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
@@ -899,98 +862,83 @@ var openaiPlugin = {
  "llm.request.embedding.dimensions": embeddingDimension,
  "input.text.length": text.length
  };
- return startLlmSpan(
- runtime,
- "LLM.embedding",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": text });
- const embeddingBaseURL = getEmbeddingBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
+ return startLlmSpan(runtime, "LLM.embedding", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: text
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+ span.setAttributes({ "error.api.status": response.status });
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
+ message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
  });
- throw new Error("OpenAI API key not configured");
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.4;
+ return errorVector;
  }
- try {
- const response = await fetch(`${embeddingBaseURL}/embeddings`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: embeddingModelName,
- input: text
- })
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
+ const data = await response.json();
+ if (!data?.data?.[0]?.embedding) {
+ logger.error("API returned invalid structure");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "API returned invalid structure"
  });
- if (!response.ok) {
- logger.error(
- `OpenAI API error: ${response.status} - ${response.statusText}`
- );
- span.setAttributes({ "error.api.status": response.status });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
- });
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
- }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger.error("API returned invalid structure");
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "API returned invalid structure"
- });
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.5;
- return errorVector;
- }
- const embedding = data.data[0].embedding;
- span.setAttribute(
- "llm.response.embedding.vector_length",
- embedding.length
- );
- if (data.usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": data.usage.prompt_tokens,
- "llm.usage.total_tokens": data.usage.total_tokens
- });
- const usage = {
- promptTokens: data.usage.prompt_tokens,
- completionTokens: 0,
- totalTokens: data.usage.total_tokens
- };
- emitModelUsageEvent(
- runtime,
- ModelType.TEXT_EMBEDDING,
- text,
- usage
- );
- }
- logger.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`Error generating embedding: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
  const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.6;
+ errorVector[0] = 0.5;
  return errorVector;
  }
+ const embedding = data.data[0].embedding;
+ span.setAttribute("llm.response.embedding.vector_length", embedding.length);
+ if (data.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+ "llm.usage.total_tokens": data.usage.total_tokens
+ });
+ const usage = {
+ promptTokens: data.usage.prompt_tokens,
+ completionTokens: 0,
+ totalTokens: data.usage.total_tokens
+ };
+ emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
+ }
+ logger.log(`Got valid embedding with length ${embedding.length}`);
+ return embedding;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error generating embedding: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.6;
+ return errorVector;
  }
- );
+ });
  },
  [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
  return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
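Note the embedding handler's failure convention, unchanged between versions: instead of throwing on API errors it returns a zero vector of the configured dimension whose first component marks the failure class (0.4 for an HTTP error, 0.5 for a malformed response body, 0.6 for a thrown exception). A consumer that wants to detect these sentinels could use a heuristic check like the hypothetical helper below:

// Hypothetical consumer-side check for the sentinel error vectors above.
// Heuristic only: a genuine embedding could in principle match this shape.
function isEmbeddingErrorVector(vec) {
  return Array.isArray(vec) &&
    [0.4, 0.5, 0.6].includes(vec[0]) &&
    vec.slice(1).every((v) => v === 0);
}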
@@ -1017,40 +965,32 @@ var openaiPlugin = {
  "llm.request.presence_penalty": presence_penalty,
  "llm.request.stop_sequences": JSON.stringify(stopSequences)
  };
- return startLlmSpan(
- runtime,
- "LLM.generateText",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const { text: openaiResponse, usage } = await generateText({
- model: openai.languageModel(modelName),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens: max_response_length,
- frequencyPenalty: frequency_penalty,
- presencePenalty: presence_penalty,
- stopSequences
- });
- span.setAttribute(
- "llm.response.processed.length",
- openaiResponse.length
- );
- span.addEvent("llm.response.processed", {
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens: max_response_length,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ stopSequences
+ });
+ span.setAttribute("llm.response.processed.length", openaiResponse.length);
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
  });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
- });
- emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
- }
- return openaiResponse;
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
  }
- );
+ return openaiResponse;
+ });
  },
  [ModelType.TEXT_LARGE]: async (runtime, {
  prompt,
@@ -1074,40 +1014,32 @@ var openaiPlugin = {
  "llm.request.presence_penalty": presencePenalty,
  "llm.request.stop_sequences": JSON.stringify(stopSequences)
  };
- return startLlmSpan(
- runtime,
- "LLM.generateText",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const { text: openaiResponse, usage } = await generateText({
- model: openai.languageModel(modelName),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens,
- frequencyPenalty,
- presencePenalty,
- stopSequences
- });
- span.setAttribute(
- "llm.response.processed.length",
- openaiResponse.length
- );
- span.addEvent("llm.response.processed", {
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences
+ });
+ span.setAttribute("llm.response.processed.length", openaiResponse.length);
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
  });
- if (usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": usage.promptTokens,
- "llm.usage.completion_tokens": usage.completionTokens,
- "llm.usage.total_tokens": usage.totalTokens
- });
- emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
- }
- return openaiResponse;
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
  }
- );
+ return openaiResponse;
+ });
  },
  [ModelType.IMAGE]: async (runtime, params) => {
  const n = params.n || 1;
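The TEXT_SMALL and TEXT_LARGE hunks above are the same refactor applied twice; the handlers differ only in the resolved model name, the parameter defaults, and the ModelType reported to emitModelUsageEvent. A typical call site, following the destructuring shown in the handlers; the prompt text and option values are illustrative:

// Illustrative call; unspecified options fall back to the handler's defaults.
const reply = await runtime.useModel(ModelType.TEXT_LARGE, {
  prompt: "Summarize this changelog in one sentence.",
  temperature: 0.7,
  stopSequences: []
});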
@@ -1121,64 +1053,57 @@ var openaiPlugin = {
  "llm.request.image.size": size,
  "llm.request.image.count": n
  };
- return startLlmSpan(
- runtime,
- "LLM.imageGeneration",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", { "prompt.content": prompt });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
+ return startLlmSpan(runtime, "LLM.imageGeneration", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${baseURL}/images/generations`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ prompt,
+ n,
+ size
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
+ message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
  });
- throw new Error("OpenAI API key not configured");
- }
- try {
- const response = await fetch(`${baseURL}/images/generations`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- prompt,
- n,
- size
- })
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
- });
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
- });
- throw new Error(
- `Failed to generate image: ${response.statusText}`
- );
- }
- const data = await response.json();
- const typedData = data;
- span.addEvent("llm.response.processed", {
- "response.urls": JSON.stringify(typedData.data)
- });
- return typedData.data;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- throw error;
+ throw new Error(`Failed to generate image: ${response.statusText}`);
  }
+ const data = await response.json();
+ const typedData = data;
+ span.addEvent("llm.response.processed", {
+ "response.urls": JSON.stringify(typedData.data)
+ });
+ return typedData.data;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
  }
- );
+ });
  },
  [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
  let imageUrl;
@@ -1212,114 +1137,112 @@ var openaiPlugin = {
  ]
  }
  ];
- return startLlmSpan(
- runtime,
- "LLM.imageDescription",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", {
- "prompt.content": JSON.stringify(messages, safeReplacer())
+ return startLlmSpan(runtime, "LLM.imageDescription", attributes, async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.content": JSON.stringify(messages, safeReplacer())
+ });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ logger.error("OpenAI API key not set");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ return {
+ title: "Failed to analyze image",
+ description: "API key not configured"
+ };
+ }
+ try {
+ const requestBody = {
+ model: modelName,
+ messages,
+ max_tokens: maxTokens
+ };
+ const response = await fetch(`${baseURL}/chat/completions`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify(requestBody)
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
  });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- logger.error("OpenAI API key not set");
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
+ message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
  });
- return {
- title: "Failed to analyze image",
- description: "API key not configured"
- };
+ throw new Error(`OpenAI API error: ${response.status}`);
  }
- try {
- const requestBody = {
- model: modelName,
- messages,
- max_tokens: maxTokens
- };
- const response = await fetch(`${baseURL}/chat/completions`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- },
- body: JSON.stringify(requestBody)
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
+ const result = await response.json();
+ const typedResult = result;
+ const content = typedResult.choices?.[0]?.message?.content;
+ console.log("############## CONTENT", content);
+ if (typedResult.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
+ "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
+ "llm.usage.total_tokens": typedResult.usage.total_tokens
  });
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
- });
- throw new Error(`OpenAI API error: ${response.status}`);
- }
- const result = await response.json();
- const typedResult = result;
- const content = typedResult.choices?.[0]?.message?.content;
- if (typedResult.usage) {
- span.setAttributes({
- "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
- "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
- "llm.usage.total_tokens": typedResult.usage.total_tokens
- });
- emitModelUsageEvent(
- runtime,
- ModelType.IMAGE_DESCRIPTION,
- typeof params === "string" ? params : params.prompt || "",
- {
- promptTokens: typedResult.usage.prompt_tokens,
- completionTokens: typedResult.usage.completion_tokens,
- totalTokens: typedResult.usage.total_tokens
- }
- );
- }
- if (typedResult.choices?.[0]?.finish_reason) {
- span.setAttribute(
- "llm.response.finish_reason",
- typedResult.choices[0].finish_reason
- );
- }
- if (!content) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "No content in API response"
- });
- return {
- title: "Failed to analyze image",
- description: "No response from API"
- };
- }
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
- const title = titleMatch?.[1]?.trim() || "Image Analysis";
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
- const processedResult = { title, description };
- span.addEvent("llm.response.processed", {
- "response.object": JSON.stringify(
- processedResult,
- safeReplacer()
- )
+ emitModelUsageEvent(
+ runtime,
+ ModelType.IMAGE_DESCRIPTION,
+ typeof params === "string" ? params : params.prompt || "",
+ {
+ promptTokens: typedResult.usage.prompt_tokens,
+ completionTokens: typedResult.usage.completion_tokens,
+ totalTokens: typedResult.usage.total_tokens
+ }
+ );
+ }
+ if (typedResult.choices?.[0]?.finish_reason) {
+ span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
+ }
+ if (!content) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "No content in API response"
  });
- return processedResult;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- logger.error(`Error analyzing image: ${message}`);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
  return {
  title: "Failed to analyze image",
- description: `Error: ${message}`
+ description: "No response from API"
  };
  }
+ console.log("######################## CONTENT", content);
+ const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
+ if (isCustomPrompt) {
+ span.addEvent("llm.response.raw_content", {
+ "response.content": content
+ });
+ return content;
+ }
+ const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
+ const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+ const processedResult = { title, description };
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(processedResult, safeReplacer())
+ });
+ return processedResult;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error analyzing image: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ return {
+ title: "Failed to analyze image",
+ description: `Error: ${message}`
+ };
  }
- );
+ });
  },
  [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
  logger.log("audioBuffer", audioBuffer);
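Beyond the reformatting, this hunk carries the one behavioral change in the release: when params is an object whose prompt differs from the default "Please analyze this image and provide a title and detailed description.", the raw model text is returned as-is instead of being parsed into a { title, description } object. (The new version also ships two console.log "CONTENT" debug statements on this path.) Both result shapes are sketched below; the imageUrl field name is an assumption, since the start of the handler falls outside this hunk:

// Default prompt: parsed result object.
const parsed = await runtime.useModel(ModelType.IMAGE_DESCRIPTION, {
  imageUrl: "https://example.com/photo.jpg" // field name assumed
});
// parsed => { title: "...", description: "..." }

// Custom prompt: the raw content string is returned unparsed.
const raw = await runtime.useModel(ModelType.IMAGE_DESCRIPTION, {
  imageUrl: "https://example.com/photo.jpg", // field name assumed
  prompt: "List every object visible in this image."
});
// raw => string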
@@ -1331,87 +1254,69 @@ var openaiPlugin = {
  "llm.request.model": modelName,
  "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
  };
- return startLlmSpan(
- runtime,
- "LLM.transcription",
- attributes,
- async (span) => {
- span.addEvent("llm.prompt", {
- "prompt.info": "Audio buffer for transcription"
+ return startLlmSpan(runtime, "LLM.transcription", attributes, async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.info": "Audio buffer for transcription"
+ });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
  });
- const baseURL = getBaseURL(runtime);
- const apiKey = getApiKey(runtime);
- if (!apiKey) {
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: "OpenAI API key not configured"
- });
- throw new Error(
- "OpenAI API key not configured - Cannot make request"
- );
- }
- if (!audioBuffer || audioBuffer.length === 0) {
+ throw new Error("OpenAI API key not configured - Cannot make request");
+ }
+ if (!audioBuffer || audioBuffer.length === 0) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "Audio buffer is empty or invalid"
+ });
+ throw new Error("Audio buffer is empty or invalid for transcription");
+ }
+ const formData = new FormData();
+ formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+ formData.append("model", "whisper-1");
+ try {
+ const response = await fetch(`${baseURL}/audio/transcriptions`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: formData
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ logger.log("response", response);
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
  span.setStatus({
  code: SpanStatusCode.ERROR,
- message: "Audio buffer is empty or invalid"
+ message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
  });
- throw new Error(
- "Audio buffer is empty or invalid for transcription"
- );
- }
- const formData = new FormData();
- formData.append("file", new Blob([audioBuffer]), "recording.mp3");
- formData.append("model", "whisper-1");
- try {
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${apiKey}`
- },
- body: formData
- });
- const responseClone = response.clone();
- const rawResponseBody = await responseClone.text();
- span.addEvent("llm.response.raw", {
- "response.body": rawResponseBody
- });
- logger.log("response", response);
- if (!response.ok) {
- span.setAttributes({ "error.api.status": response.status });
- span.setStatus({
- code: SpanStatusCode.ERROR,
- message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
- });
- throw new Error(
- `Failed to transcribe audio: ${response.statusText}`
- );
- }
- const data = await response.json();
- const processedText = data.text;
- span.setAttribute(
- "llm.response.processed.length",
- processedText.length
- );
- span.addEvent("llm.response.processed", {
- "response.text": processedText
- });
- return processedText;
- } catch (error) {
- const message = error instanceof Error ? error.message : String(error);
- const exception = error instanceof Error ? error : new Error(message);
- span.recordException(exception);
- span.setStatus({ code: SpanStatusCode.ERROR, message });
- throw error;
+ throw new Error(`Failed to transcribe audio: ${response.statusText}`);
  }
+ const data = await response.json();
+ const processedText = data.text;
+ span.setAttribute("llm.response.processed.length", processedText.length);
+ span.addEvent("llm.response.processed", {
+ "response.text": processedText
+ });
+ return processedText;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
  }
- );
+ });
  },
  [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
- const ttsModelName = getSetting(
- runtime,
- "OPENAI_TTS_MODEL",
- "gpt-4o-mini-tts"
- );
+ const ttsModelName = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
  const attributes = {
  "llm.vendor": "OpenAI",
  "llm.request.type": "tts",
@@ -1437,20 +1342,10 @@ var openaiPlugin = {
  });
  },
  [ModelType.OBJECT_SMALL]: async (runtime, params) => {
- return generateObjectByModelType(
- runtime,
- params,
- ModelType.OBJECT_SMALL,
- getSmallModel
- );
+ return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
  },
  [ModelType.OBJECT_LARGE]: async (runtime, params) => {
- return generateObjectByModelType(
- runtime,
- params,
- ModelType.OBJECT_LARGE,
- getLargeModel
- );
+ return generateObjectByModelType(runtime, params, ModelType.OBJECT_LARGE, getLargeModel);
  }
  },
  tests: [
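Both object-generation handlers delegate to generateObjectByModelType, differing only in the model resolver they pass (getSmallModel vs. getLargeModel). As the earlier generateObject hunk shows, a provided schema is logged on the span but not enforced, since the call always uses output: "no-schema". A usage sketch with an illustrative prompt:

// Illustrative call; any schema passed would be recorded but not
// validated against the result.
const release = await runtime.useModel(ModelType.OBJECT_SMALL, {
  prompt: 'Return JSON with fields "name" and "version" for this package.',
  temperature: 0
});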
@@ -1467,14 +1362,9 @@ var openaiPlugin = {
  }
  });
  const data = await response.json();
- logger.log(
- "Models Available:",
- data?.data?.length ?? "N/A"
- );
+ logger.log("Models Available:", data?.data?.length ?? "N/A");
  if (!response.ok) {
- throw new Error(
- `Failed to validate OpenAI API key: ${response.statusText}`
- );
+ throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
  }
  }
  },
@@ -1482,12 +1372,9 @@ var openaiPlugin = {
  name: "openai_test_text_embedding",
  fn: async (runtime) => {
  try {
- const embedding = await runtime.useModel(
- ModelType.TEXT_EMBEDDING,
- {
- text: "Hello, world!"
- }
- );
+ const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
+ text: "Hello, world!"
+ });
  logger.log("embedding", embedding);
  } catch (error) {
  const message = error instanceof Error ? error.message : String(error);
@@ -1563,10 +1450,7 @@ var openaiPlugin = {
  if (result && typeof result === "object" && "title" in result && "description" in result) {
  logger.log("Image description:", result);
  } else {
- logger.error(
- "Invalid image description result format:",
- result
- );
+ logger.error("Invalid image description result format:", result);
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
@@ -1574,9 +1458,7 @@ var openaiPlugin = {
  }
  } catch (e) {
  const message = e instanceof Error ? e.message : String(e);
- logger.error(
- `Error in openai_test_image_description: ${message}`
- );
+ logger.error(`Error in openai_test_image_description: ${message}`);
  }
  }
  },
@@ -1605,14 +1487,9 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_encode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer encode!";
- const tokens = await runtime.useModel(
- ModelType.TEXT_TOKENIZER_ENCODE,
- { prompt }
- );
+ const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
  if (!Array.isArray(tokens) || tokens.length === 0) {
- throw new Error(
- "Failed to tokenize text: expected non-empty array of tokens"
- );
+ throw new Error("Failed to tokenize text: expected non-empty array of tokens");
  }
  logger.log("Tokenized output:", tokens);
  }
@@ -1621,14 +1498,8 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_decode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer decode!";
- const tokens = await runtime.useModel(
- ModelType.TEXT_TOKENIZER_ENCODE,
- { prompt }
- );
- const decodedText = await runtime.useModel(
- ModelType.TEXT_TOKENIZER_DECODE,
- { tokens }
- );
+ const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+ const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
  if (decodedText !== prompt) {
  throw new Error(
  `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`