@elizaos/plugin-openai 2.0.0-alpha.5 → 2.0.0-alpha.537
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- package/LICENSE +21 -0
- package/README.md +163 -0
- package/dist/browser/index.browser.js +2 -2
- package/dist/browser/index.browser.js.map +12 -11
- package/dist/build.d.ts +1 -1
- package/dist/cjs/index.d.ts +2 -2
- package/dist/cjs/index.node.cjs +1533 -1076
- package/dist/cjs/index.node.js.map +10 -9
- package/dist/generated/specs/specs.d.ts +27 -27
- package/dist/index.browser.d.ts +2 -1
- package/dist/index.browser.d.ts.map +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.node.d.ts +2 -1
- package/dist/index.node.d.ts.map +1 -1
- package/dist/models/embedding.d.ts.map +1 -1
- package/dist/models/index.d.ts +1 -1
- package/dist/models/index.d.ts.map +1 -1
- package/dist/models/text.d.ts +5 -0
- package/dist/models/text.d.ts.map +1 -1
- package/dist/node/index.node.js +128 -16
- package/dist/node/index.node.js.map +12 -11
- package/dist/types/index.d.ts +16 -0
- package/dist/types/index.d.ts.map +1 -1
- package/dist/utils/config.d.ts +5 -0
- package/dist/utils/config.d.ts.map +1 -1
- package/dist/utils/events.d.ts +6 -0
- package/dist/utils/events.d.ts.map +1 -1
- package/dist/utils/index.d.ts +1 -1
- package/package.json +13 -11
package/dist/models/text.d.ts.map
CHANGED

@@ -1 +1 @@
-{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["../../models/text.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,kBAAkB,EAAE,aAAa,EAAiB,MAAM,eAAe,CAAC;AAItF,OAAO,KAAK,EAAE,gBAAgB,EAAc,MAAM,UAAU,CAAC;
+{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["../../models/text.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AAEH,OAAO,KAAK,EAAE,kBAAkB,EAAE,aAAa,EAAiB,MAAM,eAAe,CAAC;AAItF,OAAO,KAAK,EAAE,gBAAgB,EAAc,MAAM,UAAU,CAAC;AAoM7D;;;;;;;;GAQG;AACH,wBAAsB,eAAe,CACnC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC;AAED,wBAAsB,cAAc,CAClC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC;AAED,wBAAsB,gBAAgB,CACpC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC;AAED;;;;;;;;GAQG;AACH,wBAAsB,eAAe,CACnC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC;AAED,wBAAsB,cAAc,CAClC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC;AAED,wBAAsB,qBAAqB,CACzC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAOpC;AAED,wBAAsB,mBAAmB,CACvC,OAAO,EAAE,aAAa,EACtB,MAAM,EAAE,kBAAkB,GACzB,OAAO,CAAC,MAAM,GAAG,gBAAgB,CAAC,CAEpC"}
package/dist/node/index.node.js
CHANGED
@@ -82,9 +82,24 @@ function getEmbeddingBaseURL(runtime) {
 function getSmallModel(runtime) {
   return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL") ?? "gpt-5-mini";
 }
+function getNanoModel(runtime) {
+  return getSetting(runtime, "OPENAI_NANO_MODEL") ?? getSetting(runtime, "NANO_MODEL") ?? getSmallModel(runtime);
+}
+function getMediumModel(runtime) {
+  return getSetting(runtime, "OPENAI_MEDIUM_MODEL") ?? getSetting(runtime, "MEDIUM_MODEL") ?? getSmallModel(runtime);
+}
 function getLargeModel(runtime) {
   return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL") ?? "gpt-5";
 }
+function getMegaModel(runtime) {
+  return getSetting(runtime, "OPENAI_MEGA_MODEL") ?? getSetting(runtime, "MEGA_MODEL") ?? getLargeModel(runtime);
+}
+function getResponseHandlerModel(runtime) {
+  return getSetting(runtime, "OPENAI_RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "OPENAI_SHOULD_RESPOND_MODEL") ?? getSetting(runtime, "RESPONSE_HANDLER_MODEL") ?? getSetting(runtime, "SHOULD_RESPOND_MODEL") ?? getNanoModel(runtime);
+}
+function getActionPlannerModel(runtime) {
+  return getSetting(runtime, "OPENAI_ACTION_PLANNER_MODEL") ?? getSetting(runtime, "OPENAI_PLANNER_MODEL") ?? getSetting(runtime, "ACTION_PLANNER_MODEL") ?? getSetting(runtime, "PLANNER_MODEL") ?? getMediumModel(runtime);
+}
 function getEmbeddingModel(runtime) {
   return getSetting(runtime, "OPENAI_EMBEDDING_MODEL") ?? "text-embedding-3-small";
 }
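The five new getters share one resolution order: an OPENAI_-prefixed setting, then an unprefixed one, then another tier's getter as the floor (NANO and MEDIUM bottom out at the SMALL tier, MEGA at LARGE). A minimal TypeScript sketch of that chain, assuming a stubbed settings source (the Runtime type and example values below are illustrative, not part of the package):

    // Sketch: tier resolution with a stubbed runtime, mirroring the diff above.
    type Runtime = { getSetting(key: string): string | undefined };
    const getSetting = (runtime: Runtime, key: string) => runtime.getSetting(key);

    function getSmallModel(runtime: Runtime): string {
      return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL") ?? "gpt-5-mini";
    }
    function getNanoModel(runtime: Runtime): string {
      // NANO falls back to the SMALL tier when unset.
      return getSetting(runtime, "OPENAI_NANO_MODEL") ?? getSetting(runtime, "NANO_MODEL") ?? getSmallModel(runtime);
    }

    // No settings at all: NANO bottoms out at the SMALL default.
    const empty: Runtime = { getSetting: () => undefined };
    console.log(getNanoModel(empty)); // "gpt-5-mini"

    // A generic NANO_MODEL beats the SMALL fallback but loses to OPENAI_NANO_MODEL.
    const configured: Runtime = { getSetting: (key) => (key === "NANO_MODEL" ? "gpt-5-nano" : undefined) };
    console.log(getNanoModel(configured)); // "gpt-5-nano"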
@@ -391,19 +406,24 @@ function truncatePrompt(prompt) {
 }
 function normalizeUsage(usage) {
   if ("promptTokens" in usage) {
+    const promptTokensDetails = "promptTokensDetails" in usage ? usage.promptTokensDetails : undefined;
+    const cachedPromptTokens = usage.cachedPromptTokens ?? promptTokensDetails?.cachedTokens;
     return {
       promptTokens: usage.promptTokens ?? 0,
       completionTokens: usage.completionTokens ?? 0,
-      totalTokens: usage.totalTokens ?? (usage.promptTokens ?? 0) + (usage.completionTokens ?? 0)
+      totalTokens: usage.totalTokens ?? (usage.promptTokens ?? 0) + (usage.completionTokens ?? 0),
+      cachedPromptTokens
     };
   }
   if ("inputTokens" in usage || "outputTokens" in usage) {
     const input = usage.inputTokens ?? 0;
     const output = usage.outputTokens ?? 0;
+    const total = usage.totalTokens ?? input + output;
     return {
       promptTokens: input,
       completionTokens: output,
-      totalTokens:
+      totalTokens: total,
+      cachedPromptTokens: usage.cachedInputTokens
     };
   }
   return {
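normalizeUsage now recognizes cached prompt tokens in both shapes it accepts: chat-completions style (promptTokens, with the cache count on promptTokensDetails.cachedTokens) and AI SDK style (inputTokens plus cachedInputTokens). A self-contained TypeScript sketch of the same normalization; the zero-usage fallback branch of the original is omitted here, and the inputs are made up for illustration:

    interface NormalizedUsage {
      promptTokens: number;
      completionTokens: number;
      totalTokens: number;
      cachedPromptTokens?: number;
    }

    // Mirrors the two branches in the diff; the final fallback branch is omitted.
    function normalizeUsage(usage: Record<string, any>): NormalizedUsage {
      if ("promptTokens" in usage) {
        const details = "promptTokensDetails" in usage ? usage.promptTokensDetails : undefined;
        return {
          promptTokens: usage.promptTokens ?? 0,
          completionTokens: usage.completionTokens ?? 0,
          totalTokens: usage.totalTokens ?? (usage.promptTokens ?? 0) + (usage.completionTokens ?? 0),
          cachedPromptTokens: usage.cachedPromptTokens ?? details?.cachedTokens
        };
      }
      const input = usage.inputTokens ?? 0;
      const output = usage.outputTokens ?? 0;
      return {
        promptTokens: input,
        completionTokens: output,
        totalTokens: usage.totalTokens ?? input + output,
        cachedPromptTokens: usage.cachedInputTokens
      };
    }

    console.log(normalizeUsage({ promptTokens: 120, completionTokens: 30, promptTokensDetails: { cachedTokens: 100 } }));
    // { promptTokens: 120, completionTokens: 30, totalTokens: 150, cachedPromptTokens: 100 }
    console.log(normalizeUsage({ inputTokens: 80, outputTokens: 20, cachedInputTokens: 64 }));
    // { promptTokens: 80, completionTokens: 20, totalTokens: 100, cachedPromptTokens: 64 }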
@@ -423,14 +443,14 @@ function emitModelUsageEvent(runtime, type, prompt, usage) {
     tokens: {
       prompt: normalized.promptTokens,
       completion: normalized.completionTokens,
-      total: normalized.totalTokens
+      total: normalized.totalTokens,
+      ...normalized.cachedPromptTokens !== undefined ? { cached: normalized.cachedPromptTokens } : {}
     }
   };
   runtime.emitEvent(EventType.MODEL_USED, payload);
 }

 // models/embedding.ts
-var MAX_EMBEDDING_TOKENS = 8000;
 function validateDimension(dimension) {
   const validDimensions = Object.values(VECTOR_DIMS);
   if (!validDimensions.includes(dimension)) {
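The MODEL_USED payload gains a cached count only when the provider reported one, via a conditional spread, so consumers that never look for it see an unchanged tokens shape. A tiny sketch of just that pattern (the buildTokens helper is hypothetical):

    // The `cached` key is present only when cachedPromptTokens was defined.
    function buildTokens(n: { promptTokens: number; completionTokens: number; totalTokens: number; cachedPromptTokens?: number }) {
      return {
        prompt: n.promptTokens,
        completion: n.completionTokens,
        total: n.totalTokens,
        ...(n.cachedPromptTokens !== undefined ? { cached: n.cachedPromptTokens } : {})
      };
    }

    console.log(buildTokens({ promptTokens: 10, completionTokens: 5, totalTokens: 15 }));
    // { prompt: 10, completion: 5, total: 15 } (no cached key)
    console.log(buildTokens({ promptTokens: 10, completionTokens: 5, totalTokens: 15, cachedPromptTokens: 8 }));
    // { prompt: 10, completion: 5, total: 15, cached: 8 }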
@@ -464,9 +484,9 @@ async function handleTextEmbedding(runtime, params) {
   if (trimmedText.length === 0) {
     throw new Error("Cannot generate embedding for empty text");
   }
-  const maxChars =
+  const maxChars = 8000 * 4;
   if (trimmedText.length > maxChars) {
-    logger5.warn(`[OpenAI] Embedding input too long (~${Math.ceil(trimmedText.length / 4)} tokens), truncating to
+    logger5.warn(`[OpenAI] Embedding input too long (~${Math.ceil(trimmedText.length / 4)} tokens), truncating to ~8000 tokens`);
     trimmedText = trimmedText.slice(0, maxChars);
   }
   const baseURL = getEmbeddingBaseURL(runtime);
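With the MAX_EMBEDDING_TOKENS constant removed, the cap is inlined as 8000 * 4 characters, i.e. the rough four-characters-per-token heuristic (32000 characters for ~8000 tokens). A standalone sketch of the guard, with console.warn standing in for the package's logger:

    const APPROX_CHARS_PER_TOKEN = 4; // heuristic, not a real tokenizer
    const maxChars = 8000 * APPROX_CHARS_PER_TOKEN; // 32000 characters

    function truncateForEmbedding(text: string): string {
      const trimmed = text.trim();
      if (trimmed.length > maxChars) {
        console.warn(`[OpenAI] Embedding input too long (~${Math.ceil(trimmed.length / APPROX_CHARS_PER_TOKEN)} tokens), truncating to ~8000 tokens`);
        return trimmed.slice(0, maxChars);
      }
      return trimmed;
    }

    console.log(truncateForEmbedding("x".repeat(40000)).length); // 32000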
@@ -489,7 +509,7 @@ async function handleTextEmbedding(runtime, params) {
   }
   const data = await response.json();
   const firstResult = data?.data?.[0];
-  if (!firstResult
+  if (!firstResult?.embedding) {
     throw new Error("OpenAI API returned invalid embedding response structure");
   }
   const embedding = firstResult.embedding;
@@ -515,7 +535,7 @@ async function handleImageGeneration(runtime, params) {
   const size = params.size ?? "1024x1024";
   const extendedParams = params;
   logger6.debug(`[OpenAI] Using IMAGE model: ${modelName}`);
-  if (
+  if (params.prompt.trim().length === 0) {
     throw new Error("IMAGE generation requires a non-empty prompt");
   }
   if (count < 1 || count > 10) {
@@ -547,7 +567,7 @@ async function handleImageGeneration(runtime, params) {
     throw new Error(`OpenAI image generation failed: ${response.status} ${response.statusText} - ${errorText}`);
   }
   const data = await response.json();
-  if (
+  if (data.data.length === 0) {
     throw new Error("OpenAI API returned no images");
   }
   return data.data.map((item) => ({
@@ -675,7 +695,7 @@ async function generateObjectByModelType(runtime, params, modelType, getModelFn)
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
   logger8.debug(`[OpenAI] Using ${modelType} model: ${modelName}`);
-  if (
+  if (params.prompt.trim().length === 0) {
     throw new Error("Object generation requires a non-empty prompt");
   }
   if (params.schema) {
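This guard, like the embedding and image guards in the hunks above, validates input before the API call and throws with a model-specific message. A condensed sketch of the shared pattern (the requireNonEmptyPrompt helper is hypothetical; the messages match the diff):

    function requireNonEmptyPrompt(prompt: string, what: string): void {
      if (prompt.trim().length === 0) {
        throw new Error(`${what} requires a non-empty prompt`);
      }
    }

    requireNonEmptyPrompt("draw a cat", "IMAGE generation"); // passes
    try {
      requireNonEmptyPrompt("   ", "Object generation");
    } catch (err) {
      console.error((err as Error).message); // "Object generation requires a non-empty prompt"
    }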
@@ -914,30 +934,70 @@ async function handleResearch(runtime, params) {
 // models/text.ts
 import { logger as logger10, ModelType as ModelType4 } from "@elizaos/core";
 import { generateText, streamText } from "ai";
+var TEXT_NANO_MODEL_TYPE = ModelType4.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE = ModelType4.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_MEGA_MODEL_TYPE = ModelType4.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE = ModelType4.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE = ModelType4.ACTION_PLANNER ?? "ACTION_PLANNER";
+function buildUserContent(params) {
+  const content = [{ type: "text", text: params.prompt }];
+  for (const attachment of params.attachments ?? []) {
+    content.push({
+      type: "file",
+      data: attachment.data,
+      mediaType: attachment.mediaType,
+      ...attachment.filename ? { filename: attachment.filename } : {}
+    });
+  }
+  return content;
+}
 function convertUsage(usage) {
   if (!usage) {
     return;
   }
   const promptTokens = usage.inputTokens ?? 0;
   const completionTokens = usage.outputTokens ?? 0;
+  const usageWithCache = usage;
   return {
     promptTokens,
     completionTokens,
-    totalTokens: promptTokens + completionTokens
+    totalTokens: promptTokens + completionTokens,
+    cachedPromptTokens: usageWithCache.cachedInputTokens
+  };
+}
+function resolvePromptCacheOptions(params) {
+  const withOpenAIOptions = params;
+  return {
+    promptCacheKey: withOpenAIOptions.providerOptions?.openai?.promptCacheKey,
+    promptCacheRetention: withOpenAIOptions.providerOptions?.openai?.promptCacheRetention
   };
 }
 async function generateTextByModelType(runtime, params, modelType, getModelFn) {
+  const paramsWithAttachments = params;
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
   logger10.debug(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  const promptCacheOptions = resolvePromptCacheOptions(params);
+  const hasAttachments = (paramsWithAttachments.attachments?.length ?? 0) > 0;
+  const userContent = hasAttachments ? buildUserContent(paramsWithAttachments) : undefined;
   const systemPrompt = runtime.character.system ?? undefined;
   const model = openai.chat(modelName);
   const generateParams = {
     model,
-    prompt: params.prompt,
+    ...userContent ? { messages: [{ role: "user", content: userContent }] } : { prompt: params.prompt },
     system: systemPrompt,
     maxOutputTokens: params.maxTokens ?? 8192,
-    experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) }
+    experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) },
+    ...promptCacheOptions.promptCacheKey || promptCacheOptions.promptCacheRetention ? {
+      providerOptions: {
+        openai: {
+          ...promptCacheOptions.promptCacheKey ? { promptCacheKey: promptCacheOptions.promptCacheKey } : {},
+          ...promptCacheOptions.promptCacheRetention ? {
+            promptCacheRetention: promptCacheOptions.promptCacheRetention
+          } : {}
+        }
+      }
+    } : {}
   };
   if (params.stream) {
     const result = streamText(generateParams);
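Two caller-facing capabilities land in generateTextByModelType: attachments become AI SDK file parts via buildUserContent (switching the call from prompt to messages), and providerOptions.openai.promptCacheKey / promptCacheRetention are forwarded for OpenAI prompt caching. A hypothetical call sketch; the runtime.useModel shape, attachment values, and retention string are assumptions, not taken from this diff:

    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    declare const runtime: IAgentRuntime;
    declare const reportBytes: string; // e.g. base64-encoded file contents

    const result = await runtime.useModel(ModelType.TEXT_LARGE, {
      prompt: "Summarize the attached report.",
      attachments: [
        {
          data: reportBytes,            // forwarded as the file part's data
          mediaType: "application/pdf", // forwarded as the file part's mediaType
          filename: "report.pdf"        // optional; included only when present
        }
      ],
      providerOptions: {
        openai: {
          promptCacheKey: "report-summary-v1", // stable key so repeats can hit the cache
          promptCacheRetention: "24h"          // assumed value; format per OpenAI's docs
        }
      }
    });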
@@ -957,9 +1017,24 @@ async function generateTextByModelType(runtime, params, modelType, getModelFn) {
 async function handleTextSmall(runtime, params) {
   return generateTextByModelType(runtime, params, ModelType4.TEXT_SMALL, getSmallModel);
 }
+async function handleTextNano(runtime, params) {
+  return generateTextByModelType(runtime, params, TEXT_NANO_MODEL_TYPE, getNanoModel);
+}
+async function handleTextMedium(runtime, params) {
+  return generateTextByModelType(runtime, params, TEXT_MEDIUM_MODEL_TYPE, getMediumModel);
+}
 async function handleTextLarge(runtime, params) {
   return generateTextByModelType(runtime, params, ModelType4.TEXT_LARGE, getLargeModel);
 }
+async function handleTextMega(runtime, params) {
+  return generateTextByModelType(runtime, params, TEXT_MEGA_MODEL_TYPE, getMegaModel);
+}
+async function handleResponseHandler(runtime, params) {
+  return generateTextByModelType(runtime, params, RESPONSE_HANDLER_MODEL_TYPE, getResponseHandlerModel);
+}
+async function handleActionPlanner(runtime, params) {
+  return generateTextByModelType(runtime, params, ACTION_PLANNER_MODEL_TYPE, getActionPlannerModel);
+}
 // models/tokenizer.ts
 import { ModelType as ModelType6 } from "@elizaos/core";

@@ -1027,16 +1102,35 @@ function getProcessEnv() {
   return process.env;
 }
 var env = getProcessEnv();
+var TEXT_NANO_MODEL_TYPE2 = ModelType7.TEXT_NANO ?? "TEXT_NANO";
+var TEXT_MEDIUM_MODEL_TYPE2 = ModelType7.TEXT_MEDIUM ?? "TEXT_MEDIUM";
+var TEXT_MEGA_MODEL_TYPE2 = ModelType7.TEXT_MEGA ?? "TEXT_MEGA";
+var RESPONSE_HANDLER_MODEL_TYPE2 = ModelType7.RESPONSE_HANDLER ?? "RESPONSE_HANDLER";
+var ACTION_PLANNER_MODEL_TYPE2 = ModelType7.ACTION_PLANNER ?? "ACTION_PLANNER";
 var openaiPlugin = {
   name: "openai",
   description: "OpenAI API integration for text, image, audio, and embedding models",
   config: {
     OPENAI_API_KEY: env.OPENAI_API_KEY ?? null,
     OPENAI_BASE_URL: env.OPENAI_BASE_URL ?? null,
+    OPENAI_NANO_MODEL: env.OPENAI_NANO_MODEL ?? null,
+    OPENAI_MEDIUM_MODEL: env.OPENAI_MEDIUM_MODEL ?? null,
     OPENAI_SMALL_MODEL: env.OPENAI_SMALL_MODEL ?? null,
     OPENAI_LARGE_MODEL: env.OPENAI_LARGE_MODEL ?? null,
+    OPENAI_MEGA_MODEL: env.OPENAI_MEGA_MODEL ?? null,
+    OPENAI_RESPONSE_HANDLER_MODEL: env.OPENAI_RESPONSE_HANDLER_MODEL ?? null,
+    OPENAI_SHOULD_RESPOND_MODEL: env.OPENAI_SHOULD_RESPOND_MODEL ?? null,
+    OPENAI_ACTION_PLANNER_MODEL: env.OPENAI_ACTION_PLANNER_MODEL ?? null,
+    OPENAI_PLANNER_MODEL: env.OPENAI_PLANNER_MODEL ?? null,
+    NANO_MODEL: env.NANO_MODEL ?? null,
+    MEDIUM_MODEL: env.MEDIUM_MODEL ?? null,
     SMALL_MODEL: env.SMALL_MODEL ?? null,
     LARGE_MODEL: env.LARGE_MODEL ?? null,
+    MEGA_MODEL: env.MEGA_MODEL ?? null,
+    RESPONSE_HANDLER_MODEL: env.RESPONSE_HANDLER_MODEL ?? null,
+    SHOULD_RESPOND_MODEL: env.SHOULD_RESPOND_MODEL ?? null,
+    ACTION_PLANNER_MODEL: env.ACTION_PLANNER_MODEL ?? null,
+    PLANNER_MODEL: env.PLANNER_MODEL ?? null,
     OPENAI_EMBEDDING_MODEL: env.OPENAI_EMBEDDING_MODEL ?? null,
     OPENAI_EMBEDDING_API_KEY: env.OPENAI_EMBEDDING_API_KEY ?? null,
     OPENAI_EMBEDDING_URL: env.OPENAI_EMBEDDING_URL ?? null,
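Every new model-selection variable is surfaced in the plugin config with a null default, so tier selection is driven entirely by the environment. One way to pin the tiers before the plugin loads, sketched with placeholder model names:

    // Values are illustrative; unset tiers fall back as described in the getters above.
    process.env.OPENAI_NANO_MODEL = "gpt-5-nano";              // cheapest tier; falls back to SMALL
    process.env.OPENAI_MEDIUM_MODEL = "gpt-5-mini";            // mid tier; falls back to SMALL
    process.env.OPENAI_MEGA_MODEL = "gpt-5";                   // top tier; falls back to LARGE
    process.env.OPENAI_RESPONSE_HANDLER_MODEL = "gpt-5-nano";  // should-respond checks; falls back to NANO
    process.env.OPENAI_ACTION_PLANNER_MODEL = "gpt-5-mini";    // action planning; falls back to MEDIUM
    // The unprefixed forms (NANO_MODEL, MEDIUM_MODEL, ...) are consulted when the OPENAI_* keys are unset.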
@@ -1063,9 +1157,24 @@ var openaiPlugin = {
     [ModelType7.TEXT_SMALL]: async (runtime, params) => {
       return handleTextSmall(runtime, params);
     },
+    [TEXT_NANO_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextNano(runtime, params);
+    },
+    [TEXT_MEDIUM_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMedium(runtime, params);
+    },
     [ModelType7.TEXT_LARGE]: async (runtime, params) => {
       return handleTextLarge(runtime, params);
     },
+    [TEXT_MEGA_MODEL_TYPE2]: async (runtime, params) => {
+      return handleTextMega(runtime, params);
+    },
+    [RESPONSE_HANDLER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleResponseHandler(runtime, params);
+    },
+    [ACTION_PLANNER_MODEL_TYPE2]: async (runtime, params) => {
+      return handleActionPlanner(runtime, params);
+    },
     [ModelType7.IMAGE]: async (runtime, params) => {
       return handleImageGeneration(runtime, params);
     },
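The new handler keys are computed (ModelType7.TEXT_NANO ?? "TEXT_NANO"), so registration works whether or not the installed @elizaos/core defines those ModelType members; the string literal is the fallback key. A sketch of the computed-key pattern (the handlers map is illustrative):

    import { ModelType } from "@elizaos/core";

    // Resolves to ModelType.TEXT_NANO when core defines it, else the plain string.
    const modelTypes = ModelType as Record<string, string | undefined>;
    const TEXT_NANO_KEY = modelTypes.TEXT_NANO ?? "TEXT_NANO";

    const handlers: Record<string, (prompt: string) => Promise<string>> = {
      [TEXT_NANO_KEY]: async (prompt) => `nano says: ${prompt}`
    };

    // Dispatch uses the same computed key, whichever form it took.
    handlers[TEXT_NANO_KEY]("ready?").then(console.log);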
@@ -1253,10 +1362,13 @@ var openaiPlugin = {
     }
   ]
 };
-var
+var plugin_openai_default = openaiPlugin;
+
+// index.node.ts
+var index_node_default = plugin_openai_default;
 export {
   openaiPlugin,
-
+  index_node_default as default
 };

-//# debugId=
+//# debugId=5F395DEFC0D15DB964756E2164756E21