@elizaos/plugin-openai 1.5.18 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +2 -3
- package/dist/browser/index.browser.js.map +10 -13
- package/dist/cjs/index.node.cjs +88 -87
- package/dist/cjs/index.node.js.map +9 -9
- package/dist/models/audio.d.ts +1 -1
- package/dist/models/image.d.ts +8 -8
- package/dist/models/object.d.ts +2 -3
- package/dist/models/text.d.ts +3 -3
- package/dist/node/index.node.js +89 -94
- package/dist/node/index.node.js.map +9 -9
- package/package.json +3 -2
package/dist/node/index.node.js
CHANGED
@@ -1,22 +1,3 @@
-import { createRequire } from "node:module";
-var __create = Object.create;
-var __getProtoOf = Object.getPrototypeOf;
-var __defProp = Object.defineProperty;
-var __getOwnPropNames = Object.getOwnPropertyNames;
-var __hasOwnProp = Object.prototype.hasOwnProperty;
-var __toESM = (mod, isNodeMode, target) => {
-  target = mod != null ? __create(__getProtoOf(mod)) : {};
-  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
-  for (let key of __getOwnPropNames(mod))
-    if (!__hasOwnProp.call(to, key))
-      __defProp(to, key, {
-        get: () => mod[key],
-        enumerable: true
-      });
-  return to;
-};
-var __require = /* @__PURE__ */ createRequire(import.meta.url);
-
 // src/index.ts
 import { logger as logger10, ModelType as ModelType7 } from "@elizaos/core";
 
@@ -26,7 +7,11 @@ import { logger as logger2 } from "@elizaos/core";
 // src/utils/config.ts
 import { logger } from "@elizaos/core";
 function getSetting(runtime, key, defaultValue) {
-
+  const value = runtime.getSetting(key);
+  if (value !== undefined && value !== null) {
+    return String(value);
+  }
+  return process.env[key] ?? defaultValue;
 }
 function isBrowser() {
   return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
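The reworked `getSetting` now prefers a runtime-provided value (coerced to a string) and only then falls back to the environment and the supplied default. A minimal standalone sketch of that resolution order; the `RuntimeLike` stub and the example keys are illustrative, not from the package:

```ts
// Sketch of the new resolution order: runtime setting → process.env → default.
type SettingValue = string | number | boolean | null | undefined;

interface RuntimeLike {
  getSetting(key: string): SettingValue;
}

function getSettingSketch(runtime: RuntimeLike, key: string, defaultValue?: string): string | undefined {
  const value = runtime.getSetting(key);
  if (value !== undefined && value !== null) {
    return String(value); // non-string runtime values (e.g. numbers) are coerced
  }
  return process.env[key] ?? defaultValue;
}

// Usage: a runtime value wins over the environment, and the default comes last.
const fakeRuntime: RuntimeLike = {
  getSetting: (key) => (key === "OPENAI_IMAGE_MODEL" ? "gpt-image-1" : undefined),
};
console.log(getSettingSketch(fakeRuntime, "OPENAI_IMAGE_MODEL", "dall-e-3"));   // "gpt-image-1"
console.log(getSettingSketch(fakeRuntime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts")); // env value or the default
```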
@@ -117,7 +102,7 @@ function initializeOpenAI(_config, runtime) {
 
 // src/models/text.ts
 import { logger as logger3, ModelType } from "@elizaos/core";
-import { generateText } from "ai";
+import { generateText, streamText } from "ai";
 
 // src/providers/openai.ts
 import { createOpenAI } from "@ai-sdk/openai";
@@ -133,10 +118,13 @@ function emitModelUsageEvent(runtime, type, prompt, usage) {
   const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
   const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
   const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+  const truncatedPrompt = typeof prompt === "string" ? prompt.length > 200 ? `${prompt.slice(0, 200)}…` : prompt : "";
   runtime.emitEvent(EventType.MODEL_USED, {
+    runtime,
+    source: "openai",
     provider: "openai",
     type,
-    prompt,
+    prompt: truncatedPrompt,
     tokens: {
       prompt: promptTokens,
       completion: completionTokens,
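The MODEL_USED event now carries a truncated copy of the prompt (at most 200 characters plus an ellipsis) rather than the full text. A small sketch of just that truncation step, lifted out of the plugin for clarity:

```ts
// Mirrors the truncation applied before emitting MODEL_USED:
// strings longer than 200 characters are cut and suffixed with "…", non-strings become "".
function truncatePrompt(prompt: unknown): string {
  return typeof prompt === "string"
    ? prompt.length > 200
      ? `${prompt.slice(0, 200)}…`
      : prompt
    : "";
}

const longPrompt = "x".repeat(500);
console.log(truncatePrompt(longPrompt).length); // 201 (200 characters plus the ellipsis)
console.log(truncatePrompt(42));                // ""
```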
@@ -149,34 +137,35 @@ function emitModelUsageEvent(runtime, type, prompt, usage) {
 async function generateTextByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
-
-
-  logger3.log(params.prompt);
-  const {
-    prompt,
-    stopSequences = [],
-    maxTokens = 8192,
-    temperature = 0.7,
-    frequencyPenalty = 0.7,
-    presencePenalty = 0.7
-  } = params;
-  const { text: openaiResponse, usage } = await generateText({
+  logger3.debug(`[OpenAI] ${modelType} model: ${modelName}`);
+  const generateParams = {
     model: openai.languageModel(modelName),
-    prompt,
+    prompt: params.prompt,
     system: runtime.character.system ?? undefined,
-    temperature,
-    maxOutputTokens: maxTokens,
-    frequencyPenalty,
-    presencePenalty,
-    stopSequences,
-    experimental_telemetry: {
-
-
-
-
-
+    temperature: params.temperature ?? 0.7,
+    maxOutputTokens: params.maxTokens ?? 8192,
+    frequencyPenalty: params.frequencyPenalty ?? 0.7,
+    presencePenalty: params.presencePenalty ?? 0.7,
+    stopSequences: params.stopSequences ?? [],
+    experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) }
+  };
+  if (params.stream) {
+    const result = streamText(generateParams);
+    return {
+      textStream: result.textStream,
+      text: result.text,
+      usage: result.usage.then((u) => u ? {
+        promptTokens: u.inputTokens ?? 0,
+        completionTokens: u.outputTokens ?? 0,
+        totalTokens: (u.inputTokens ?? 0) + (u.outputTokens ?? 0)
+      } : undefined),
+      finishReason: result.finishReason
+    };
   }
-
+  const { text, usage } = await generateText(generateParams);
+  if (usage)
+    emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+  return text;
 }
 async function handleTextSmall(runtime, params) {
   return generateTextByModelType(runtime, params, ModelType.TEXT_SMALL, getSmallModel);
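When `params.stream` is truthy, `generateTextByModelType` now returns a streaming result (chunk stream, final-text promise, normalized usage promise, finish reason) instead of a plain string. A hedged consumer sketch: the object shape is taken from the hunk above, but how callers obtain it (here a plain parameter) is an assumption:

```ts
// Shape of the value returned by the new streaming branch, per the hunk above.
interface StreamingTextResult {
  textStream: AsyncIterable<string>;
  text: Promise<string>;
  usage: Promise<{ promptTokens: number; completionTokens: number; totalTokens: number } | undefined>;
  finishReason: Promise<string>;
}

async function consumeStream(result: StreamingTextResult): Promise<void> {
  // Forward chunks as they arrive, then read the aggregated values.
  for await (const chunk of result.textStream) {
    process.stdout.write(chunk);
  }
  const [fullText, usage, finishReason] = await Promise.all([result.text, result.usage, result.finishReason]);
  console.log(`\nfinish=${finishReason}, totalTokens=${usage?.totalTokens ?? 0}, length=${fullText.length}`);
}
```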
@@ -268,7 +257,7 @@ async function handleTextEmbedding(runtime, params) {
 // src/models/image.ts
 import { logger as logger5, ModelType as ModelType3 } from "@elizaos/core";
 async function handleImageGeneration(runtime, params) {
-  const n = params.
+  const n = params.count || 1;
   const size = params.size || "1024x1024";
   const prompt = params.prompt;
   const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
@@ -293,7 +282,7 @@ async function handleImageGeneration(runtime, params) {
     }
     const data = await response.json();
     const typedData = data;
-    return typedData;
+    return typedData.data;
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error);
     throw error;
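Image generation now reads a `count` parameter (defaulting to 1) and returns the response's `data` array rather than the whole envelope. A hedged usage sketch; the element shape (`url` vs. `b64_json`) depends on the OpenAI images response and is an assumption here:

```ts
// Hedged sketch of calling the image model after this change.
// ModelType and runtime.useModel come from @elizaos/core (as in the diff);
// the { url?, b64_json? } element shape is an assumption.
import { ModelType, type IAgentRuntime } from "@elizaos/core";

async function generateSunset(runtime: IAgentRuntime): Promise<void> {
  const images = (await runtime.useModel(ModelType.IMAGE, {
    prompt: "A beautiful sunset over a calm ocean",
    count: 1,          // the handler now reads params.count || 1
    size: "1024x1024",
  })) as Array<{ url?: string; b64_json?: string }>;

  // The handler now returns typedData.data directly, so `images` is the array itself.
  console.log(`received ${images.length} image(s)`, images[0]?.url ?? "(base64 payload)");
}
```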
@@ -356,10 +345,6 @@ async function handleImageDescription(runtime, params) {
       description: "No response from API"
     };
   }
-  const isCustomPrompt = typeof params === "object" && Boolean(params.prompt) && params.prompt !== DEFAULT_PROMPT;
-  if (isCustomPrompt) {
-    return content;
-  }
   const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
   const title = titleMatch?.[1]?.trim();
   if (!title) {
@@ -426,33 +411,6 @@ function detectAudioMimeType(buffer) {
   logger6.warn("Could not detect audio format from buffer, using generic binary type");
   return "application/octet-stream";
 }
-async function webStreamToNodeStream(webStream) {
-  try {
-    const { Readable } = await import("node:stream");
-    const reader = webStream.getReader();
-    return new Readable({
-      async read() {
-        try {
-          const { done, value } = await reader.read();
-          if (done) {
-            this.push(null);
-          } else {
-            this.push(value);
-          }
-        } catch (error) {
-          this.destroy(error);
-        }
-      },
-      destroy(error, callback) {
-        reader.cancel().finally(() => callback(error));
-      }
-    });
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    logger6.error(`Failed to load node:stream module: ${message}`);
-    throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
-  }
-}
 
 // src/models/audio.ts
 async function fetchTextToSpeech(runtime, options) {
@@ -484,13 +442,7 @@ async function fetchTextToSpeech(runtime, options) {
       const err = await res.text();
       throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
     }
-
-      throw new Error("OpenAI TTS response body is null");
-    }
-    if (!isBrowser()) {
-      return await webStreamToNodeStream(res.body);
-    }
-    return res.body;
+    return await res.arrayBuffer();
   } catch (err) {
     const message = err instanceof Error ? err.message : String(err);
     throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
@@ -582,8 +534,7 @@ async function handleTextToSpeech(runtime, input) {
   const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
   logger7.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
   try {
-
-    return speechStream;
+    return await fetchTextToSpeech(runtime, options);
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error);
     logger7.error(`Error in TEXT_TO_SPEECH: ${message}`);
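Text-to-speech no longer converts the response into a Node stream; both environments now resolve to an `ArrayBuffer`. A hedged sketch of consuming the new return type in Node; invoking the model with a plain string input and the output filename are assumptions:

```ts
// Hedged sketch: TEXT_TO_SPEECH now resolves to an ArrayBuffer (previously a stream in Node).
import { writeFile } from "node:fs/promises";
import { ModelType, type IAgentRuntime } from "@elizaos/core";

async function speechToFile(runtime: IAgentRuntime, text: string): Promise<void> {
  const audio = (await runtime.useModel(ModelType.TEXT_TO_SPEECH, text)) as ArrayBuffer;
  // ArrayBuffer → Buffer for the Node filesystem API.
  await writeFile("speech.mp3", Buffer.from(audio));
  console.log(`wrote ${audio.byteLength} bytes of audio`);
}
```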
@@ -813,7 +764,7 @@ var openaiPlugin = {
         try {
           const image = await runtime.useModel(ModelType7.IMAGE, {
             prompt: "A beautiful sunset over a calm ocean",
-
+            count: 1,
             size: "1024x1024"
           });
           logger10.log({ image }, "generated with test_image_generation");
@@ -866,7 +817,7 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_encode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer encode!";
-        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt });
+        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt, modelType: ModelType7.TEXT_SMALL });
         if (!Array.isArray(tokens) || tokens.length === 0) {
           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
         }
@@ -877,9 +828,10 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_decode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer decode!";
-        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt });
+        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt, modelType: ModelType7.TEXT_SMALL });
         const decodedText = await runtime.useModel(ModelType7.TEXT_TOKENIZER_DECODE, {
-          tokens
+          tokens,
+          modelType: ModelType7.TEXT_SMALL
         });
         if (decodedText !== prompt) {
           throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
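Both tokenizer tests now pass `modelType` so that encode and decode resolve the same model's tokenizer. A minimal round-trip sketch in the same style as those tests; the wrapper function itself is illustrative:

```ts
// Encode/decode round trip with the new modelType parameter, mirroring the updated tests.
import { ModelType, type IAgentRuntime } from "@elizaos/core";

async function tokenizerRoundTrip(runtime: IAgentRuntime, prompt: string): Promise<void> {
  const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, {
    prompt,
    modelType: ModelType.TEXT_SMALL, // keeps encode and decode on the same tokenizer
  });
  const decoded = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, {
    tokens,
    modelType: ModelType.TEXT_SMALL,
  });
  if (decoded !== prompt) {
    throw new Error(`round trip mismatch: expected "${prompt}", got "${decoded}"`);
  }
}
```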
@@ -904,6 +856,49 @@ var openaiPlugin = {
           throw error;
         }
       }
+    },
+    {
+      name: "openai_test_text_generation_large",
+      fn: async (runtime) => {
+        try {
+          const result = await runtime.useModel(ModelType7.TEXT_LARGE, {
+            prompt: "Say hello in 5 words."
+          });
+          if (!result || result.length === 0) {
+            throw new Error("Text generation returned empty result");
+          }
+          logger10.log({ result }, "Text generation test completed");
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          logger10.error(`Error in openai_test_text_generation_large: ${message}`);
+          throw error;
+        }
+      }
+    },
+    {
+      name: "openai_test_streaming",
+      fn: async (runtime) => {
+        try {
+          const chunks = [];
+          const result = await runtime.useModel(ModelType7.TEXT_LARGE, {
+            prompt: "Count from 1 to 5.",
+            onStreamChunk: (chunk) => {
+              chunks.push(chunk);
+            }
+          });
+          if (!result || result.length === 0) {
+            throw new Error("Streaming returned empty result");
+          }
+          if (chunks.length === 0) {
+            throw new Error("No streaming chunks received");
+          }
+          logger10.log({ chunks: chunks.length, result: result.substring(0, 50) }, "Streaming test completed");
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          logger10.error(`Error in openai_test_streaming: ${message}`);
+          throw error;
+        }
+      }
     }
   ]
 }
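The new `openai_test_streaming` case drives TEXT_LARGE with an `onStreamChunk` callback while still receiving the full text as the resolved value. A hedged caller sketch in the same shape; error handling and logging are simplified:

```ts
// Streaming through TEXT_LARGE with onStreamChunk, following the new test above.
import { ModelType, type IAgentRuntime } from "@elizaos/core";

async function streamCountToFive(runtime: IAgentRuntime): Promise<string> {
  const chunks: string[] = [];
  const result = await runtime.useModel(ModelType.TEXT_LARGE, {
    prompt: "Count from 1 to 5.",
    onStreamChunk: (chunk: string) => {
      chunks.push(chunk); // incremental deltas arrive here as they are generated
    },
  });
  console.log(`received ${chunks.length} chunks, final text: ${result.substring(0, 50)}`);
  return result;
}
```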
@@ -915,4 +910,4 @@ export {
   src_default as default
 };
 
-//# debugId=
+//# debugId=382E411BFB8DEF2564756E2164756E21