@elizaos/plugin-openai 1.5.15 → 1.5.18
- package/dist/browser/index.browser.js +3 -2
- package/dist/browser/index.browser.js.map +20 -4
- package/dist/cjs/index.node.cjs +626 -466
- package/dist/cjs/index.node.js.map +17 -4
- package/dist/index.d.ts +1 -16
- package/dist/init.d.ts +5 -0
- package/dist/models/audio.d.ts +10 -0
- package/dist/models/embedding.d.ts +5 -0
- package/dist/models/image.d.ts +14 -0
- package/dist/models/index.d.ts +6 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/text.d.ts +9 -0
- package/dist/models/tokenizer.d.ts +9 -0
- package/dist/node/index.node.js +633 -471
- package/dist/node/index.node.js.map +17 -4
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/openai.d.ts +8 -0
- package/dist/types/index.d.ts +26 -0
- package/dist/utils/audio.d.ts +12 -0
- package/dist/utils/config.d.ts +70 -0
- package/dist/utils/events.d.ts +10 -0
- package/dist/utils/index.d.ts +5 -0
- package/dist/utils/json.d.ts +7 -0
- package/dist/utils/tokenization.d.ts +17 -0
- package/package.json +1 -1
package/dist/node/index.node.js
CHANGED
@@ -1,17 +1,35 @@
+import { createRequire } from "node:module";
+var __create = Object.create;
+var __getProtoOf = Object.getPrototypeOf;
+var __defProp = Object.defineProperty;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __toESM = (mod, isNodeMode, target) => {
+  target = mod != null ? __create(__getProtoOf(mod)) : {};
+  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
+  for (let key of __getOwnPropNames(mod))
+    if (!__hasOwnProp.call(to, key))
+      __defProp(to, key, {
+        get: () => mod[key],
+        enumerable: true
+      });
+  return to;
+};
+var __require = /* @__PURE__ */ createRequire(import.meta.url);
+
 // src/index.ts
-import {
-
-
-
-
-
-} from "
-import { encodingForModel } from "js-tiktoken";
+import { logger as logger10, ModelType as ModelType7 } from "@elizaos/core";
+
+// src/init.ts
+import { logger as logger2 } from "@elizaos/core";
+
+// src/utils/config.ts
+import { logger } from "@elizaos/core";
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
 function isBrowser() {
-  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
+  return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
 }
 function isProxyMode(runtime) {
   return isBrowser() && !!getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
@@ -50,13 +68,13 @@ function getEmbeddingApiKey(runtime) {
   return getApiKey(runtime);
 }
 function getSmallModel(runtime) {
-  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
 }
 function getLargeModel(runtime) {
-  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
 }
 function getImageDescriptionModel(runtime) {
-  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano")
+  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano");
 }
 function getExperimentalTelemetry(runtime) {
   const setting = getSetting(runtime, "OPENAI_EXPERIMENTAL_TELEMETRY", "false");
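
Note: the truncated "-" lines in this hunk elide the old default model strings; the "+" lines carry the full replacements. A minimal sketch of overriding these defaults, assuming the getSetting fallback chain shown above (runtime/character settings first, then process.env, then the hard-coded default):

  // Resolution order per getSmallModel: OPENAI_SMALL_MODEL -> SMALL_MODEL -> "gpt-4o-mini"
  process.env.OPENAI_SMALL_MODEL = "gpt-4o-mini";
  // Resolution order per getLargeModel: OPENAI_LARGE_MODEL -> LARGE_MODEL -> "gpt-4o"
  process.env.OPENAI_LARGE_MODEL = "gpt-4o";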
@@ -65,98 +83,378 @@ function getExperimentalTelemetry(runtime) {
   logger.debug(`[OpenAI] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
   return result;
 }
+
+// src/init.ts
+function initializeOpenAI(_config, runtime) {
+  (async () => {
+    try {
+      if (!getApiKey(runtime) && !isBrowser()) {
+        logger2.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
+        return;
+      }
+      try {
+        const baseURL = getBaseURL(runtime);
+        const response = await fetch(`${baseURL}/models`, {
+          headers: getAuthHeader(runtime)
+        });
+        if (!response.ok) {
+          logger2.warn(`OpenAI API key validation failed: ${response.statusText}`);
+          logger2.warn("OpenAI functionality will be limited until a valid API key is provided");
+        } else {
+          logger2.log("OpenAI API key validated successfully");
+        }
+      } catch (fetchError) {
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        logger2.warn(`Error validating OpenAI API key: ${message}`);
+        logger2.warn("OpenAI functionality will be limited until a valid API key is provided");
+      }
+    } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+      logger2.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
+    }
+  })();
+}
+
+// src/models/text.ts
+import { logger as logger3, ModelType } from "@elizaos/core";
+import { generateText } from "ai";
+
+// src/providers/openai.ts
+import { createOpenAI } from "@ai-sdk/openai";
 function createOpenAIClient(runtime) {
   const baseURL = getBaseURL(runtime);
   const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "sk-proxy" : undefined);
   return createOpenAI({ apiKey: apiKey ?? "", baseURL });
 }
-
-
-
-
-
-
-const
-
+
+// src/utils/events.ts
+import { EventType } from "@elizaos/core";
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
+  const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
+  const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+  runtime.emitEvent(EventType.MODEL_USED, {
+    provider: "openai",
+    type,
+    prompt,
+    tokens: {
+      prompt: promptTokens,
+      completion: completionTokens,
+      total: totalTokens
+    }
+  });
 }
-
+
+// src/models/text.ts
+async function generateTextByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
-
-
-
-
-
+  const experimentalTelemetry = getExperimentalTelemetry(runtime);
+  logger3.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  logger3.log(params.prompt);
+  const {
+    prompt,
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    frequencyPenalty = 0.7,
+    presencePenalty = 0.7
+  } = params;
+  const { text: openaiResponse, usage } = await generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? undefined,
+    temperature,
+    maxOutputTokens: maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    experimental_telemetry: {
+      isEnabled: experimentalTelemetry
+    }
+  });
+  if (usage) {
+    emitModelUsageEvent(runtime, modelType, prompt, usage);
+  }
+  return openaiResponse;
+}
+async function handleTextSmall(runtime, params) {
+  return generateTextByModelType(runtime, params, ModelType.TEXT_SMALL, getSmallModel);
+}
+async function handleTextLarge(runtime, params) {
+  return generateTextByModelType(runtime, params, ModelType.TEXT_LARGE, getLargeModel);
+}
+// src/models/embedding.ts
+import { logger as logger4, ModelType as ModelType2, VECTOR_DIMS } from "@elizaos/core";
+async function handleTextEmbedding(runtime, params) {
+  const embeddingModelName = getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small");
+  const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
+  if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
+    const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+    logger4.error(errorMsg);
+    throw new Error(errorMsg);
+  }
+  if (params === null) {
+    logger4.debug("Creating test embedding for initialization");
+    const testVector = Array(embeddingDimension).fill(0);
+    testVector[0] = 0.1;
+    return testVector;
+  }
+  let text;
+  if (typeof params === "string") {
+    text = params;
+  } else if (typeof params === "object" && params.text) {
+    text = params.text;
+  } else {
+    const errorMsg = "Invalid input format for embedding";
+    logger4.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.2;
+    return fallbackVector;
   }
+  if (!text.trim()) {
+    const errorMsg = "Empty text for embedding";
+    logger4.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.3;
+    return fallbackVector;
+  }
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
   try {
-    const
-
-
-
-
-
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime, true),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
     });
-    if (
-
+    if (!response.ok) {
+      logger4.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+      throw new Error(`OpenAI API error: ${response.status} - ${response.statusText}`);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-        logger.info("[generateObject] Successfully repaired JSON.");
-        return repairedObject;
-      } catch (repairParseError) {
-        const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
-        logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
-        throw repairParseError;
-      }
-    } else {
-      logger.error("[generateObject] JSON repair failed.");
-      throw error;
-    }
-  } else {
-    const message = error instanceof Error ? error.message : String(error);
-    logger.error(`[generateObject] Unknown error: ${message}`);
-    throw error;
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      logger4.error("API returned invalid structure");
+      throw new Error("API returned invalid structure");
+    }
+    const embedding = data.data[0].embedding;
+    if (!Array.isArray(embedding) || embedding.length !== embeddingDimension) {
+      const errorMsg = `Embedding length ${embedding?.length ?? 0} does not match configured dimension ${embeddingDimension}`;
+      logger4.error(errorMsg);
+      const fallbackVector = Array(embeddingDimension).fill(0);
+      fallbackVector[0] = 0.4;
+      return fallbackVector;
     }
+    if (data.usage) {
+      const usage = {
+        inputTokens: data.usage.prompt_tokens,
+        outputTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, ModelType2.TEXT_EMBEDDING, text, usage);
+    }
+    logger4.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger4.error(`Error generating embedding: ${message}`);
+    throw error instanceof Error ? error : new Error(message);
   }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+// src/models/image.ts
+import { logger as logger5, ModelType as ModelType3 } from "@elizaos/core";
+async function handleImageGeneration(runtime, params) {
+  const n = params.n || 1;
+  const size = params.size || "1024x1024";
+  const prompt = params.prompt;
+  const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
+  logger5.log(`[OpenAI] Using IMAGE model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  try {
+    const response = await fetch(`${baseURL}/images/generations`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: modelName,
+        prompt,
+        n,
+        size
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to generate image: ${response.statusText}`);
     }
-
+    const data = await response.json();
+    const typedData = data;
+    return typedData;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 }
-function
-
-
-
-
-
-
-
-
+async function handleImageDescription(runtime, params) {
+  let imageUrl;
+  let promptText;
+  const modelName = getImageDescriptionModel(runtime);
+  logger5.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
+  const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
+  const DEFAULT_PROMPT = "Please analyze this image and provide a title and detailed description.";
+  if (typeof params === "string") {
+    imageUrl = params;
+    promptText = DEFAULT_PROMPT;
+  } else {
+    imageUrl = params.imageUrl;
+    promptText = params.prompt || DEFAULT_PROMPT;
+  }
+  const messages = [
+    {
+      role: "user",
+      content: [
+        { type: "text", text: promptText },
+        { type: "image_url", image_url: { url: imageUrl } }
+      ]
     }
-
+  ];
+  const baseURL = getBaseURL(runtime);
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        ...getAuthHeader(runtime)
+      },
+      body: JSON.stringify(requestBody)
+    });
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: ${response.status}`);
+    }
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    if (typedResult.usage) {
+      emitModelUsageEvent(runtime, ModelType3.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
+        inputTokens: typedResult.usage.prompt_tokens,
+        outputTokens: typedResult.usage.completion_tokens,
+        totalTokens: typedResult.usage.total_tokens
+      });
+    }
+    if (!content) {
+      return {
+        title: "Failed to analyze image",
+        description: "No response from API"
+      };
+    }
+    const isCustomPrompt = typeof params === "object" && Boolean(params.prompt) && params.prompt !== DEFAULT_PROMPT;
+    if (isCustomPrompt) {
+      return content;
+    }
+    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+    const title = titleMatch?.[1]?.trim();
+    if (!title) {
+      logger5.warn("Could not extract title from image description response");
+    }
+    const finalTitle = title || "Image Analysis";
+    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+    const processedResult = { title: finalTitle, description };
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger5.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
+}
+// src/models/audio.ts
+import { logger as logger7 } from "@elizaos/core";
+
+// src/utils/audio.ts
+import { logger as logger6 } from "@elizaos/core";
+var MAGIC_BYTES = {
+  WAV: {
+    HEADER: [82, 73, 70, 70],
+    IDENTIFIER: [87, 65, 86, 69]
+  },
+  MP3_ID3: [73, 68, 51],
+  OGG: [79, 103, 103, 83],
+  FLAC: [102, 76, 97, 67],
+  FTYP: [102, 116, 121, 112],
+  WEBM_EBML: [26, 69, 223, 163]
+};
+function matchBytes(buffer, offset, bytes) {
+  for (let i = 0;i < bytes.length; i++) {
+    if (buffer[offset + i] !== bytes[i])
+      return false;
+  }
+  return true;
+}
+function detectAudioMimeType(buffer) {
+  if (buffer.length < 12) {
+    return "application/octet-stream";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WAV.HEADER) && matchBytes(buffer, 8, MAGIC_BYTES.WAV.IDENTIFIER)) {
+    return "audio/wav";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.MP3_ID3) || buffer[0] === 255 && (buffer[1] & 224) === 224) {
+    return "audio/mpeg";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.OGG)) {
+    return "audio/ogg";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.FLAC)) {
+    return "audio/flac";
+  }
+  if (matchBytes(buffer, 4, MAGIC_BYTES.FTYP)) {
+    return "audio/mp4";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WEBM_EBML)) {
+    return "audio/webm";
+  }
+  logger6.warn("Could not detect audio format from buffer, using generic binary type");
+  return "application/octet-stream";
+}
+async function webStreamToNodeStream(webStream) {
+  try {
+    const { Readable } = await import("node:stream");
+    const reader = webStream.getReader();
+    return new Readable({
+      async read() {
+        try {
+          const { done, value } = await reader.read();
+          if (done) {
+            this.push(null);
+          } else {
+            this.push(value);
+          }
+        } catch (error) {
+          this.destroy(error);
+        }
+      },
+      destroy(error, callback) {
+        reader.cancel().finally(() => callback(error));
+      }
+    });
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger6.error(`Failed to load node:stream module: ${message}`);
+    throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
+  }
 }
+
+// src/models/audio.ts
 async function fetchTextToSpeech(runtime, options) {
   const defaultModel = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
   const defaultVoice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
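
Note: every handler above funnels token accounting through emitModelUsageEvent, which normalizes promptTokens/inputTokens and completionTokens/outputTokens before emitting EventType.MODEL_USED. A minimal sketch of a consumer, assuming a plugin-style events map keyed by EventType.MODEL_USED (the listener plugin itself is hypothetical):

  import { EventType } from "@elizaos/core";

  export const usageLoggerPlugin = {
    name: "usage-logger",
    description: "Logs MODEL_USED events emitted by plugin-openai",
    events: {
      [EventType.MODEL_USED]: [
        async (payload) => {
          // tokens arrive already normalized to { prompt, completion, total }
          console.log(`${payload.provider}/${payload.type}: ${payload.tokens.total} tokens`);
        }
      ]
    }
  };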
@@ -186,12 +484,203 @@ async function fetchTextToSpeech(runtime, options) {
       const err = await res.text();
       throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
     }
+    if (!res.body) {
+      throw new Error("OpenAI TTS response body is null");
+    }
+    if (!isBrowser()) {
+      return await webStreamToNodeStream(res.body);
+    }
     return res.body;
   } catch (err) {
     const message = err instanceof Error ? err.message : String(err);
     throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
   }
 }
+async function handleTranscription(runtime, input) {
+  let modelName = getSetting(runtime, "OPENAI_TRANSCRIPTION_MODEL", "gpt-4o-mini-transcribe");
+  logger7.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  let blob;
+  let extraParams = null;
+  if (input instanceof Blob || input instanceof File) {
+    blob = input;
+  } else if (Buffer.isBuffer(input)) {
+    const detectedMimeType = detectAudioMimeType(input);
+    logger7.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
+    const uint8Array = new Uint8Array(input);
+    blob = new Blob([uint8Array], { type: detectedMimeType });
+  } else if (typeof input === "object" && input !== null && input.audio != null) {
+    const params = input;
+    if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
+      throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
+    }
+    if (Buffer.isBuffer(params.audio)) {
+      let mimeType = params.mimeType;
+      if (!mimeType) {
+        mimeType = detectAudioMimeType(params.audio);
+        logger7.debug(`Auto-detected audio MIME type: ${mimeType}`);
+      } else {
+        logger7.debug(`Using provided MIME type: ${mimeType}`);
+      }
+      const uint8Array = new Uint8Array(params.audio);
+      blob = new Blob([uint8Array], { type: mimeType });
+    } else {
+      blob = params.audio;
+    }
+    extraParams = params;
+    if (typeof params.model === "string" && params.model) {
+      modelName = params.model;
+    }
+  } else {
+    throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
+  }
+  const mime = blob.type || "audio/webm";
+  const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
+  const formData = new FormData;
+  formData.append("file", blob, filename);
+  formData.append("model", String(modelName));
+  if (extraParams) {
+    if (typeof extraParams.language === "string") {
+      formData.append("language", String(extraParams.language));
+    }
+    if (typeof extraParams.response_format === "string") {
+      formData.append("response_format", String(extraParams.response_format));
+    }
+    if (typeof extraParams.prompt === "string") {
+      formData.append("prompt", String(extraParams.prompt));
+    }
+    if (typeof extraParams.temperature === "number") {
+      formData.append("temperature", String(extraParams.temperature));
+    }
+    if (Array.isArray(extraParams.timestampGranularities)) {
+      for (const g of extraParams.timestampGranularities) {
+        formData.append("timestamp_granularities[]", String(g));
+      }
+    }
+  }
+  try {
+    const response = await fetch(`${baseURL}/audio/transcriptions`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime)
+      },
+      body: formData
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
+    }
+    const data = await response.json();
+    return data.text || "";
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger7.error(`TRANSCRIPTION error: ${message}`);
+    throw error;
+  }
+}
+async function handleTextToSpeech(runtime, input) {
+  const options = typeof input === "string" ? { text: input } : input;
+  const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+  logger7.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
+  try {
+    const speechStream = await fetchTextToSpeech(runtime, options);
+    return speechStream;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger7.error(`Error in TEXT_TO_SPEECH: ${message}`);
+    throw error;
+  }
+}
+// src/models/object.ts
+import { logger as logger9, ModelType as ModelType4 } from "@elizaos/core";
+import { generateObject } from "ai";
+
+// src/utils/json.ts
+import { logger as logger8 } from "@elizaos/core";
+import { JSONParseError } from "ai";
+function getJsonRepairFunction() {
+  return async ({ text, error }) => {
+    try {
+      if (error instanceof JSONParseError) {
+        const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+        JSON.parse(cleanedText);
+        return cleanedText;
+      }
+      return null;
+    } catch (jsonError) {
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      logger8.warn(`Failed to repair JSON text: ${message}`);
+      return null;
+    }
+  };
+}
+
+// src/models/object.ts
+async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getModelFn(runtime);
+  logger9.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  if (schemaPresent) {
+    logger9.warn(`Schema provided but ignored: OpenAI object generation currently uses output=no-schema. The schema parameter has no effect.`);
+  }
+  try {
+    const { object, usage } = await generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+    }
+    return object;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger9.error(`[generateObject] Error: ${message}`);
+    throw error;
+  }
+}
+async function handleObjectSmall(runtime, params) {
+  return generateObjectByModelType(runtime, params, ModelType4.OBJECT_SMALL, getSmallModel);
+}
+async function handleObjectLarge(runtime, params) {
+  return generateObjectByModelType(runtime, params, ModelType4.OBJECT_LARGE, getLargeModel);
+}
+// src/models/tokenizer.ts
+import { ModelType as ModelType6 } from "@elizaos/core";
+
+// src/utils/tokenization.ts
+import { ModelType as ModelType5 } from "@elizaos/core";
+import { encodingForModel, getEncoding } from "js-tiktoken";
+function resolveTokenizerEncoding(modelName) {
+  const normalized = modelName.toLowerCase();
+  const fallbackEncoding = normalized.includes("4o") ? "o200k_base" : "cl100k_base";
+  try {
+    return encodingForModel(modelName);
+  } catch (error) {
+    return getEncoding(fallbackEncoding);
+  }
+}
+async function tokenizeText(runtime, model, prompt) {
+  const modelName = model === ModelType5.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  const tokens = resolveTokenizerEncoding(modelName).encode(prompt);
+  return tokens;
+}
+async function detokenizeText(runtime, model, tokens) {
+  const modelName = model === ModelType5.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  return resolveTokenizerEncoding(modelName).decode(tokens);
+}
+
+// src/models/tokenizer.ts
+async function handleTokenizerEncode(runtime, { prompt, modelType = ModelType6.TEXT_LARGE }) {
+  return await tokenizeText(runtime, modelType, prompt);
+}
+async function handleTokenizerDecode(runtime, { tokens, modelType = ModelType6.TEXT_LARGE }) {
+  return await detokenizeText(runtime, modelType, tokens);
+}
+// src/index.ts
 var openaiPlugin = {
   name: "openai",
   description: "OpenAI plugin",
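
Note: handleTranscription above now accepts a raw Buffer (with MIME sniffing via detectAudioMimeType) in addition to Blob/File and the object form. A minimal usage sketch, mirroring the openai_test_transcription test later in this diff (an initialized runtime and an audioBuffer are assumed):

  import { ModelType } from "@elizaos/core";

  // Buffer input: the container format is sniffed from magic bytes.
  const plain = await runtime.useModel(ModelType.TRANSCRIPTION, audioBuffer);

  // Object input: override the sniffed MIME type and pass extra form fields.
  const tuned = await runtime.useModel(ModelType.TRANSCRIPTION, {
    audio: audioBuffer,
    mimeType: "audio/ogg",
    language: "en",
    temperature: 0
  });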
@@ -211,368 +700,41 @@ var openaiPlugin = {
     OPENAI_EXPERIMENTAL_TELEMETRY: process.env.OPENAI_EXPERIMENTAL_TELEMETRY
   },
   async init(_config, runtime) {
-
-    resolve();
-    try {
-      if (!getApiKey(runtime) && !isBrowser()) {
-        logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
-        return;
-      }
-      try {
-        const baseURL = getBaseURL(runtime);
-        const response = await fetch(`${baseURL}/models`, {
-          headers: { ...getAuthHeader(runtime) }
-        });
-        if (!response.ok) {
-          logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
-          logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-        } else {
-          logger.log("OpenAI API key validated successfully");
-        }
-      } catch (fetchError) {
-        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
-        logger.warn(`Error validating OpenAI API key: ${message}`);
-        logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-      }
-    } catch (error) {
-      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
-      logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
-    }
-  });
+    initializeOpenAI(_config, runtime);
   },
   models: {
-    [
-
-      const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
-      if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
-        const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
-        logger.error(errorMsg);
-        throw new Error(errorMsg);
-      }
-      if (params === null) {
-        logger.debug("Creating test embedding for initialization");
-        const testVector = Array(embeddingDimension).fill(0);
-        testVector[0] = 0.1;
-        return testVector;
-      }
-      let text;
-      if (typeof params === "string") {
-        text = params;
-      } else if (typeof params === "object" && params.text) {
-        text = params.text;
-      } else {
-        logger.warn("Invalid input format for embedding");
-        const fallbackVector = Array(embeddingDimension).fill(0);
-        fallbackVector[0] = 0.2;
-        return fallbackVector;
-      }
-      if (!text.trim()) {
-        logger.warn("Empty text for embedding");
-        const emptyVector = Array(embeddingDimension).fill(0);
-        emptyVector[0] = 0.3;
-        return emptyVector;
-      }
-      const embeddingBaseURL = getEmbeddingBaseURL(runtime);
-      try {
-        const response = await fetch(`${embeddingBaseURL}/embeddings`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime, true),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: embeddingModelName,
-            input: text
-          })
-        });
-        if (!response.ok) {
-          logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.4;
-          return errorVector;
-        }
-        const data = await response.json();
-        if (!data?.data?.[0]?.embedding) {
-          logger.error("API returned invalid structure");
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.5;
-          return errorVector;
-        }
-        const embedding = data.data[0].embedding;
-        if (data.usage) {
-          const usage = {
-            inputTokens: data.usage.prompt_tokens,
-            outputTokens: 0,
-            totalTokens: data.usage.total_tokens
-          };
-          emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
-        }
-        logger.log(`Got valid embedding with length ${embedding.length}`);
-        return embedding;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        logger.error(`Error generating embedding: ${message}`);
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.6;
-        return errorVector;
-      }
+    [ModelType7.TEXT_EMBEDDING]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [ModelType7.TEXT_TOKENIZER_ENCODE]: async (runtime, params) => {
+      return handleTokenizerEncode(runtime, params);
     },
-    [
-      return
+    [ModelType7.TEXT_TOKENIZER_DECODE]: async (runtime, params) => {
+      return handleTokenizerDecode(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getSmallModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
-      logger.log(prompt);
-      const { text: openaiResponse, usage } = await generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
-      }
-      return openaiResponse;
+    [ModelType7.TEXT_SMALL]: async (runtime, params) => {
+      return handleTextSmall(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getLargeModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
-      logger.log(prompt);
-      const { text: openaiResponse, usage } = await generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
-      }
-      return openaiResponse;
+    [ModelType7.TEXT_LARGE]: async (runtime, params) => {
+      return handleTextLarge(runtime, params);
     },
-    [
-
-      const size = params.size || "1024x1024";
-      const prompt = params.prompt;
-      const modelName = "gpt-image-1";
-      logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      try {
-        const response = await fetch(`${baseURL}/images/generations`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: modelName,
-            prompt,
-            n,
-            size
-          })
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to generate image: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const typedData = data;
-        return typedData.data;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        throw error;
-      }
+    [ModelType7.IMAGE]: async (runtime, params) => {
+      return handleImageGeneration(runtime, params);
     },
-    [
-
-      let promptText;
-      const modelName = getImageDescriptionModel(runtime);
-      logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
-      const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
-      if (typeof params === "string") {
-        imageUrl = params;
-        promptText = "Please analyze this image and provide a title and detailed description.";
-      } else {
-        imageUrl = params.imageUrl;
-        promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
-      }
-      const messages = [
-        {
-          role: "user",
-          content: [
-            { type: "text", text: promptText },
-            { type: "image_url", image_url: { url: imageUrl } }
-          ]
-        }
-      ];
-      const baseURL = getBaseURL(runtime);
-      try {
-        const requestBody = {
-          model: modelName,
-          messages,
-          max_tokens: maxTokens
-        };
-        const response = await fetch(`${baseURL}/chat/completions`, {
-          method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            ...getAuthHeader(runtime)
-          },
-          body: JSON.stringify(requestBody)
-        });
-        if (!response.ok) {
-          throw new Error(`OpenAI API error: ${response.status}`);
-        }
-        const result = await response.json();
-        const typedResult = result;
-        const content = typedResult.choices?.[0]?.message?.content;
-        if (typedResult.usage) {
-          emitModelUsageEvent(runtime, ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
-            inputTokens: typedResult.usage.prompt_tokens,
-            outputTokens: typedResult.usage.completion_tokens,
-            totalTokens: typedResult.usage.total_tokens
-          });
-        }
-        if (!content) {
-          return {
-            title: "Failed to analyze image",
-            description: "No response from API"
-          };
-        }
-        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-        if (isCustomPrompt) {
-          return content;
-        }
-        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-        const title = titleMatch?.[1]?.trim() || "Image Analysis";
-        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-        const processedResult = { title, description };
-        return processedResult;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        logger.error(`Error analyzing image: ${message}`);
-        return {
-          title: "Failed to analyze image",
-          description: `Error: ${message}`
-        };
-      }
+    [ModelType7.IMAGE_DESCRIPTION]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     },
-    [
-
-      logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      let blob;
-      let extraParams = null;
-      if (input instanceof Blob || input instanceof File) {
-        blob = input;
-      } else if (typeof input === "object" && input !== null && input.audio != null) {
-        const params = input;
-        if (!(params.audio instanceof Blob) && !(params.audio instanceof File)) {
-          throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File. Wrap buffers as: new Blob([buffer], { type: 'audio/mpeg' })");
-        }
-        blob = params.audio;
-        extraParams = params;
-        if (typeof params.model === "string" && params.model) {
-          modelName = params.model;
-        }
-      } else {
-        throw new Error("TRANSCRIPTION expects a Blob/File or an object { audio: Blob/File, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
-      }
-      const mime = blob.type || "audio/webm";
-      const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
-      const formData = new FormData;
-      formData.append("file", blob, filename);
-      formData.append("model", String(modelName));
-      if (extraParams) {
-        if (typeof extraParams.language === "string") {
-          formData.append("language", String(extraParams.language));
-        }
-        if (typeof extraParams.response_format === "string") {
-          formData.append("response_format", String(extraParams.response_format));
-        }
-        if (typeof extraParams.prompt === "string") {
-          formData.append("prompt", String(extraParams.prompt));
-        }
-        if (typeof extraParams.temperature === "number") {
-          formData.append("temperature", String(extraParams.temperature));
-        }
-        if (Array.isArray(extraParams.timestampGranularities)) {
-          for (const g of extraParams.timestampGranularities) {
-            formData.append("timestamp_granularities[]", String(g));
-          }
-        }
-      }
-      try {
-        const response = await fetch(`${baseURL}/audio/transcriptions`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime)
-          },
-          body: formData
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
-        }
-        const data = await response.json();
-        return data.text || "";
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        logger.error(`TRANSCRIPTION error: ${message}`);
-        throw error;
-      }
+    [ModelType7.TRANSCRIPTION]: async (runtime, input) => {
+      return handleTranscription(runtime, input);
     },
-    [
-
-      const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-      logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
-      try {
-        const speechStream = await fetchTextToSpeech(runtime, options);
-        return speechStream;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
-        throw error;
-      }
+    [ModelType7.TEXT_TO_SPEECH]: async (runtime, input) => {
+      return handleTextToSpeech(runtime, input);
     },
-    [
-      return
+    [ModelType7.OBJECT_SMALL]: async (runtime, params) => {
+      return handleObjectSmall(runtime, params);
     },
-    [
-      return
+    [ModelType7.OBJECT_LARGE]: async (runtime, params) => {
+      return handleObjectLarge(runtime, params);
     }
   },
   tests: [
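
Note: after this refactor every models entry is a one-line delegate, and init no longer awaits key validation (initializeOpenAI runs fire-and-forget). Plugin registration is unchanged; a minimal sketch, assuming the standard @elizaos/core AgentRuntime constructor and a character object defined elsewhere:

  import { AgentRuntime } from "@elizaos/core";
  import openaiPlugin from "@elizaos/plugin-openai";

  // character is assumed to be defined by the host application.
  const runtime = new AgentRuntime({ character, plugins: [openaiPlugin] });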
@@ -584,12 +746,10 @@ var openaiPlugin = {
       fn: async (runtime) => {
         const baseURL = getBaseURL(runtime);
         const response = await fetch(`${baseURL}/models`, {
-          headers:
-            Authorization: `Bearer ${getApiKey(runtime)}`
-          }
+          headers: getAuthHeader(runtime)
         });
         const data = await response.json();
-
+        logger10.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
         if (!response.ok) {
           throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
         }
@@ -599,13 +759,13 @@ var openaiPlugin = {
       name: "openai_test_text_embedding",
       fn: async (runtime) => {
         try {
-          const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(ModelType7.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          logger10.log({ embedding }, "embedding");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in test_text_embedding: ${message}`);
           throw error;
         }
       }
@@ -614,16 +774,16 @@ var openaiPlugin = {
       name: "openai_test_text_large",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(ModelType7.TEXT_LARGE, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          logger10.log({ text }, "generated with test_text_large");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in test_text_large: ${message}`);
           throw error;
         }
       }
@@ -632,16 +792,16 @@ var openaiPlugin = {
       name: "openai_test_text_small",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(ModelType7.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          logger10.log({ text }, "generated with test_text_small");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in test_text_small: ${message}`);
           throw error;
         }
       }
@@ -649,17 +809,17 @@ var openaiPlugin = {
     {
       name: "openai_test_image_generation",
       fn: async (runtime) => {
-
+        logger10.log("openai_test_image_generation");
         try {
-          const image = await runtime.useModel(
+          const image = await runtime.useModel(ModelType7.IMAGE, {
             prompt: "A beautiful sunset over a calm ocean",
             n: 1,
             size: "1024x1024"
           });
-
+          logger10.log({ image }, "generated with test_image_generation");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in test_image_generation: ${message}`);
           throw error;
         }
       }
@@ -668,36 +828,36 @@ var openaiPlugin = {
       name: "image-description",
       fn: async (runtime) => {
         try {
-
+          logger10.log("openai_test_image_description");
           try {
-            const result = await runtime.useModel(
+            const result = await runtime.useModel(ModelType7.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
             if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+              logger10.log({ result }, "Image description");
             } else {
-
+              logger10.error("Invalid image description result format:", result);
             }
           } catch (e) {
             const message = e instanceof Error ? e.message : String(e);
-
+            logger10.error(`Error in image description test: ${message}`);
           }
         } catch (e) {
           const message = e instanceof Error ? e.message : String(e);
-
+          logger10.error(`Error in openai_test_image_description: ${message}`);
         }
       }
     },
     {
       name: "openai_test_transcription",
       fn: async (runtime) => {
-
+        logger10.log("openai_test_transcription");
         try {
           const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
           const arrayBuffer = await response.arrayBuffer();
-          const transcription = await runtime.useModel(
-
+          const transcription = await runtime.useModel(ModelType7.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+          logger10.log({ transcription }, "generated with test_transcription");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in test_transcription: ${message}`);
           throw error;
         }
       }
@@ -706,39 +866,41 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_encode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer encode!";
-        const tokens = await runtime.useModel(
+        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt });
         if (!Array.isArray(tokens) || tokens.length === 0) {
           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
         }
-
+        logger10.log({ tokens }, "Tokenized output");
       }
     },
     {
       name: "openai_test_text_tokenizer_decode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer decode!";
-        const tokens = await runtime.useModel(
-        const decodedText = await runtime.useModel(
+        const tokens = await runtime.useModel(ModelType7.TEXT_TOKENIZER_ENCODE, { prompt });
+        const decodedText = await runtime.useModel(ModelType7.TEXT_TOKENIZER_DECODE, {
+          tokens
+        });
         if (decodedText !== prompt) {
           throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
         }
-
+        logger10.log({ decodedText }, "Decoded text");
      }
     },
     {
       name: "openai_test_text_to_speech",
       fn: async (runtime) => {
         try {
-          const response = await
+          const response = await runtime.useModel(ModelType7.TEXT_TO_SPEECH, {
             text: "Hello, this is a test for text-to-speech."
           });
           if (!response) {
             throw new Error("Failed to generate speech");
           }
-
+          logger10.log("Generated speech successfully");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          logger10.error(`Error in openai_test_text_to_speech: ${message}`);
           throw error;
         }
       }
@@ -753,4 +915,4 @@ export {
   src_default as default
 };
 
-//# debugId=
+//# debugId=967FB48261E5AE3064756E2164756E21