@elizaos/plugin-openai 1.5.16 → 1.5.18
- package/dist/browser/index.browser.js +3 -3
- package/dist/browser/index.browser.js.map +17 -4
- package/dist/cjs/index.node.cjs +563 -489
- package/dist/cjs/index.node.js.map +17 -4
- package/dist/index.d.ts +2 -18
- package/dist/init.d.ts +5 -0
- package/dist/models/audio.d.ts +10 -0
- package/dist/models/embedding.d.ts +5 -0
- package/dist/models/image.d.ts +14 -0
- package/dist/models/index.d.ts +6 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/text.d.ts +9 -0
- package/dist/models/tokenizer.d.ts +9 -0
- package/dist/node/index.node.js +564 -494
- package/dist/node/index.node.js.map +17 -4
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/openai.d.ts +8 -0
- package/dist/types/index.d.ts +26 -0
- package/dist/utils/audio.d.ts +12 -0
- package/dist/utils/config.d.ts +70 -0
- package/dist/utils/events.d.ts +10 -0
- package/dist/utils/index.d.ts +5 -0
- package/dist/utils/json.d.ts +7 -0
- package/dist/utils/tokenization.d.ts +17 -0
- package/package.json +1 -1
package/dist/cjs/index.node.cjs
CHANGED
@@ -48,15 +48,18 @@ __export(exports_index_node, {
 module.exports = __toCommonJS(exports_index_node);
 
 // src/index.ts
-var
+var import_core13 = require("@elizaos/core");
+
+// src/init.ts
+var import_core2 = require("@elizaos/core");
+
+// src/utils/config.ts
 var import_core = require("@elizaos/core");
-var import_ai = require("ai");
-var import_js_tiktoken = require("js-tiktoken");
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
 function isBrowser() {
-  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
+  return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
 }
 function isProxyMode(runtime) {
   return isBrowser() && !!getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
@@ -95,13 +98,13 @@ function getEmbeddingApiKey(runtime) {
   return getApiKey(runtime);
 }
 function getSmallModel(runtime) {
-  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
 }
 function getLargeModel(runtime) {
-  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
 }
 function getImageDescriptionModel(runtime) {
-  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano")
+  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano");
 }
 function getExperimentalTelemetry(runtime) {
   const setting = getSetting(runtime, "OPENAI_EXPERIMENTAL_TELEMETRY", "false");
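Note on the config helpers: each model getter resolves through the same chain — a provider-specific setting, then a generic one, then a hard-coded default — with `runtime.getSetting` taking precedence over `process.env` at every step. A small TypeScript sketch of that precedence (the environment values are illustrative, not from this package):

```ts
// Sketch of the lookup chain behind getSmallModel; illustrative only.
type Runtime = { getSetting(key: string): string | undefined };

function getSetting(runtime: Runtime, key: string, defaultValue?: string): string | undefined {
  // Runtime-level settings win over the process environment.
  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
}

function getSmallModel(runtime: Runtime): string {
  // OPENAI_SMALL_MODEL overrides SMALL_MODEL; "gpt-4o-mini" is the last resort.
  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini")!;
}

// With SMALL_MODEL=my-small-model set (a made-up value) and no
// OPENAI_SMALL_MODEL, this returns "my-small-model"; with neither set,
// it returns the "gpt-4o-mini" default.
```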
@@ -110,121 +113,347 @@ function getExperimentalTelemetry(runtime) {
   import_core.logger.debug(`[OpenAI] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
   return result;
 }
+
+// src/init.ts
+function initializeOpenAI(_config, runtime) {
+  (async () => {
+    try {
+      if (!getApiKey(runtime) && !isBrowser()) {
+        import_core2.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
+        return;
+      }
+      try {
+        const baseURL = getBaseURL(runtime);
+        const response = await fetch(`${baseURL}/models`, {
+          headers: getAuthHeader(runtime)
+        });
+        if (!response.ok) {
+          import_core2.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
+          import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+        } else {
+          import_core2.logger.log("OpenAI API key validated successfully");
+        }
+      } catch (fetchError) {
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        import_core2.logger.warn(`Error validating OpenAI API key: ${message}`);
+        import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+      }
+    } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+      import_core2.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
+    }
+  })();
+}
+
+// src/models/text.ts
+var import_core4 = require("@elizaos/core");
+var import_ai = require("ai");
+
+// src/providers/openai.ts
+var import_openai = require("@ai-sdk/openai");
 function createOpenAIClient(runtime) {
   const baseURL = getBaseURL(runtime);
   const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "sk-proxy" : undefined);
   return import_openai.createOpenAI({ apiKey: apiKey ?? "", baseURL });
 }
-
-
-
-
-
-
-const
-
+
+// src/utils/events.ts
+var import_core3 = require("@elizaos/core");
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
+  const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
+  const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+  runtime.emitEvent(import_core3.EventType.MODEL_USED, {
+    provider: "openai",
+    type,
+    prompt,
+    tokens: {
+      prompt: promptTokens,
+      completion: completionTokens,
+      total: totalTokens
+    }
+  });
 }
-
+
+// src/models/text.ts
+async function generateTextByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
-
-
-
-
-
+  const experimentalTelemetry = getExperimentalTelemetry(runtime);
+  import_core4.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  import_core4.logger.log(params.prompt);
+  const {
+    prompt,
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    frequencyPenalty = 0.7,
+    presencePenalty = 0.7
+  } = params;
+  const { text: openaiResponse, usage } = await import_ai.generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? undefined,
+    temperature,
+    maxOutputTokens: maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    experimental_telemetry: {
+      isEnabled: experimentalTelemetry
+    }
+  });
+  if (usage) {
+    emitModelUsageEvent(runtime, modelType, prompt, usage);
+  }
+  return openaiResponse;
+}
+async function handleTextSmall(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_SMALL, getSmallModel);
+}
+async function handleTextLarge(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_LARGE, getLargeModel);
+}
+// src/models/embedding.ts
+var import_core5 = require("@elizaos/core");
+async function handleTextEmbedding(runtime, params) {
+  const embeddingModelName = getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small");
+  const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
+  if (!Object.values(import_core5.VECTOR_DIMS).includes(embeddingDimension)) {
+    const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core5.VECTOR_DIMS).join(", ")}`;
+    import_core5.logger.error(errorMsg);
+    throw new Error(errorMsg);
+  }
+  if (params === null) {
+    import_core5.logger.debug("Creating test embedding for initialization");
+    const testVector = Array(embeddingDimension).fill(0);
+    testVector[0] = 0.1;
+    return testVector;
   }
+  let text;
+  if (typeof params === "string") {
+    text = params;
+  } else if (typeof params === "object" && params.text) {
+    text = params.text;
+  } else {
+    const errorMsg = "Invalid input format for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.2;
+    return fallbackVector;
+  }
+  if (!text.trim()) {
+    const errorMsg = "Empty text for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.3;
+    return fallbackVector;
+  }
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
   try {
-    const
-
-
-
-
-
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime, true),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
     });
-    if (
-
+    if (!response.ok) {
+      import_core5.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+      throw new Error(`OpenAI API error: ${response.status} - ${response.statusText}`);
     }
-
-
-
-
-    const repairFunction = getJsonRepairFunction();
-    const repairedJsonString = await repairFunction({
-      text: error.text,
-      error
-    });
-    if (repairedJsonString) {
-      try {
-        const repairedObject = JSON.parse(repairedJsonString);
-        import_core.logger.info("[generateObject] Successfully repaired JSON.");
-        return repairedObject;
-      } catch (repairParseError) {
-        const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
-        import_core.logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
-        throw repairParseError;
-      }
-    } else {
-      import_core.logger.error("[generateObject] JSON repair failed.");
-      throw error;
-    }
-  } else {
-    const message = error instanceof Error ? error.message : String(error);
-    import_core.logger.error(`[generateObject] Unknown error: ${message}`);
-    throw error;
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      import_core5.logger.error("API returned invalid structure");
+      throw new Error("API returned invalid structure");
     }
+    const embedding = data.data[0].embedding;
+    if (!Array.isArray(embedding) || embedding.length !== embeddingDimension) {
+      const errorMsg = `Embedding length ${embedding?.length ?? 0} does not match configured dimension ${embeddingDimension}`;
+      import_core5.logger.error(errorMsg);
+      const fallbackVector = Array(embeddingDimension).fill(0);
+      fallbackVector[0] = 0.4;
+      return fallbackVector;
+    }
+    if (data.usage) {
+      const usage = {
+        inputTokens: data.usage.prompt_tokens,
+        outputTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, import_core5.ModelType.TEXT_EMBEDDING, text, usage);
+    }
+    import_core5.logger.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core5.logger.error(`Error generating embedding: ${message}`);
+    throw error instanceof Error ? error : new Error(message);
   }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+// src/models/image.ts
+var import_core6 = require("@elizaos/core");
+async function handleImageGeneration(runtime, params) {
+  const n = params.n || 1;
+  const size = params.size || "1024x1024";
+  const prompt = params.prompt;
+  const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
+  import_core6.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  try {
+    const response = await fetch(`${baseURL}/images/generations`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: modelName,
+        prompt,
+        n,
+        size
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to generate image: ${response.statusText}`);
     }
-
+    const data = await response.json();
+    const typedData = data;
+    return typedData;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 }
-function
-
-
-
-
-
-
-
-
+async function handleImageDescription(runtime, params) {
+  let imageUrl;
+  let promptText;
+  const modelName = getImageDescriptionModel(runtime);
+  import_core6.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
+  const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
+  const DEFAULT_PROMPT = "Please analyze this image and provide a title and detailed description.";
+  if (typeof params === "string") {
+    imageUrl = params;
+    promptText = DEFAULT_PROMPT;
+  } else {
+    imageUrl = params.imageUrl;
+    promptText = params.prompt || DEFAULT_PROMPT;
+  }
+  const messages = [
+    {
+      role: "user",
+      content: [
+        { type: "text", text: promptText },
+        { type: "image_url", image_url: { url: imageUrl } }
+      ]
     }
-
+  ];
+  const baseURL = getBaseURL(runtime);
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        ...getAuthHeader(runtime)
+      },
+      body: JSON.stringify(requestBody)
+    });
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: ${response.status}`);
+    }
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    if (typedResult.usage) {
+      emitModelUsageEvent(runtime, import_core6.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
+        inputTokens: typedResult.usage.prompt_tokens,
+        outputTokens: typedResult.usage.completion_tokens,
+        totalTokens: typedResult.usage.total_tokens
+      });
+    }
+    if (!content) {
+      return {
+        title: "Failed to analyze image",
+        description: "No response from API"
+      };
+    }
+    const isCustomPrompt = typeof params === "object" && Boolean(params.prompt) && params.prompt !== DEFAULT_PROMPT;
+    if (isCustomPrompt) {
+      return content;
+    }
+    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+    const title = titleMatch?.[1]?.trim();
+    if (!title) {
+      import_core6.logger.warn("Could not extract title from image description response");
+    }
+    const finalTitle = title || "Image Analysis";
+    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+    const processedResult = { title: finalTitle, description };
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core6.logger.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
+}
+// src/models/audio.ts
+var import_core8 = require("@elizaos/core");
+
+// src/utils/audio.ts
+var import_core7 = require("@elizaos/core");
+var MAGIC_BYTES = {
+  WAV: {
+    HEADER: [82, 73, 70, 70],
+    IDENTIFIER: [87, 65, 86, 69]
+  },
+  MP3_ID3: [73, 68, 51],
+  OGG: [79, 103, 103, 83],
+  FLAC: [102, 76, 97, 67],
+  FTYP: [102, 116, 121, 112],
+  WEBM_EBML: [26, 69, 223, 163]
+};
+function matchBytes(buffer, offset, bytes) {
+  for (let i = 0;i < bytes.length; i++) {
+    if (buffer[offset + i] !== bytes[i])
+      return false;
+  }
+  return true;
 }
 function detectAudioMimeType(buffer) {
   if (buffer.length < 12) {
     return "application/octet-stream";
   }
-  if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WAV.HEADER) && matchBytes(buffer, 8, MAGIC_BYTES.WAV.IDENTIFIER)) {
     return "audio/wav";
   }
-  if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.MP3_ID3) || buffer[0] === 255 && (buffer[1] & 224) === 224) {
     return "audio/mpeg";
   }
-  if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.OGG)) {
     return "audio/ogg";
   }
-  if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.FLAC)) {
     return "audio/flac";
   }
-  if (buffer
+  if (matchBytes(buffer, 4, MAGIC_BYTES.FTYP)) {
     return "audio/mp4";
   }
-  if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WEBM_EBML)) {
     return "audio/webm";
   }
-
+  import_core7.logger.warn("Could not detect audio format from buffer, using generic binary type");
   return "application/octet-stream";
 }
 async function webStreamToNodeStream(webStream) {
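Note on `MAGIC_BYTES`: the decimal arrays are ASCII container signatures — [82, 73, 70, 70] is "RIFF", [87, 65, 86, 69] is "WAVE", [79, 103, 103, 83] is "OggS", [73, 68, 51] is "ID3", [102, 76, 97, 67] is "fLaC", [102, 116, 121, 112] is "ftyp", and [26, 69, 223, 163] is the EBML magic 0x1A45DFA3 used by WebM. A standalone TypeScript sketch of the WAV branch, with a hand-built illustrative header:

```ts
// Mirrors detectAudioMimeType's WAV check: "RIFF" at offset 0, "WAVE" at offset 8.
// The buffer below is a fabricated header for illustration, not real audio.
const wavHeader = Buffer.from([
  0x52, 0x49, 0x46, 0x46, // "RIFF"
  0x00, 0x00, 0x00, 0x00, // chunk size (ignored by the sniffer)
  0x57, 0x41, 0x56, 0x45, // "WAVE"
]);

const isWav =
  wavHeader.subarray(0, 4).equals(Buffer.from("RIFF")) &&
  wavHeader.subarray(8, 12).equals(Buffer.from("WAVE"));

console.log(isWav); // true -> detectAudioMimeType would report "audio/wav"
```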
@@ -250,10 +479,12 @@ async function webStreamToNodeStream(webStream) {
     });
   } catch (error) {
     const message = error instanceof Error ? error.message : String(error);
-
+    import_core7.logger.error(`Failed to load node:stream module: ${message}`);
     throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
   }
 }
+
+// src/models/audio.ts
 async function fetchTextToSpeech(runtime, options) {
   const defaultModel = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
   const defaultVoice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
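Note on `webStreamToNodeStream`: the added line logs the module-load failure before throwing, making it explicit that the web-to-Node stream conversion is Node-only. For reference, on Node 18+ the same bridge is a single built-in call; a minimal sketch, not the package's actual implementation (which lazy-loads node:stream so browser bundles don't pull it in):

```ts
import { Readable } from "node:stream";
import type { ReadableStream as WebReadableStream } from "node:stream/web";

// Readable.fromWeb adapts a WHATWG ReadableStream to a Node Readable.
// Assumes a Node >= 18 runtime.
function toNodeStream(webStream: WebReadableStream): Readable {
  return Readable.fromWeb(webStream);
}
```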
@@ -295,6 +526,191 @@ async function fetchTextToSpeech(runtime, options) {
     throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
   }
 }
+async function handleTranscription(runtime, input) {
+  let modelName = getSetting(runtime, "OPENAI_TRANSCRIPTION_MODEL", "gpt-4o-mini-transcribe");
+  import_core8.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  let blob;
+  let extraParams = null;
+  if (input instanceof Blob || input instanceof File) {
+    blob = input;
+  } else if (Buffer.isBuffer(input)) {
+    const detectedMimeType = detectAudioMimeType(input);
+    import_core8.logger.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
+    const uint8Array = new Uint8Array(input);
+    blob = new Blob([uint8Array], { type: detectedMimeType });
+  } else if (typeof input === "object" && input !== null && input.audio != null) {
+    const params = input;
+    if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
+      throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
+    }
+    if (Buffer.isBuffer(params.audio)) {
+      let mimeType = params.mimeType;
+      if (!mimeType) {
+        mimeType = detectAudioMimeType(params.audio);
+        import_core8.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
+      } else {
+        import_core8.logger.debug(`Using provided MIME type: ${mimeType}`);
+      }
+      const uint8Array = new Uint8Array(params.audio);
+      blob = new Blob([uint8Array], { type: mimeType });
+    } else {
+      blob = params.audio;
+    }
+    extraParams = params;
+    if (typeof params.model === "string" && params.model) {
+      modelName = params.model;
+    }
+  } else {
+    throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
+  }
+  const mime = blob.type || "audio/webm";
+  const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
+  const formData = new FormData;
+  formData.append("file", blob, filename);
+  formData.append("model", String(modelName));
+  if (extraParams) {
+    if (typeof extraParams.language === "string") {
+      formData.append("language", String(extraParams.language));
+    }
+    if (typeof extraParams.response_format === "string") {
+      formData.append("response_format", String(extraParams.response_format));
+    }
+    if (typeof extraParams.prompt === "string") {
+      formData.append("prompt", String(extraParams.prompt));
+    }
+    if (typeof extraParams.temperature === "number") {
+      formData.append("temperature", String(extraParams.temperature));
+    }
+    if (Array.isArray(extraParams.timestampGranularities)) {
+      for (const g of extraParams.timestampGranularities) {
+        formData.append("timestamp_granularities[]", String(g));
+      }
+    }
+  }
+  try {
+    const response = await fetch(`${baseURL}/audio/transcriptions`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime)
+      },
+      body: formData
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
+    }
+    const data = await response.json();
+    return data.text || "";
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`TRANSCRIPTION error: ${message}`);
+    throw error;
+  }
+}
+async function handleTextToSpeech(runtime, input) {
+  const options = typeof input === "string" ? { text: input } : input;
+  const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+  import_core8.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
+  try {
+    const speechStream = await fetchTextToSpeech(runtime, options);
+    return speechStream;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
+    throw error;
+  }
+}
+// src/models/object.ts
+var import_core10 = require("@elizaos/core");
+var import_ai3 = require("ai");
+
+// src/utils/json.ts
+var import_core9 = require("@elizaos/core");
+var import_ai2 = require("ai");
+function getJsonRepairFunction() {
+  return async ({ text, error }) => {
+    try {
+      if (error instanceof import_ai2.JSONParseError) {
+        const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+        JSON.parse(cleanedText);
+        return cleanedText;
+      }
+      return null;
+    } catch (jsonError) {
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      import_core9.logger.warn(`Failed to repair JSON text: ${message}`);
+      return null;
+    }
+  };
+}
+
+// src/models/object.ts
+async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getModelFn(runtime);
+  import_core10.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  if (schemaPresent) {
+    import_core10.logger.warn(`Schema provided but ignored: OpenAI object generation currently uses output=no-schema. The schema parameter has no effect.`);
+  }
+  try {
+    const { object, usage } = await import_ai3.generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+    }
+    return object;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core10.logger.error(`[generateObject] Error: ${message}`);
+    throw error;
+  }
+}
+async function handleObjectSmall(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_SMALL, getSmallModel);
+}
+async function handleObjectLarge(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_LARGE, getLargeModel);
+}
+// src/models/tokenizer.ts
+var import_core12 = require("@elizaos/core");
+
+// src/utils/tokenization.ts
+var import_core11 = require("@elizaos/core");
+var import_js_tiktoken = require("js-tiktoken");
+function resolveTokenizerEncoding(modelName) {
+  const normalized = modelName.toLowerCase();
+  const fallbackEncoding = normalized.includes("4o") ? "o200k_base" : "cl100k_base";
+  try {
+    return import_js_tiktoken.encodingForModel(modelName);
+  } catch (error) {
+    return import_js_tiktoken.getEncoding(fallbackEncoding);
+  }
+}
+async function tokenizeText(runtime, model, prompt) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  const tokens = resolveTokenizerEncoding(modelName).encode(prompt);
+  return tokens;
+}
+async function detokenizeText(runtime, model, tokens) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  return resolveTokenizerEncoding(modelName).decode(tokens);
+}
+
+// src/models/tokenizer.ts
+async function handleTokenizerEncode(runtime, { prompt, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await tokenizeText(runtime, modelType, prompt);
+}
+async function handleTokenizerDecode(runtime, { tokens, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await detokenizeText(runtime, modelType, tokens);
+}
+// src/index.ts
 var openaiPlugin = {
   name: "openai",
   description: "OpenAI plugin",
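Note on `handleTranscription`: it accepts either a bare Blob/File/Buffer or an options object; Buffers get their MIME type sniffed via `detectAudioMimeType` before being wrapped in a Blob for the multipart upload. A usage sketch based on the call shapes the handler accepts — the file path and option values are hypothetical:

```ts
import { readFile } from "node:fs/promises";
import { ModelType, type IAgentRuntime } from "@elizaos/core";

async function transcribe(runtime: IAgentRuntime) {
  const audio = await readFile("./recording.ogg"); // hypothetical path

  // Bare Buffer: MIME type is auto-detected from magic bytes.
  const a = await runtime.useModel(ModelType.TRANSCRIPTION, audio);

  // Options object: override MIME type, language, or model explicitly.
  const b = await runtime.useModel(ModelType.TRANSCRIPTION, {
    audio,
    mimeType: "audio/ogg",
    language: "en",
    model: "whisper-1", // any transcription-capable model name
  });

  return { a, b };
}
```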
@@ -314,383 +730,41 @@ var openaiPlugin = {
     OPENAI_EXPERIMENTAL_TELEMETRY: process.env.OPENAI_EXPERIMENTAL_TELEMETRY
   },
   async init(_config, runtime) {
-
-      resolve();
-      try {
-        if (!getApiKey(runtime) && !isBrowser()) {
-          import_core.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
-          return;
-        }
-        try {
-          const baseURL = getBaseURL(runtime);
-          const response = await fetch(`${baseURL}/models`, {
-            headers: { ...getAuthHeader(runtime) }
-          });
-          if (!response.ok) {
-            import_core.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
-            import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-          } else {
-            import_core.logger.log("OpenAI API key validated successfully");
-          }
-        } catch (fetchError) {
-          const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
-          import_core.logger.warn(`Error validating OpenAI API key: ${message}`);
-          import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-        }
-      } catch (error) {
-        const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
-        import_core.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
-      }
-    });
+    initializeOpenAI(_config, runtime);
   },
   models: {
-    [
-
-      const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
-      if (!Object.values(import_core.VECTOR_DIMS).includes(embeddingDimension)) {
-        const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core.VECTOR_DIMS).join(", ")}`;
-        import_core.logger.error(errorMsg);
-        throw new Error(errorMsg);
-      }
-      if (params === null) {
-        import_core.logger.debug("Creating test embedding for initialization");
-        const testVector = Array(embeddingDimension).fill(0);
-        testVector[0] = 0.1;
-        return testVector;
-      }
-      let text;
-      if (typeof params === "string") {
-        text = params;
-      } else if (typeof params === "object" && params.text) {
-        text = params.text;
-      } else {
-        import_core.logger.warn("Invalid input format for embedding");
-        const fallbackVector = Array(embeddingDimension).fill(0);
-        fallbackVector[0] = 0.2;
-        return fallbackVector;
-      }
-      if (!text.trim()) {
-        import_core.logger.warn("Empty text for embedding");
-        const emptyVector = Array(embeddingDimension).fill(0);
-        emptyVector[0] = 0.3;
-        return emptyVector;
-      }
-      const embeddingBaseURL = getEmbeddingBaseURL(runtime);
-      try {
-        const response = await fetch(`${embeddingBaseURL}/embeddings`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime, true),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: embeddingModelName,
-            input: text
-          })
-        });
-        if (!response.ok) {
-          import_core.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.4;
-          return errorVector;
-        }
-        const data = await response.json();
-        if (!data?.data?.[0]?.embedding) {
-          import_core.logger.error("API returned invalid structure");
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.5;
-          return errorVector;
-        }
-        const embedding = data.data[0].embedding;
-        if (data.usage) {
-          const usage = {
-            inputTokens: data.usage.prompt_tokens,
-            outputTokens: 0,
-            totalTokens: data.usage.total_tokens
-          };
-          emitModelUsageEvent(runtime, import_core.ModelType.TEXT_EMBEDDING, text, usage);
-        }
-        import_core.logger.log(`Got valid embedding with length ${embedding.length}`);
-        return embedding;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error generating embedding: ${message}`);
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.6;
-        return errorVector;
-      }
+    [import_core13.ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.TEXT_TOKENIZER_ENCODE]: async (runtime, params) => {
+      return handleTokenizerEncode(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.TEXT_TOKENIZER_DECODE]: async (runtime, params) => {
+      return handleTokenizerDecode(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getSmallModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_SMALL, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_SMALL]: async (runtime, params) => {
+      return handleTextSmall(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getLargeModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_LARGE, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_LARGE]: async (runtime, params) => {
+      return handleTextLarge(runtime, params);
     },
-    [
-
-      const size = params.size || "1024x1024";
-      const prompt = params.prompt;
-      const modelName = "gpt-image-1";
-      import_core.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      try {
-        const response = await fetch(`${baseURL}/images/generations`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: modelName,
-            prompt,
-            n,
-            size
-          })
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to generate image: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const typedData = data;
-        return typedData.data;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        throw error;
-      }
+    [import_core13.ModelType.IMAGE]: async (runtime, params) => {
+      return handleImageGeneration(runtime, params);
     },
-    [
-
-      let promptText;
-      const modelName = getImageDescriptionModel(runtime);
-      import_core.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
-      const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
-      if (typeof params === "string") {
-        imageUrl = params;
-        promptText = "Please analyze this image and provide a title and detailed description.";
-      } else {
-        imageUrl = params.imageUrl;
-        promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
-      }
-      const messages = [
-        {
-          role: "user",
-          content: [
-            { type: "text", text: promptText },
-            { type: "image_url", image_url: { url: imageUrl } }
-          ]
-        }
-      ];
-      const baseURL = getBaseURL(runtime);
-      try {
-        const requestBody = {
-          model: modelName,
-          messages,
-          max_tokens: maxTokens
-        };
-        const response = await fetch(`${baseURL}/chat/completions`, {
-          method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            ...getAuthHeader(runtime)
-          },
-          body: JSON.stringify(requestBody)
-        });
-        if (!response.ok) {
-          throw new Error(`OpenAI API error: ${response.status}`);
-        }
-        const result = await response.json();
-        const typedResult = result;
-        const content = typedResult.choices?.[0]?.message?.content;
-        if (typedResult.usage) {
-          emitModelUsageEvent(runtime, import_core.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
-            inputTokens: typedResult.usage.prompt_tokens,
-            outputTokens: typedResult.usage.completion_tokens,
-            totalTokens: typedResult.usage.total_tokens
-          });
-        }
-        if (!content) {
-          return {
-            title: "Failed to analyze image",
-            description: "No response from API"
-          };
-        }
-        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-        if (isCustomPrompt) {
-          return content;
-        }
-        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-        const title = titleMatch?.[1]?.trim() || "Image Analysis";
-        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-        const processedResult = { title, description };
-        return processedResult;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error analyzing image: ${message}`);
-        return {
-          title: "Failed to analyze image",
-          description: `Error: ${message}`
-        };
-      }
+    [import_core13.ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     },
-    [
-
-      import_core.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      let blob;
-      let extraParams = null;
-      if (input instanceof Blob || input instanceof File) {
-        blob = input;
-      } else if (Buffer.isBuffer(input)) {
-        const detectedMimeType = detectAudioMimeType(input);
-        import_core.logger.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
-        blob = new Blob([input], { type: detectedMimeType });
-      } else if (typeof input === "object" && input !== null && input.audio != null) {
-        const params = input;
-        if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
-          throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
-        }
-        if (Buffer.isBuffer(params.audio)) {
-          let mimeType = params.mimeType;
-          if (!mimeType) {
-            mimeType = detectAudioMimeType(params.audio);
-            import_core.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
-          } else {
-            import_core.logger.debug(`Using provided MIME type: ${mimeType}`);
-          }
-          blob = new Blob([params.audio], { type: mimeType });
-        } else {
-          blob = params.audio;
-        }
-        extraParams = params;
-        if (typeof params.model === "string" && params.model) {
-          modelName = params.model;
-        }
-      } else {
-        throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
-      }
-      const mime = blob.type || "audio/webm";
-      const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
-      const formData = new FormData;
-      formData.append("file", blob, filename);
-      formData.append("model", String(modelName));
-      if (extraParams) {
-        if (typeof extraParams.language === "string") {
-          formData.append("language", String(extraParams.language));
-        }
-        if (typeof extraParams.response_format === "string") {
-          formData.append("response_format", String(extraParams.response_format));
-        }
-        if (typeof extraParams.prompt === "string") {
-          formData.append("prompt", String(extraParams.prompt));
-        }
-        if (typeof extraParams.temperature === "number") {
-          formData.append("temperature", String(extraParams.temperature));
-        }
-        if (Array.isArray(extraParams.timestampGranularities)) {
-          for (const g of extraParams.timestampGranularities) {
-            formData.append("timestamp_granularities[]", String(g));
-          }
-        }
-      }
-      try {
-        const response = await fetch(`${baseURL}/audio/transcriptions`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime)
-          },
-          body: formData
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
-        }
-        const data = await response.json();
-        return data.text || "";
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`TRANSCRIPTION error: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TRANSCRIPTION]: async (runtime, input) => {
+      return handleTranscription(runtime, input);
     },
-    [
-
-      const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-      import_core.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
-      try {
-        const speechStream = await fetchTextToSpeech(runtime, options);
-        return speechStream;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TEXT_TO_SPEECH]: async (runtime, input) => {
+      return handleTextToSpeech(runtime, input);
     },
-    [
-      return
+    [import_core13.ModelType.OBJECT_SMALL]: async (runtime, params) => {
+      return handleObjectSmall(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.OBJECT_LARGE]: async (runtime, params) => {
+      return handleObjectLarge(runtime, params);
     }
   },
   tests: [
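Note on the `models` map: every inline handler body has been replaced by a two-line delegation to the `handle*` functions extracted above, so consumers are unaffected — the plugin is still driven through `runtime.useModel`. A sketch of the unchanged caller side (the prompt text is illustrative; the embedding cast is for the sketch only):

```ts
import { ModelType, type IAgentRuntime } from "@elizaos/core";

// The plugin's models map routes these calls to handleTextSmall and
// handleTextEmbedding; the call sites do not change across this release.
async function demo(runtime: IAgentRuntime) {
  const reply = await runtime.useModel(ModelType.TEXT_SMALL, {
    prompt: "What is the nature of reality in 10 words?",
  });
  const vector = (await runtime.useModel(ModelType.TEXT_EMBEDDING, {
    text: reply,
  })) as number[];
  return { reply, dimensions: vector.length };
}
```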
@@ -702,12 +776,10 @@ var openaiPlugin = {
       fn: async (runtime) => {
         const baseURL = getBaseURL(runtime);
         const response = await fetch(`${baseURL}/models`, {
-          headers:
-            Authorization: `Bearer ${getApiKey(runtime)}`
-          }
+          headers: getAuthHeader(runtime)
         });
         const data = await response.json();
-
+        import_core13.logger.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
         if (!response.ok) {
           throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
         }
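Note on the test fixture: the hand-built `Authorization` header gives way to the shared `getAuthHeader` helper, which is also what `init` and the embedding path use (`getAuthHeader(runtime, true)`). Its body is not shown in this diff; a plausible reconstruction, inferred purely from the call sites — treat every detail here as an assumption:

```ts
// Hypothetical sketch of the helper in src/utils/config.ts; the published
// implementation may differ.
declare function getApiKey(runtime: unknown): string | undefined;
declare function getEmbeddingApiKey(runtime: unknown): string | undefined;

function getAuthHeader(runtime: unknown, forEmbedding = false): Record<string, string> {
  // The boolean mirrors the getAuthHeader(runtime, true) embedding call sites.
  const key = forEmbedding ? getEmbeddingApiKey(runtime) : getApiKey(runtime);
  return key ? { Authorization: `Bearer ${key}` } : {};
}
```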
@@ -717,13 +789,13 @@ var openaiPlugin = {
       name: "openai_test_text_embedding",
       fn: async (runtime) => {
         try {
-          const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(import_core13.ModelType.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          import_core13.logger.log({ embedding }, "embedding");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_embedding: ${message}`);
           throw error;
         }
       }
@@ -732,16 +804,16 @@ var openaiPlugin = {
       name: "openai_test_text_large",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_large");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_large: ${message}`);
           throw error;
         }
       }
@@ -750,16 +822,16 @@ var openaiPlugin = {
       name: "openai_test_text_small",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_small");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_small: ${message}`);
           throw error;
         }
       }
@@ -767,17 +839,17 @@ var openaiPlugin = {
     {
       name: "openai_test_image_generation",
       fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_image_generation");
         try {
-          const image = await runtime.useModel(
+          const image = await runtime.useModel(import_core13.ModelType.IMAGE, {
             prompt: "A beautiful sunset over a calm ocean",
             n: 1,
             size: "1024x1024"
           });
-
+          import_core13.logger.log({ image }, "generated with test_image_generation");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_image_generation: ${message}`);
           throw error;
         }
       }
@@ -786,36 +858,36 @@ var openaiPlugin = {
       name: "image-description",
       fn: async (runtime) => {
         try {
-
+          import_core13.logger.log("openai_test_image_description");
           try {
-            const result = await runtime.useModel(
+            const result = await runtime.useModel(import_core13.ModelType.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
             if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+              import_core13.logger.log({ result }, "Image description");
             } else {
-
+              import_core13.logger.error("Invalid image description result format:", result);
             }
           } catch (e) {
             const message = e instanceof Error ? e.message : String(e);
-
+            import_core13.logger.error(`Error in image description test: ${message}`);
           }
         } catch (e) {
           const message = e instanceof Error ? e.message : String(e);
-
+          import_core13.logger.error(`Error in openai_test_image_description: ${message}`);
         }
       }
     },
     {
       name: "openai_test_transcription",
       fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_transcription");
         try {
           const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
           const arrayBuffer = await response.arrayBuffer();
-          const transcription = await runtime.useModel(
-
+          const transcription = await runtime.useModel(import_core13.ModelType.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+          import_core13.logger.log({ transcription }, "generated with test_transcription");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_transcription: ${message}`);
           throw error;
         }
       }
@@ -824,39 +896,41 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_encode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer encode!";
-        const tokens = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
         if (!Array.isArray(tokens) || tokens.length === 0) {
           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
         }
-
+        import_core13.logger.log({ tokens }, "Tokenized output");
       }
     },
     {
       name: "openai_test_text_tokenizer_decode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer decode!";
-        const tokens = await runtime.useModel(
-        const decodedText = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+        const decodedText = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_DECODE, {
+          tokens
+        });
         if (decodedText !== prompt) {
           throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
         }
-
+        import_core13.logger.log({ decodedText }, "Decoded text");
      }
     },
     {
       name: "openai_test_text_to_speech",
       fn: async (runtime) => {
         try {
-          const response = await
+          const response = await runtime.useModel(import_core13.ModelType.TEXT_TO_SPEECH, {
             text: "Hello, this is a test for text-to-speech."
           });
           if (!response) {
             throw new Error("Failed to generate speech");
           }
-
+          import_core13.logger.log("Generated speech successfully");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in openai_test_text_to_speech: ${message}`);
           throw error;
         }
       }
@@ -867,4 +941,4 @@ var openaiPlugin = {
 };
 var src_default = openaiPlugin;
 
-//# debugId=
+//# debugId=9B5BF59394995A3864756E2164756E21