@elizaos/plugin-openai 1.5.15 → 1.5.18
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +3 -2
- package/dist/browser/index.browser.js.map +20 -4
- package/dist/cjs/index.node.cjs +626 -466
- package/dist/cjs/index.node.js.map +17 -4
- package/dist/index.d.ts +1 -16
- package/dist/init.d.ts +5 -0
- package/dist/models/audio.d.ts +10 -0
- package/dist/models/embedding.d.ts +5 -0
- package/dist/models/image.d.ts +14 -0
- package/dist/models/index.d.ts +6 -0
- package/dist/models/object.d.ts +10 -0
- package/dist/models/text.d.ts +9 -0
- package/dist/models/tokenizer.d.ts +9 -0
- package/dist/node/index.node.js +633 -471
- package/dist/node/index.node.js.map +17 -4
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/openai.d.ts +8 -0
- package/dist/types/index.d.ts +26 -0
- package/dist/utils/audio.d.ts +12 -0
- package/dist/utils/config.d.ts +70 -0
- package/dist/utils/events.d.ts +10 -0
- package/dist/utils/index.d.ts +5 -0
- package/dist/utils/json.d.ts +7 -0
- package/dist/utils/tokenization.d.ts +17 -0
- package/package.json +1 -1
package/dist/cjs/index.node.cjs
CHANGED
@@ -1,7 +1,20 @@
+var __create = Object.create;
+var __getProtoOf = Object.getPrototypeOf;
 var __defProp = Object.defineProperty;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __toESM = (mod, isNodeMode, target) => {
+  target = mod != null ? __create(__getProtoOf(mod)) : {};
+  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
+  for (let key of __getOwnPropNames(mod))
+    if (!__hasOwnProp.call(to, key))
+      __defProp(to, key, {
+        get: () => mod[key],
+        enumerable: true
+      });
+  return to;
+};
 var __moduleCache = /* @__PURE__ */ new WeakMap;
 var __toCommonJS = (from) => {
   var entry = __moduleCache.get(from), desc;
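
For context, the __toESM helper added above is the standard bundler interop shim: it wraps a CommonJS exports object so ESM-style consumers get a "default" binding plus live getters for each named export. A minimal sketch of its effect on a hypothetical CJS module (not part of this package):

// Hypothetical CJS module: module.exports = { foo: 1 }
const cjsExports = { foo: 1 };

// Wrapping gives ESM import semantics over the same object:
const esm = __toESM(cjsExports, /* isNodeMode */ true);
console.log(esm.default.foo); // 1 -- "default" is the whole CJS exports object
console.log(esm.foo);         // 1 -- named export mirrored through a getter
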
@@ -35,15 +48,18 @@ __export(exports_index_node, {
 module.exports = __toCommonJS(exports_index_node);
 
 // src/index.ts
-var
+var import_core13 = require("@elizaos/core");
+
+// src/init.ts
+var import_core2 = require("@elizaos/core");
+
+// src/utils/config.ts
 var import_core = require("@elizaos/core");
-var import_ai = require("ai");
-var import_js_tiktoken = require("js-tiktoken");
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
 function isBrowser() {
-  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
+  return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
 }
 function isProxyMode(runtime) {
   return isBrowser() && !!getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
@@ -82,13 +98,13 @@ function getEmbeddingApiKey(runtime) {
   return getApiKey(runtime);
 }
 function getSmallModel(runtime) {
-  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
 }
 function getLargeModel(runtime) {
-  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
 }
 function getImageDescriptionModel(runtime) {
-  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano")
+  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano");
 }
 function getExperimentalTelemetry(runtime) {
   const setting = getSetting(runtime, "OPENAI_EXPERIMENTAL_TELEMETRY", "false");
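
These helpers all resolve through getSetting, which checks the agent runtime first, then process.env, then the built-in default — so the "gpt-4o-mini" / "gpt-4o" defaults only apply when neither a runtime setting nor an environment variable overrides them. A minimal sketch of that precedence (the stub runtime and override values are illustrative):

// Stub runtime that only knows one key (illustrative).
const runtime = {
  getSetting: (key) => (key === "OPENAI_SMALL_MODEL" ? "my-small-model" : undefined)
};

getSmallModel(runtime);                      // "my-small-model" (runtime setting wins)

process.env.LARGE_MODEL = "my-large-model";
getLargeModel(runtime);                      // "my-large-model" (environment consulted next)

delete process.env.LARGE_MODEL;
getLargeModel(runtime);                      // "gpt-4o" (built-in default applies last)
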
@@ -97,98 +113,378 @@ function getExperimentalTelemetry(runtime) {
   import_core.logger.debug(`[OpenAI] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
   return result;
 }
+
+// src/init.ts
+function initializeOpenAI(_config, runtime) {
+  (async () => {
+    try {
+      if (!getApiKey(runtime) && !isBrowser()) {
+        import_core2.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
+        return;
+      }
+      try {
+        const baseURL = getBaseURL(runtime);
+        const response = await fetch(`${baseURL}/models`, {
+          headers: getAuthHeader(runtime)
+        });
+        if (!response.ok) {
+          import_core2.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
+          import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+        } else {
+          import_core2.logger.log("OpenAI API key validated successfully");
+        }
+      } catch (fetchError) {
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        import_core2.logger.warn(`Error validating OpenAI API key: ${message}`);
+        import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+      }
+    } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+      import_core2.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
+    }
+  })();
+}
+
+// src/models/text.ts
+var import_core4 = require("@elizaos/core");
+var import_ai = require("ai");
+
+// src/providers/openai.ts
+var import_openai = require("@ai-sdk/openai");
 function createOpenAIClient(runtime) {
   const baseURL = getBaseURL(runtime);
   const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "sk-proxy" : undefined);
   return import_openai.createOpenAI({ apiKey: apiKey ?? "", baseURL });
 }
-
-
-
-
-
-
-const
-
+
+// src/utils/events.ts
+var import_core3 = require("@elizaos/core");
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
+  const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
+  const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+  runtime.emitEvent(import_core3.EventType.MODEL_USED, {
+    provider: "openai",
+    type,
+    prompt,
+    tokens: {
+      prompt: promptTokens,
+      completion: completionTokens,
+      total: totalTokens
+    }
+  });
 }
-
+
+// src/models/text.ts
+async function generateTextByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
-
-
-
-
-
+  const experimentalTelemetry = getExperimentalTelemetry(runtime);
+  import_core4.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  import_core4.logger.log(params.prompt);
+  const {
+    prompt,
+    stopSequences = [],
+    maxTokens = 8192,
+    temperature = 0.7,
+    frequencyPenalty = 0.7,
+    presencePenalty = 0.7
+  } = params;
+  const { text: openaiResponse, usage } = await import_ai.generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? undefined,
+    temperature,
+    maxOutputTokens: maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences,
+    experimental_telemetry: {
+      isEnabled: experimentalTelemetry
+    }
+  });
+  if (usage) {
+    emitModelUsageEvent(runtime, modelType, prompt, usage);
+  }
+  return openaiResponse;
+}
+async function handleTextSmall(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_SMALL, getSmallModel);
+}
+async function handleTextLarge(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_LARGE, getLargeModel);
+}
+// src/models/embedding.ts
+var import_core5 = require("@elizaos/core");
+async function handleTextEmbedding(runtime, params) {
+  const embeddingModelName = getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small");
+  const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
+  if (!Object.values(import_core5.VECTOR_DIMS).includes(embeddingDimension)) {
+    const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core5.VECTOR_DIMS).join(", ")}`;
+    import_core5.logger.error(errorMsg);
+    throw new Error(errorMsg);
+  }
+  if (params === null) {
+    import_core5.logger.debug("Creating test embedding for initialization");
+    const testVector = Array(embeddingDimension).fill(0);
+    testVector[0] = 0.1;
+    return testVector;
+  }
+  let text;
+  if (typeof params === "string") {
+    text = params;
+  } else if (typeof params === "object" && params.text) {
+    text = params.text;
+  } else {
+    const errorMsg = "Invalid input format for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.2;
+    return fallbackVector;
   }
+  if (!text.trim()) {
+    const errorMsg = "Empty text for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.3;
+    return fallbackVector;
+  }
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
   try {
-    const
-
-
-
-
-
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime, true),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
     });
-    if (
-
+    if (!response.ok) {
+      import_core5.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+      throw new Error(`OpenAI API error: ${response.status} - ${response.statusText}`);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        import_core.logger.error("[generateObject] JSON repair failed.");
-        throw error;
-      }
-    } else {
-      const message = error instanceof Error ? error.message : String(error);
-      import_core.logger.error(`[generateObject] Unknown error: ${message}`);
-      throw error;
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      import_core5.logger.error("API returned invalid structure");
+      throw new Error("API returned invalid structure");
+    }
+    const embedding = data.data[0].embedding;
+    if (!Array.isArray(embedding) || embedding.length !== embeddingDimension) {
+      const errorMsg = `Embedding length ${embedding?.length ?? 0} does not match configured dimension ${embeddingDimension}`;
+      import_core5.logger.error(errorMsg);
+      const fallbackVector = Array(embeddingDimension).fill(0);
+      fallbackVector[0] = 0.4;
+      return fallbackVector;
+    }
+    if (data.usage) {
+      const usage = {
+        inputTokens: data.usage.prompt_tokens,
+        outputTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, import_core5.ModelType.TEXT_EMBEDDING, text, usage);
     }
+    import_core5.logger.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core5.logger.error(`Error generating embedding: ${message}`);
+    throw error instanceof Error ? error : new Error(message);
   }
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+// src/models/image.ts
+var import_core6 = require("@elizaos/core");
+async function handleImageGeneration(runtime, params) {
+  const n = params.n || 1;
+  const size = params.size || "1024x1024";
+  const prompt = params.prompt;
+  const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
+  import_core6.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  try {
+    const response = await fetch(`${baseURL}/images/generations`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: modelName,
+        prompt,
+        n,
+        size
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to generate image: ${response.statusText}`);
     }
-
+    const data = await response.json();
+    const typedData = data;
+    return typedData;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 }
-function
-
-
-
-
-
-
-
-
+async function handleImageDescription(runtime, params) {
+  let imageUrl;
+  let promptText;
+  const modelName = getImageDescriptionModel(runtime);
+  import_core6.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
+  const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
+  const DEFAULT_PROMPT = "Please analyze this image and provide a title and detailed description.";
+  if (typeof params === "string") {
+    imageUrl = params;
+    promptText = DEFAULT_PROMPT;
+  } else {
+    imageUrl = params.imageUrl;
+    promptText = params.prompt || DEFAULT_PROMPT;
+  }
+  const messages = [
+    {
+      role: "user",
+      content: [
+        { type: "text", text: promptText },
+        { type: "image_url", image_url: { url: imageUrl } }
+      ]
     }
-
+  ];
+  const baseURL = getBaseURL(runtime);
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        ...getAuthHeader(runtime)
+      },
+      body: JSON.stringify(requestBody)
+    });
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: ${response.status}`);
+    }
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    if (typedResult.usage) {
+      emitModelUsageEvent(runtime, import_core6.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
+        inputTokens: typedResult.usage.prompt_tokens,
+        outputTokens: typedResult.usage.completion_tokens,
+        totalTokens: typedResult.usage.total_tokens
+      });
+    }
+    if (!content) {
+      return {
+        title: "Failed to analyze image",
+        description: "No response from API"
+      };
+    }
+    const isCustomPrompt = typeof params === "object" && Boolean(params.prompt) && params.prompt !== DEFAULT_PROMPT;
+    if (isCustomPrompt) {
+      return content;
+    }
+    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+    const title = titleMatch?.[1]?.trim();
+    if (!title) {
+      import_core6.logger.warn("Could not extract title from image description response");
+    }
+    const finalTitle = title || "Image Analysis";
+    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+    const processedResult = { title: finalTitle, description };
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core6.logger.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
+}
+// src/models/audio.ts
+var import_core8 = require("@elizaos/core");
+
+// src/utils/audio.ts
+var import_core7 = require("@elizaos/core");
+var MAGIC_BYTES = {
+  WAV: {
+    HEADER: [82, 73, 70, 70],
+    IDENTIFIER: [87, 65, 86, 69]
+  },
+  MP3_ID3: [73, 68, 51],
+  OGG: [79, 103, 103, 83],
+  FLAC: [102, 76, 97, 67],
+  FTYP: [102, 116, 121, 112],
+  WEBM_EBML: [26, 69, 223, 163]
+};
+function matchBytes(buffer, offset, bytes) {
+  for (let i = 0;i < bytes.length; i++) {
+    if (buffer[offset + i] !== bytes[i])
+      return false;
+  }
+  return true;
+}
+function detectAudioMimeType(buffer) {
+  if (buffer.length < 12) {
+    return "application/octet-stream";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WAV.HEADER) && matchBytes(buffer, 8, MAGIC_BYTES.WAV.IDENTIFIER)) {
+    return "audio/wav";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.MP3_ID3) || buffer[0] === 255 && (buffer[1] & 224) === 224) {
+    return "audio/mpeg";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.OGG)) {
+    return "audio/ogg";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.FLAC)) {
+    return "audio/flac";
+  }
+  if (matchBytes(buffer, 4, MAGIC_BYTES.FTYP)) {
+    return "audio/mp4";
+  }
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WEBM_EBML)) {
+    return "audio/webm";
+  }
+  import_core7.logger.warn("Could not detect audio format from buffer, using generic binary type");
+  return "application/octet-stream";
+}
+async function webStreamToNodeStream(webStream) {
+  try {
+    const { Readable } = await import("node:stream");
+    const reader = webStream.getReader();
+    return new Readable({
+      async read() {
+        try {
+          const { done, value } = await reader.read();
+          if (done) {
+            this.push(null);
+          } else {
+            this.push(value);
+          }
+        } catch (error) {
+          this.destroy(error);
+        }
+      },
+      destroy(error, callback) {
+        reader.cancel().finally(() => callback(error));
+      }
+    });
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core7.logger.error(`Failed to load node:stream module: ${message}`);
+    throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
+  }
 }
+
+// src/models/audio.ts
 async function fetchTextToSpeech(runtime, options) {
   const defaultModel = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
   const defaultVoice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
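
The detectAudioMimeType helper introduced above sniffs the container format from a buffer's leading magic bytes ("RIFF…WAVE" for WAV, "ID3" or an MPEG frame sync for MP3, "OggS" for Ogg, and so on), which is what lets the transcription path accept a bare Buffer without a caller-supplied MIME type. A minimal sketch of its behavior in Node (inputs fabricated for illustration):

// 12-byte WAV preamble: "RIFF" + chunk size + "WAVE".
const wavHeader = Buffer.concat([
  Buffer.from("RIFF"),   // bytes [82, 73, 70, 70]
  Buffer.alloc(4),       // chunk size, ignored by the sniffer
  Buffer.from("WAVE")    // bytes [87, 65, 86, 69]
]);
detectAudioMimeType(wavHeader);        // "audio/wav"

// "OggS" capture pattern, padded past the 12-byte minimum.
detectAudioMimeType(Buffer.concat([Buffer.from("OggS"), Buffer.alloc(8)])); // "audio/ogg"

// Unrecognized input falls back to the generic binary type (with a warning).
detectAudioMimeType(Buffer.alloc(16)); // "application/octet-stream"
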
@@ -218,12 +514,203 @@ async function fetchTextToSpeech(runtime, options) {
       const err = await res.text();
       throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
     }
+    if (!res.body) {
+      throw new Error("OpenAI TTS response body is null");
+    }
+    if (!isBrowser()) {
+      return await webStreamToNodeStream(res.body);
+    }
     return res.body;
   } catch (err) {
     const message = err instanceof Error ? err.message : String(err);
     throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
   }
 }
+async function handleTranscription(runtime, input) {
+  let modelName = getSetting(runtime, "OPENAI_TRANSCRIPTION_MODEL", "gpt-4o-mini-transcribe");
+  import_core8.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  let blob;
+  let extraParams = null;
+  if (input instanceof Blob || input instanceof File) {
+    blob = input;
+  } else if (Buffer.isBuffer(input)) {
+    const detectedMimeType = detectAudioMimeType(input);
+    import_core8.logger.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
+    const uint8Array = new Uint8Array(input);
+    blob = new Blob([uint8Array], { type: detectedMimeType });
+  } else if (typeof input === "object" && input !== null && input.audio != null) {
+    const params = input;
+    if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
+      throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
+    }
+    if (Buffer.isBuffer(params.audio)) {
+      let mimeType = params.mimeType;
+      if (!mimeType) {
+        mimeType = detectAudioMimeType(params.audio);
+        import_core8.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
+      } else {
+        import_core8.logger.debug(`Using provided MIME type: ${mimeType}`);
+      }
+      const uint8Array = new Uint8Array(params.audio);
+      blob = new Blob([uint8Array], { type: mimeType });
+    } else {
+      blob = params.audio;
+    }
+    extraParams = params;
+    if (typeof params.model === "string" && params.model) {
+      modelName = params.model;
+    }
+  } else {
+    throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
+  }
+  const mime = blob.type || "audio/webm";
+  const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
+  const formData = new FormData;
+  formData.append("file", blob, filename);
+  formData.append("model", String(modelName));
+  if (extraParams) {
+    if (typeof extraParams.language === "string") {
+      formData.append("language", String(extraParams.language));
+    }
+    if (typeof extraParams.response_format === "string") {
+      formData.append("response_format", String(extraParams.response_format));
+    }
+    if (typeof extraParams.prompt === "string") {
+      formData.append("prompt", String(extraParams.prompt));
+    }
+    if (typeof extraParams.temperature === "number") {
+      formData.append("temperature", String(extraParams.temperature));
+    }
+    if (Array.isArray(extraParams.timestampGranularities)) {
+      for (const g of extraParams.timestampGranularities) {
+        formData.append("timestamp_granularities[]", String(g));
+      }
+    }
+  }
+  try {
+    const response = await fetch(`${baseURL}/audio/transcriptions`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime)
+      },
+      body: formData
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
+    }
+    const data = await response.json();
+    return data.text || "";
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`TRANSCRIPTION error: ${message}`);
+    throw error;
+  }
+}
+async function handleTextToSpeech(runtime, input) {
+  const options = typeof input === "string" ? { text: input } : input;
+  const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+  import_core8.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
+  try {
+    const speechStream = await fetchTextToSpeech(runtime, options);
+    return speechStream;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
+    throw error;
+  }
+}
+// src/models/object.ts
+var import_core10 = require("@elizaos/core");
+var import_ai3 = require("ai");
+
+// src/utils/json.ts
+var import_core9 = require("@elizaos/core");
+var import_ai2 = require("ai");
+function getJsonRepairFunction() {
+  return async ({ text, error }) => {
+    try {
+      if (error instanceof import_ai2.JSONParseError) {
+        const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+        JSON.parse(cleanedText);
+        return cleanedText;
+      }
+      return null;
+    } catch (jsonError) {
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      import_core9.logger.warn(`Failed to repair JSON text: ${message}`);
+      return null;
+    }
+  };
+}
+
+// src/models/object.ts
+async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getModelFn(runtime);
+  import_core10.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  if (schemaPresent) {
+    import_core10.logger.warn(`Schema provided but ignored: OpenAI object generation currently uses output=no-schema. The schema parameter has no effect.`);
+  }
+  try {
+    const { object, usage } = await import_ai3.generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+    }
+    return object;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core10.logger.error(`[generateObject] Error: ${message}`);
+    throw error;
+  }
+}
+async function handleObjectSmall(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_SMALL, getSmallModel);
+}
+async function handleObjectLarge(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_LARGE, getLargeModel);
+}
+// src/models/tokenizer.ts
+var import_core12 = require("@elizaos/core");
+
+// src/utils/tokenization.ts
+var import_core11 = require("@elizaos/core");
+var import_js_tiktoken = require("js-tiktoken");
+function resolveTokenizerEncoding(modelName) {
+  const normalized = modelName.toLowerCase();
+  const fallbackEncoding = normalized.includes("4o") ? "o200k_base" : "cl100k_base";
+  try {
+    return import_js_tiktoken.encodingForModel(modelName);
+  } catch (error) {
+    return import_js_tiktoken.getEncoding(fallbackEncoding);
+  }
+}
+async function tokenizeText(runtime, model, prompt) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  const tokens = resolveTokenizerEncoding(modelName).encode(prompt);
+  return tokens;
+}
+async function detokenizeText(runtime, model, tokens) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  return resolveTokenizerEncoding(modelName).decode(tokens);
+}
+
+// src/models/tokenizer.ts
+async function handleTokenizerEncode(runtime, { prompt, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await tokenizeText(runtime, modelType, prompt);
+}
+async function handleTokenizerDecode(runtime, { tokens, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await detokenizeText(runtime, modelType, tokens);
+}
+// src/index.ts
 var openaiPlugin = {
   name: "openai",
   description: "OpenAI plugin",
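
Two of the utilities added in this hunk are worth calling out. getJsonRepairFunction feeds generateObject's experimental_repairText hook: on a JSONParseError it strips markdown code fences — the most common way a chat model wraps JSON — and re-validates with JSON.parse, returning null (so the original error propagates) for anything else. A rough sketch of that behavior (inputs fabricated for illustration):

const repair = getJsonRepairFunction();

// Given the JSONParseError instance the AI SDK passes in (not constructed here),
// a fenced payload is cleaned and re-parsed:
await repair({ text: '```json\n{"ok":true}\n```', error: jsonParseError });
// => '{"ok":true}'

// Any other error type yields null, so the original failure propagates:
await repair({ text: "not json at all", error: new Error("boom") }); // => null

resolveTokenizerEncoding takes a similar belt-and-suspenders approach for tokenization: it asks js-tiktoken for the model's own encoding and, for model names js-tiktoken does not recognize, falls back to o200k_base for "4o"-family names and cl100k_base otherwise.
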
@@ -243,368 +730,41 @@ var openaiPlugin = {
     OPENAI_EXPERIMENTAL_TELEMETRY: process.env.OPENAI_EXPERIMENTAL_TELEMETRY
   },
   async init(_config, runtime) {
-
-    resolve();
-    try {
-      if (!getApiKey(runtime) && !isBrowser()) {
-        import_core.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
-        return;
-      }
-      try {
-        const baseURL = getBaseURL(runtime);
-        const response = await fetch(`${baseURL}/models`, {
-          headers: { ...getAuthHeader(runtime) }
-        });
-        if (!response.ok) {
-          import_core.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
-          import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-        } else {
-          import_core.logger.log("OpenAI API key validated successfully");
-        }
-      } catch (fetchError) {
-        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
-        import_core.logger.warn(`Error validating OpenAI API key: ${message}`);
-        import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-      }
-    } catch (error) {
-      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
-      import_core.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
-    }
-  });
+    initializeOpenAI(_config, runtime);
   },
   models: {
-    [
-
-      const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
-      if (!Object.values(import_core.VECTOR_DIMS).includes(embeddingDimension)) {
-        const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core.VECTOR_DIMS).join(", ")}`;
-        import_core.logger.error(errorMsg);
-        throw new Error(errorMsg);
-      }
-      if (params === null) {
-        import_core.logger.debug("Creating test embedding for initialization");
-        const testVector = Array(embeddingDimension).fill(0);
-        testVector[0] = 0.1;
-        return testVector;
-      }
-      let text;
-      if (typeof params === "string") {
-        text = params;
-      } else if (typeof params === "object" && params.text) {
-        text = params.text;
-      } else {
-        import_core.logger.warn("Invalid input format for embedding");
-        const fallbackVector = Array(embeddingDimension).fill(0);
-        fallbackVector[0] = 0.2;
-        return fallbackVector;
-      }
-      if (!text.trim()) {
-        import_core.logger.warn("Empty text for embedding");
-        const emptyVector = Array(embeddingDimension).fill(0);
-        emptyVector[0] = 0.3;
-        return emptyVector;
-      }
-      const embeddingBaseURL = getEmbeddingBaseURL(runtime);
-      try {
-        const response = await fetch(`${embeddingBaseURL}/embeddings`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime, true),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: embeddingModelName,
-            input: text
-          })
-        });
-        if (!response.ok) {
-          import_core.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.4;
-          return errorVector;
-        }
-        const data = await response.json();
-        if (!data?.data?.[0]?.embedding) {
-          import_core.logger.error("API returned invalid structure");
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.5;
-          return errorVector;
-        }
-        const embedding = data.data[0].embedding;
-        if (data.usage) {
-          const usage = {
-            inputTokens: data.usage.prompt_tokens,
-            outputTokens: 0,
-            totalTokens: data.usage.total_tokens
-          };
-          emitModelUsageEvent(runtime, import_core.ModelType.TEXT_EMBEDDING, text, usage);
-        }
-        import_core.logger.log(`Got valid embedding with length ${embedding.length}`);
-        return embedding;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error generating embedding: ${message}`);
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.6;
-        return errorVector;
-      }
+    [import_core13.ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.TEXT_TOKENIZER_ENCODE]: async (runtime, params) => {
+      return handleTokenizerEncode(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.TEXT_TOKENIZER_DECODE]: async (runtime, params) => {
+      return handleTokenizerDecode(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getSmallModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_SMALL, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_SMALL]: async (runtime, params) => {
+      return handleTextSmall(runtime, params);
     },
-    [
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getLargeModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_LARGE, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_LARGE]: async (runtime, params) => {
+      return handleTextLarge(runtime, params);
     },
-    [
-
-      const size = params.size || "1024x1024";
-      const prompt = params.prompt;
-      const modelName = "gpt-image-1";
-      import_core.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      try {
-        const response = await fetch(`${baseURL}/images/generations`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: modelName,
-            prompt,
-            n,
-            size
-          })
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to generate image: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const typedData = data;
-        return typedData.data;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        throw error;
-      }
+    [import_core13.ModelType.IMAGE]: async (runtime, params) => {
+      return handleImageGeneration(runtime, params);
     },
-    [
-
-      let promptText;
-      const modelName = getImageDescriptionModel(runtime);
-      import_core.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
-      const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
-      if (typeof params === "string") {
-        imageUrl = params;
-        promptText = "Please analyze this image and provide a title and detailed description.";
-      } else {
-        imageUrl = params.imageUrl;
-        promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
-      }
-      const messages = [
-        {
-          role: "user",
-          content: [
-            { type: "text", text: promptText },
-            { type: "image_url", image_url: { url: imageUrl } }
-          ]
-        }
-      ];
-      const baseURL = getBaseURL(runtime);
-      try {
-        const requestBody = {
-          model: modelName,
-          messages,
-          max_tokens: maxTokens
-        };
-        const response = await fetch(`${baseURL}/chat/completions`, {
-          method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            ...getAuthHeader(runtime)
-          },
-          body: JSON.stringify(requestBody)
-        });
-        if (!response.ok) {
-          throw new Error(`OpenAI API error: ${response.status}`);
-        }
-        const result = await response.json();
-        const typedResult = result;
-        const content = typedResult.choices?.[0]?.message?.content;
-        if (typedResult.usage) {
-          emitModelUsageEvent(runtime, import_core.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
-            inputTokens: typedResult.usage.prompt_tokens,
-            outputTokens: typedResult.usage.completion_tokens,
-            totalTokens: typedResult.usage.total_tokens
-          });
-        }
-        if (!content) {
-          return {
-            title: "Failed to analyze image",
-            description: "No response from API"
-          };
-        }
-        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-        if (isCustomPrompt) {
-          return content;
-        }
-        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-        const title = titleMatch?.[1]?.trim() || "Image Analysis";
-        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-        const processedResult = { title, description };
-        return processedResult;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error analyzing image: ${message}`);
-        return {
-          title: "Failed to analyze image",
-          description: `Error: ${message}`
-        };
-      }
+    [import_core13.ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     },
-    [
-
-      import_core.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      let blob;
-      let extraParams = null;
-      if (input instanceof Blob || input instanceof File) {
-        blob = input;
-      } else if (typeof input === "object" && input !== null && input.audio != null) {
-        const params = input;
-        if (!(params.audio instanceof Blob) && !(params.audio instanceof File)) {
-          throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File. Wrap buffers as: new Blob([buffer], { type: 'audio/mpeg' })");
-        }
-        blob = params.audio;
-        extraParams = params;
-        if (typeof params.model === "string" && params.model) {
-          modelName = params.model;
-        }
-      } else {
-        throw new Error("TRANSCRIPTION expects a Blob/File or an object { audio: Blob/File, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
-      }
-      const mime = blob.type || "audio/webm";
-      const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
-      const formData = new FormData;
-      formData.append("file", blob, filename);
-      formData.append("model", String(modelName));
-      if (extraParams) {
-        if (typeof extraParams.language === "string") {
-          formData.append("language", String(extraParams.language));
-        }
-        if (typeof extraParams.response_format === "string") {
-          formData.append("response_format", String(extraParams.response_format));
-        }
-        if (typeof extraParams.prompt === "string") {
-          formData.append("prompt", String(extraParams.prompt));
-        }
-        if (typeof extraParams.temperature === "number") {
-          formData.append("temperature", String(extraParams.temperature));
-        }
-        if (Array.isArray(extraParams.timestampGranularities)) {
-          for (const g of extraParams.timestampGranularities) {
-            formData.append("timestamp_granularities[]", String(g));
-          }
-        }
-      }
-      try {
-        const response = await fetch(`${baseURL}/audio/transcriptions`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime)
-          },
-          body: formData
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
-        }
-        const data = await response.json();
-        return data.text || "";
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`TRANSCRIPTION error: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TRANSCRIPTION]: async (runtime, input) => {
+      return handleTranscription(runtime, input);
     },
-    [
-
-      const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-      import_core.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
-      try {
-        const speechStream = await fetchTextToSpeech(runtime, options);
-        return speechStream;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TEXT_TO_SPEECH]: async (runtime, input) => {
+      return handleTextToSpeech(runtime, input);
     },
-    [
-      return
+    [import_core13.ModelType.OBJECT_SMALL]: async (runtime, params) => {
+      return handleObjectSmall(runtime, params);
     },
-    [
-      return
+    [import_core13.ModelType.OBJECT_LARGE]: async (runtime, params) => {
+      return handleObjectLarge(runtime, params);
     }
   },
   tests: [
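
The net effect of this hunk: every inlined handler body in the models map is replaced with a one-line delegation to the module functions extracted above, so the runtime-facing surface is unchanged. Callers still go through runtime.useModel, exactly as the tests below do; a minimal sketch (prompt text illustrative, inside an async context):

const { ModelType } = require("@elizaos/core");

// Routes through the TEXT_SMALL entry to handleTextSmall:
const text = await runtime.useModel(ModelType.TEXT_SMALL, {
  prompt: "What is the nature of reality in 10 words?"
});

// Routes through TEXT_EMBEDDING to handleTextEmbedding, returning a number[]:
const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
  text: "Hello, world!"
});
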
@@ -616,12 +776,10 @@ var openaiPlugin = {
       fn: async (runtime) => {
         const baseURL = getBaseURL(runtime);
         const response = await fetch(`${baseURL}/models`, {
-          headers:
-            Authorization: `Bearer ${getApiKey(runtime)}`
-          }
+          headers: getAuthHeader(runtime)
         });
         const data = await response.json();
-
+        import_core13.logger.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
         if (!response.ok) {
           throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
         }
@@ -631,13 +789,13 @@ var openaiPlugin = {
       name: "openai_test_text_embedding",
       fn: async (runtime) => {
         try {
-          const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(import_core13.ModelType.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          import_core13.logger.log({ embedding }, "embedding");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_embedding: ${message}`);
           throw error;
         }
       }
@@ -646,16 +804,16 @@ var openaiPlugin = {
       name: "openai_test_text_large",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_large");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_large: ${message}`);
           throw error;
         }
       }
@@ -664,16 +822,16 @@ var openaiPlugin = {
       name: "openai_test_text_small",
       fn: async (runtime) => {
         try {
-          const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_small");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_small: ${message}`);
           throw error;
         }
       }
@@ -681,17 +839,17 @@ var openaiPlugin = {
     {
       name: "openai_test_image_generation",
       fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_image_generation");
         try {
-          const image = await runtime.useModel(
+          const image = await runtime.useModel(import_core13.ModelType.IMAGE, {
             prompt: "A beautiful sunset over a calm ocean",
             n: 1,
             size: "1024x1024"
           });
-
+          import_core13.logger.log({ image }, "generated with test_image_generation");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_image_generation: ${message}`);
           throw error;
         }
       }
@@ -700,36 +858,36 @@ var openaiPlugin = {
       name: "image-description",
       fn: async (runtime) => {
         try {
-
+          import_core13.logger.log("openai_test_image_description");
           try {
-            const result = await runtime.useModel(
+            const result = await runtime.useModel(import_core13.ModelType.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
             if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+              import_core13.logger.log({ result }, "Image description");
             } else {
-
+              import_core13.logger.error("Invalid image description result format:", result);
             }
           } catch (e) {
             const message = e instanceof Error ? e.message : String(e);
-
+            import_core13.logger.error(`Error in image description test: ${message}`);
           }
         } catch (e) {
           const message = e instanceof Error ? e.message : String(e);
-
+          import_core13.logger.error(`Error in openai_test_image_description: ${message}`);
         }
       }
     },
     {
       name: "openai_test_transcription",
       fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_transcription");
         try {
           const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
           const arrayBuffer = await response.arrayBuffer();
-          const transcription = await runtime.useModel(
-
+          const transcription = await runtime.useModel(import_core13.ModelType.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+          import_core13.logger.log({ transcription }, "generated with test_transcription");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_transcription: ${message}`);
           throw error;
         }
       }
@@ -738,39 +896,41 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_encode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer encode!";
-        const tokens = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
         if (!Array.isArray(tokens) || tokens.length === 0) {
           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
         }
-
+        import_core13.logger.log({ tokens }, "Tokenized output");
       }
     },
     {
       name: "openai_test_text_tokenizer_decode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer decode!";
-        const tokens = await runtime.useModel(
-        const decodedText = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+        const decodedText = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_DECODE, {
+          tokens
+        });
         if (decodedText !== prompt) {
           throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
         }
-
+        import_core13.logger.log({ decodedText }, "Decoded text");
      }
     },
     {
       name: "openai_test_text_to_speech",
       fn: async (runtime) => {
         try {
-          const response = await
+          const response = await runtime.useModel(import_core13.ModelType.TEXT_TO_SPEECH, {
             text: "Hello, this is a test for text-to-speech."
           });
           if (!response) {
             throw new Error("Failed to generate speech");
           }
-
+          import_core13.logger.log("Generated speech successfully");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in openai_test_text_to_speech: ${message}`);
           throw error;
         }
       }
@@ -781,4 +941,4 @@
 };
 var src_default = openaiPlugin;
 
-//# debugId=
+//# debugId=9B5BF59394995A3864756E2164756E21