@elizaos/plugin-openai 1.5.16 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +2 -3
- package/dist/browser/index.browser.js.map +17 -7
- package/dist/cjs/index.node.cjs +612 -537
- package/dist/cjs/index.node.js.map +17 -4
- package/dist/index.d.ts +2 -18
- package/dist/init.d.ts +5 -0
- package/dist/models/audio.d.ts +10 -0
- package/dist/models/embedding.d.ts +5 -0
- package/dist/models/image.d.ts +14 -0
- package/dist/models/index.d.ts +6 -0
- package/dist/models/object.d.ts +9 -0
- package/dist/models/text.d.ts +9 -0
- package/dist/models/tokenizer.d.ts +9 -0
- package/dist/node/index.node.js +613 -548
- package/dist/node/index.node.js.map +17 -4
- package/dist/providers/index.d.ts +1 -0
- package/dist/providers/openai.d.ts +8 -0
- package/dist/types/index.d.ts +26 -0
- package/dist/utils/audio.d.ts +12 -0
- package/dist/utils/config.d.ts +70 -0
- package/dist/utils/events.d.ts +10 -0
- package/dist/utils/index.d.ts +5 -0
- package/dist/utils/json.d.ts +7 -0
- package/dist/utils/tokenization.d.ts +17 -0
- package/package.json +3 -2
package/dist/cjs/index.node.cjs
CHANGED
@@ -1,20 +1,7 @@
-var __create = Object.create;
-var __getProtoOf = Object.getPrototypeOf;
 var __defProp = Object.defineProperty;
 var __getOwnPropNames = Object.getOwnPropertyNames;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
-var __toESM = (mod, isNodeMode, target) => {
-  target = mod != null ? __create(__getProtoOf(mod)) : {};
-  const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
-  for (let key of __getOwnPropNames(mod))
-    if (!__hasOwnProp.call(to, key))
-      __defProp(to, key, {
-        get: () => mod[key],
-        enumerable: true
-      });
-  return to;
-};
 var __moduleCache = /* @__PURE__ */ new WeakMap;
 var __toCommonJS = (from) => {
   var entry = __moduleCache.get(from), desc;
@@ -48,15 +35,22 @@ __export(exports_index_node, {
 module.exports = __toCommonJS(exports_index_node);
 
 // src/index.ts
-var
+var import_core13 = require("@elizaos/core");
+
+// src/init.ts
+var import_core2 = require("@elizaos/core");
+
+// src/utils/config.ts
 var import_core = require("@elizaos/core");
-var import_ai = require("ai");
-var import_js_tiktoken = require("js-tiktoken");
 function getSetting(runtime, key, defaultValue) {
-
+  const value = runtime.getSetting(key);
+  if (value !== undefined && value !== null) {
+    return String(value);
+  }
+  return process.env[key] ?? defaultValue;
 }
 function isBrowser() {
-  return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
+  return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
 }
 function isProxyMode(runtime) {
   return isBrowser() && !!getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
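
Note: the rewritten getSetting above resolves configuration in a fixed order. A minimal TypeScript sketch of that order, assuming only that runtime.getSetting comes from @elizaos/core:

// Sketch: resolution order used by the new getSetting helper.
// 1. runtime.getSetting(key)  -- agent/character configuration wins
// 2. process.env[key]         -- process environment
// 3. defaultValue             -- caller-supplied fallback
function resolveSetting(
  runtime: { getSetting(key: string): unknown },
  key: string,
  defaultValue?: string
): string | undefined {
  const value = runtime.getSetting(key);
  if (value !== undefined && value !== null) {
    return String(value); // non-string runtime values are coerced
  }
  return process.env[key] ?? defaultValue;
}
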
@@ -95,13 +89,13 @@ function getEmbeddingApiKey(runtime) {
   return getApiKey(runtime);
 }
 function getSmallModel(runtime) {
-  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
 }
 function getLargeModel(runtime) {
-  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-
+  return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
 }
 function getImageDescriptionModel(runtime) {
-  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano")
+  return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-5-nano");
 }
 function getExperimentalTelemetry(runtime) {
   const setting = getSetting(runtime, "OPENAI_EXPERIMENTAL_TELEMETRY", "false");
@@ -110,150 +104,351 @@ function getExperimentalTelemetry(runtime) {
   import_core.logger.debug(`[OpenAI] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
   return result;
 }
+
+// src/init.ts
+function initializeOpenAI(_config, runtime) {
+  (async () => {
+    try {
+      if (!getApiKey(runtime) && !isBrowser()) {
+        import_core2.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
+        return;
+      }
+      try {
+        const baseURL = getBaseURL(runtime);
+        const response = await fetch(`${baseURL}/models`, {
+          headers: getAuthHeader(runtime)
+        });
+        if (!response.ok) {
+          import_core2.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
+          import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+        } else {
+          import_core2.logger.log("OpenAI API key validated successfully");
+        }
+      } catch (fetchError) {
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        import_core2.logger.warn(`Error validating OpenAI API key: ${message}`);
+        import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+      }
+    } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+      import_core2.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
+    }
+  })();
+}
+
+// src/models/text.ts
+var import_core4 = require("@elizaos/core");
+var import_ai = require("ai");
+
+// src/providers/openai.ts
+var import_openai = require("@ai-sdk/openai");
 function createOpenAIClient(runtime) {
   const baseURL = getBaseURL(runtime);
   const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "sk-proxy" : undefined);
   return import_openai.createOpenAI({ apiKey: apiKey ?? "", baseURL });
 }
-
-
-
-
-
-
-const
-
+
+// src/utils/events.ts
+var import_core3 = require("@elizaos/core");
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
+  const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
+  const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+  const truncatedPrompt = typeof prompt === "string" ? prompt.length > 200 ? `${prompt.slice(0, 200)}…` : prompt : "";
+  runtime.emitEvent(import_core3.EventType.MODEL_USED, {
+    runtime,
+    source: "openai",
+    provider: "openai",
+    type,
+    prompt: truncatedPrompt,
+    tokens: {
+      prompt: promptTokens,
+      completion: completionTokens,
+      total: totalTokens
+    }
+  });
 }
-
+
+// src/models/text.ts
+async function generateTextByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
   const modelName = getModelFn(runtime);
-
-const
-
-
-
+  import_core4.logger.debug(`[OpenAI] ${modelType} model: ${modelName}`);
+  const generateParams = {
+    model: openai.languageModel(modelName),
+    prompt: params.prompt,
+    system: runtime.character.system ?? undefined,
+    temperature: params.temperature ?? 0.7,
+    maxOutputTokens: params.maxTokens ?? 8192,
+    frequencyPenalty: params.frequencyPenalty ?? 0.7,
+    presencePenalty: params.presencePenalty ?? 0.7,
+    stopSequences: params.stopSequences ?? [],
+    experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) }
+  };
+  if (params.stream) {
+    const result = import_ai.streamText(generateParams);
+    return {
+      textStream: result.textStream,
+      text: result.text,
+      usage: result.usage.then((u) => u ? {
+        promptTokens: u.inputTokens ?? 0,
+        completionTokens: u.outputTokens ?? 0,
+        totalTokens: (u.inputTokens ?? 0) + (u.outputTokens ?? 0)
+      } : undefined),
+      finishReason: result.finishReason
+    };
   }
+  const { text, usage } = await import_ai.generateText(generateParams);
+  if (usage)
+    emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+  return text;
+}
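
Worth flagging: when params.stream is set, generateTextByModelType now returns the AI SDK stream handle rather than a string. A sketch of consuming that return shape; the field names mirror the object literal above, while the consumer function itself is illustrative:

// Sketch: consuming the streaming result returned above.
async function consumeStream(result: {
  textStream: AsyncIterable<string>;
  text: Promise<string>;
  usage: Promise<{ promptTokens: number; completionTokens: number; totalTokens: number } | undefined>;
}): Promise<string> {
  for await (const chunk of result.textStream) {
    process.stdout.write(chunk); // incremental chunks as they arrive
  }
  const usage = await result.usage; // normalized token counts, if reported
  if (usage) console.log(`total tokens: ${usage.totalTokens}`);
  return result.text; // resolves to the full text once the stream ends
}
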
+async function handleTextSmall(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_SMALL, getSmallModel);
+}
+async function handleTextLarge(runtime, params) {
+  return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_LARGE, getLargeModel);
+}
+// src/models/embedding.ts
+var import_core5 = require("@elizaos/core");
+async function handleTextEmbedding(runtime, params) {
+  const embeddingModelName = getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small");
+  const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
+  if (!Object.values(import_core5.VECTOR_DIMS).includes(embeddingDimension)) {
+    const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core5.VECTOR_DIMS).join(", ")}`;
+    import_core5.logger.error(errorMsg);
+    throw new Error(errorMsg);
+  }
+  if (params === null) {
+    import_core5.logger.debug("Creating test embedding for initialization");
+    const testVector = Array(embeddingDimension).fill(0);
+    testVector[0] = 0.1;
+    return testVector;
+  }
+  let text;
+  if (typeof params === "string") {
+    text = params;
+  } else if (typeof params === "object" && params.text) {
+    text = params.text;
+  } else {
+    const errorMsg = "Invalid input format for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.2;
+    return fallbackVector;
+  }
+  if (!text.trim()) {
+    const errorMsg = "Empty text for embedding";
+    import_core5.logger.warn(errorMsg);
+    const fallbackVector = Array(embeddingDimension).fill(0);
+    fallbackVector[0] = 0.3;
+    return fallbackVector;
+  }
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
   try {
-const
-
-
-
-
-
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime, true),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
     });
-if (
-
+    if (!response.ok) {
+      import_core5.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+      throw new Error(`OpenAI API error: ${response.status} - ${response.statusText}`);
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-import_core.logger.error("[generateObject] JSON repair failed.");
-throw error;
-}
-} else {
-const message = error instanceof Error ? error.message : String(error);
-import_core.logger.error(`[generateObject] Unknown error: ${message}`);
-throw error;
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      import_core5.logger.error("API returned invalid structure");
+      throw new Error("API returned invalid structure");
+    }
+    const embedding = data.data[0].embedding;
+    if (!Array.isArray(embedding) || embedding.length !== embeddingDimension) {
+      const errorMsg = `Embedding length ${embedding?.length ?? 0} does not match configured dimension ${embeddingDimension}`;
+      import_core5.logger.error(errorMsg);
+      const fallbackVector = Array(embeddingDimension).fill(0);
+      fallbackVector[0] = 0.4;
+      return fallbackVector;
+    }
+    if (data.usage) {
+      const usage = {
+        inputTokens: data.usage.prompt_tokens,
+        outputTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, import_core5.ModelType.TEXT_EMBEDDING, text, usage);
     }
+    import_core5.logger.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core5.logger.error(`Error generating embedding: ${message}`);
+    throw error instanceof Error ? error : new Error(message);
   }
 }
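
Aside: handleTextEmbedding signals recoverable failures with sentinel vectors instead of throwing: a zero vector whose first component marks the cause (0.1 test vector, 0.2 invalid input, 0.3 empty text, 0.4 dimension mismatch). An illustrative caller-side check; the helper name is ours, not the plugin's:

// Sketch: recognizing the sentinel vectors produced by handleTextEmbedding.
// A real embedding is effectively never all zeros past index 0.
function isFallbackEmbedding(embedding: number[]): boolean {
  const markers = [0.1, 0.2, 0.3, 0.4]; // test, invalid input, empty text, dimension mismatch
  return markers.includes(embedding[0]) && embedding.slice(1).every((v) => v === 0);
}
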
-
-
-
-
-
-
-
-
-
-
-
-
-
+// src/models/image.ts
+var import_core6 = require("@elizaos/core");
+async function handleImageGeneration(runtime, params) {
+  const n = params.count || 1;
+  const size = params.size || "1024x1024";
+  const prompt = params.prompt;
+  const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
+  import_core6.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  try {
+    const response = await fetch(`${baseURL}/images/generations`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime),
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: modelName,
+        prompt,
+        n,
+        size
+      })
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to generate image: ${response.statusText}`);
     }
-
+    const data = await response.json();
+    const typedData = data;
+    return typedData.data;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 }
-function
-
-
-
-
-
-
-
-
+async function handleImageDescription(runtime, params) {
+  let imageUrl;
+  let promptText;
+  const modelName = getImageDescriptionModel(runtime);
+  import_core6.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
+  const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
+  const DEFAULT_PROMPT = "Please analyze this image and provide a title and detailed description.";
+  if (typeof params === "string") {
+    imageUrl = params;
+    promptText = DEFAULT_PROMPT;
+  } else {
+    imageUrl = params.imageUrl;
+    promptText = params.prompt || DEFAULT_PROMPT;
+  }
+  const messages = [
+    {
+      role: "user",
+      content: [
+        { type: "text", text: promptText },
+        { type: "image_url", image_url: { url: imageUrl } }
+      ]
     }
-
+  ];
+  const baseURL = getBaseURL(runtime);
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        ...getAuthHeader(runtime)
+      },
+      body: JSON.stringify(requestBody)
+    });
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: ${response.status}`);
+    }
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    if (typedResult.usage) {
+      emitModelUsageEvent(runtime, import_core6.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
+        inputTokens: typedResult.usage.prompt_tokens,
+        outputTokens: typedResult.usage.completion_tokens,
+        totalTokens: typedResult.usage.total_tokens
+      });
+    }
+    if (!content) {
+      return {
+        title: "Failed to analyze image",
+        description: "No response from API"
+      };
+    }
+    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+    const title = titleMatch?.[1]?.trim();
+    if (!title) {
+      import_core6.logger.warn("Could not extract title from image description response");
+    }
+    const finalTitle = title || "Image Analysis";
+    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+    const processedResult = { title: finalTitle, description };
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core6.logger.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
+}
+// src/models/audio.ts
+var import_core8 = require("@elizaos/core");
+
+// src/utils/audio.ts
+var import_core7 = require("@elizaos/core");
+var MAGIC_BYTES = {
+  WAV: {
+    HEADER: [82, 73, 70, 70],
+    IDENTIFIER: [87, 65, 86, 69]
+  },
+  MP3_ID3: [73, 68, 51],
+  OGG: [79, 103, 103, 83],
+  FLAC: [102, 76, 97, 67],
+  FTYP: [102, 116, 121, 112],
+  WEBM_EBML: [26, 69, 223, 163]
+};
+function matchBytes(buffer, offset, bytes) {
+  for (let i = 0;i < bytes.length; i++) {
+    if (buffer[offset + i] !== bytes[i])
+      return false;
+  }
+  return true;
 }
 function detectAudioMimeType(buffer) {
   if (buffer.length < 12) {
     return "application/octet-stream";
   }
-if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WAV.HEADER) && matchBytes(buffer, 8, MAGIC_BYTES.WAV.IDENTIFIER)) {
     return "audio/wav";
   }
-if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.MP3_ID3) || buffer[0] === 255 && (buffer[1] & 224) === 224) {
     return "audio/mpeg";
   }
-if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.OGG)) {
     return "audio/ogg";
   }
-if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.FLAC)) {
     return "audio/flac";
   }
-if (buffer
+  if (matchBytes(buffer, 4, MAGIC_BYTES.FTYP)) {
     return "audio/mp4";
   }
-if (buffer
+  if (matchBytes(buffer, 0, MAGIC_BYTES.WEBM_EBML)) {
     return "audio/webm";
   }
-
+  import_core7.logger.warn("Could not detect audio format from buffer, using generic binary type");
   return "application/octet-stream";
 }
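
For reference: the rewritten detector compares named magic-byte tables instead of raw buffer[...] literals; each table is an ASCII signature ([82, 73, 70, 70] is "RIFF", [73, 68, 51] is "ID3", [79, 103, 103, 83] is "OggS", and so on). The WAV case, spelled out as a standalone sketch:

// Sketch: the WAV check performed above.
// "RIFF" at offset 0 plus "WAVE" at offset 8 identifies a WAV container.
function looksLikeWav(buffer: Uint8Array): boolean {
  const riff = [0x52, 0x49, 0x46, 0x46]; // "RIFF"
  const wave = [0x57, 0x41, 0x56, 0x45]; // "WAVE"
  return riff.every((b, i) => buffer[i] === b) && wave.every((b, i) => buffer[8 + i] === b);
}
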
-
-
-    const { Readable } = await import("node:stream");
-    const reader = webStream.getReader();
-    return new Readable({
-      async read() {
-        try {
-          const { done, value } = await reader.read();
-          if (done) {
-            this.push(null);
-          } else {
-            this.push(value);
-          }
-        } catch (error) {
-          this.destroy(error);
-        }
-      },
-      destroy(error, callback) {
-        reader.cancel().finally(() => callback(error));
-      }
-    });
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    import_core.logger.error(`Failed to load node:stream module: ${message}`);
-    throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
-  }
-}
+
+// src/models/audio.ts
 async function fetchTextToSpeech(runtime, options) {
   const defaultModel = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
   const defaultVoice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
@@ -283,18 +478,196 @@ async function fetchTextToSpeech(runtime, options) {
     const err = await res.text();
     throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
   }
-
-    throw new Error("OpenAI TTS response body is null");
-  }
-  if (!isBrowser()) {
-    return await webStreamToNodeStream(res.body);
-  }
-  return res.body;
+    return await res.arrayBuffer();
   } catch (err) {
     const message = err instanceof Error ? err.message : String(err);
     throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
   }
 }
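
Worth flagging as a behavioral change for TEXT_TO_SPEECH consumers: fetchTextToSpeech previously returned res.body (wrapped in a Node Readable outside the browser via the now-removed webStreamToNodeStream helper) and now resolves to an ArrayBuffer in every environment. A sketch of adapting a caller that used to pipe the stream; the file name and helper are illustrative:

// Sketch: callers now receive the whole audio buffer up front.
import { writeFile } from "node:fs/promises";

async function saveSpeech(audio: ArrayBuffer, path = "speech.mp3"): Promise<void> {
  await writeFile(path, Buffer.from(audio)); // persist instead of piping a stream
}
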
+async function handleTranscription(runtime, input) {
+  let modelName = getSetting(runtime, "OPENAI_TRANSCRIPTION_MODEL", "gpt-4o-mini-transcribe");
+  import_core8.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
+  const baseURL = getBaseURL(runtime);
+  let blob;
+  let extraParams = null;
+  if (input instanceof Blob || input instanceof File) {
+    blob = input;
+  } else if (Buffer.isBuffer(input)) {
+    const detectedMimeType = detectAudioMimeType(input);
+    import_core8.logger.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
+    const uint8Array = new Uint8Array(input);
+    blob = new Blob([uint8Array], { type: detectedMimeType });
+  } else if (typeof input === "object" && input !== null && input.audio != null) {
+    const params = input;
+    if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
+      throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
+    }
+    if (Buffer.isBuffer(params.audio)) {
+      let mimeType = params.mimeType;
+      if (!mimeType) {
+        mimeType = detectAudioMimeType(params.audio);
+        import_core8.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
+      } else {
+        import_core8.logger.debug(`Using provided MIME type: ${mimeType}`);
+      }
+      const uint8Array = new Uint8Array(params.audio);
+      blob = new Blob([uint8Array], { type: mimeType });
+    } else {
+      blob = params.audio;
+    }
+    extraParams = params;
+    if (typeof params.model === "string" && params.model) {
+      modelName = params.model;
+    }
+  } else {
+    throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
+  }
+  const mime = blob.type || "audio/webm";
+  const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
+  const formData = new FormData;
+  formData.append("file", blob, filename);
+  formData.append("model", String(modelName));
+  if (extraParams) {
+    if (typeof extraParams.language === "string") {
+      formData.append("language", String(extraParams.language));
+    }
+    if (typeof extraParams.response_format === "string") {
+      formData.append("response_format", String(extraParams.response_format));
+    }
+    if (typeof extraParams.prompt === "string") {
+      formData.append("prompt", String(extraParams.prompt));
+    }
+    if (typeof extraParams.temperature === "number") {
+      formData.append("temperature", String(extraParams.temperature));
+    }
+    if (Array.isArray(extraParams.timestampGranularities)) {
+      for (const g of extraParams.timestampGranularities) {
+        formData.append("timestamp_granularities[]", String(g));
+      }
+    }
+  }
+  try {
+    const response = await fetch(`${baseURL}/audio/transcriptions`, {
+      method: "POST",
+      headers: {
+        ...getAuthHeader(runtime)
+      },
+      body: formData
+    });
+    if (!response.ok) {
+      throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
+    }
+    const data = await response.json();
+    return data.text || "";
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`TRANSCRIPTION error: ${message}`);
+    throw error;
+  }
+}
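
Note: handleTranscription accepts either a bare Blob/File/Buffer or an options object. A usage sketch of both shapes; the option names mirror the error message above, while the file path and model override are illustrative:

// Sketch: the two input shapes the TRANSCRIPTION handler accepts.
import { readFile } from "node:fs/promises";

async function buildTranscriptionInputs() {
  const audio = await readFile("recording.ogg"); // Buffer; format auto-detected via magic bytes

  const bareInput = audio; // 1) plain Buffer/Blob/File, all options from settings

  const optionsInput = {   // 2) options object with per-call overrides
    audio,
    mimeType: "audio/ogg",            // skips magic-byte detection
    language: "en",
    response_format: "json",
    timestampGranularities: ["word"], // sent as timestamp_granularities[]
    temperature: 0,
    model: "whisper-1",               // illustrative model override
  };
  return { bareInput, optionsInput };
}
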
+async function handleTextToSpeech(runtime, input) {
+  const options = typeof input === "string" ? { text: input } : input;
+  const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+  import_core8.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
+  try {
+    return await fetchTextToSpeech(runtime, options);
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core8.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
+    throw error;
+  }
+}
+// src/models/object.ts
+var import_core10 = require("@elizaos/core");
+var import_ai3 = require("ai");
+
+// src/utils/json.ts
+var import_core9 = require("@elizaos/core");
+var import_ai2 = require("ai");
+function getJsonRepairFunction() {
+  return async ({ text, error }) => {
+    try {
+      if (error instanceof import_ai2.JSONParseError) {
+        const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+        JSON.parse(cleanedText);
+        return cleanedText;
+      }
+      return null;
+    } catch (jsonError) {
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      import_core9.logger.warn(`Failed to repair JSON text: ${message}`);
+      return null;
+    }
+  };
+}
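
Aside: the repair hook fires only on the AI SDK's JSONParseError and merely strips markdown code fences before re-parsing. The transformation it performs, as a worked example with an illustrative payload:

// Sketch: the fence-stripping step applied by the repair hook.
const fenced = '```json\n{"answer": 42}\n```';
const cleaned = fenced.replace(/```json\n|\n```|```/g, "");
// cleaned === '{"answer": 42}'
JSON.parse(cleaned); // parses now; the hook returns cleaned on success, null otherwise
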
+
+// src/models/object.ts
+async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+  const openai = createOpenAIClient(runtime);
+  const modelName = getModelFn(runtime);
+  import_core10.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  if (schemaPresent) {
+    import_core10.logger.warn(`Schema provided but ignored: OpenAI object generation currently uses output=no-schema. The schema parameter has no effect.`);
+  }
+  try {
+    const { object, usage } = await import_ai3.generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+    }
+    return object;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    import_core10.logger.error(`[generateObject] Error: ${message}`);
+    throw error;
+  }
+}
+async function handleObjectSmall(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_SMALL, getSmallModel);
+}
+async function handleObjectLarge(runtime, params) {
+  return generateObjectByModelType(runtime, params, import_core10.ModelType.OBJECT_LARGE, getLargeModel);
+}
+// src/models/tokenizer.ts
+var import_core12 = require("@elizaos/core");
+
+// src/utils/tokenization.ts
+var import_core11 = require("@elizaos/core");
+var import_js_tiktoken = require("js-tiktoken");
+function resolveTokenizerEncoding(modelName) {
+  const normalized = modelName.toLowerCase();
+  const fallbackEncoding = normalized.includes("4o") ? "o200k_base" : "cl100k_base";
+  try {
+    return import_js_tiktoken.encodingForModel(modelName);
+  } catch (error) {
+    return import_js_tiktoken.getEncoding(fallbackEncoding);
+  }
+}
+async function tokenizeText(runtime, model, prompt) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  const tokens = resolveTokenizerEncoding(modelName).encode(prompt);
+  return tokens;
+}
+async function detokenizeText(runtime, model, tokens) {
+  const modelName = model === import_core11.ModelType.TEXT_SMALL ? getSmallModel(runtime) : getLargeModel(runtime);
+  return resolveTokenizerEncoding(modelName).decode(tokens);
+}
+
+// src/models/tokenizer.ts
+async function handleTokenizerEncode(runtime, { prompt, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await tokenizeText(runtime, modelType, prompt);
+}
+async function handleTokenizerDecode(runtime, { tokens, modelType = import_core12.ModelType.TEXT_LARGE }) {
+  return await detokenizeText(runtime, modelType, tokens);
+}
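
For reference: resolveTokenizerEncoding asks js-tiktoken for the model's own encoding and guesses only for unknown model names: o200k_base when the name contains "4o", cl100k_base otherwise. A sketch of the encode/decode round-trip the tokenizer tests below rely on:

// Sketch: round-trip with the same fallback rule as resolveTokenizerEncoding.
import { encodingForModel, getEncoding, type TiktokenModel } from "js-tiktoken";

function pickEncoding(modelName: string) {
  try {
    return encodingForModel(modelName as TiktokenModel);
  } catch {
    // Unknown model name: guess the encoding by model family.
    return getEncoding(modelName.toLowerCase().includes("4o") ? "o200k_base" : "cl100k_base");
  }
}

function roundTrip(modelName: string, prompt: string): string {
  const enc = pickEncoding(modelName);
  return enc.decode(enc.encode(prompt)); // equals prompt for round-trippable text
}
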
+// src/index.ts
 var openaiPlugin = {
   name: "openai",
   description: "OpenAI plugin",
@@ -314,383 +687,41 @@ var openaiPlugin = {
     OPENAI_EXPERIMENTAL_TELEMETRY: process.env.OPENAI_EXPERIMENTAL_TELEMETRY
   },
   async init(_config, runtime) {
-
-    resolve();
-    try {
-      if (!getApiKey(runtime) && !isBrowser()) {
-        import_core.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
-        return;
-      }
-      try {
-        const baseURL = getBaseURL(runtime);
-        const response = await fetch(`${baseURL}/models`, {
-          headers: { ...getAuthHeader(runtime) }
-        });
-        if (!response.ok) {
-          import_core.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
-          import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-        } else {
-          import_core.logger.log("OpenAI API key validated successfully");
-        }
-      } catch (fetchError) {
-        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
-        import_core.logger.warn(`Error validating OpenAI API key: ${message}`);
-        import_core.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-      }
-    } catch (error) {
-      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
-      import_core.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
-    }
-  });
+    initializeOpenAI(_config, runtime);
   },
   models: {
-[
-
-      const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
-      if (!Object.values(import_core.VECTOR_DIMS).includes(embeddingDimension)) {
-        const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core.VECTOR_DIMS).join(", ")}`;
-        import_core.logger.error(errorMsg);
-        throw new Error(errorMsg);
-      }
-      if (params === null) {
-        import_core.logger.debug("Creating test embedding for initialization");
-        const testVector = Array(embeddingDimension).fill(0);
-        testVector[0] = 0.1;
-        return testVector;
-      }
-      let text;
-      if (typeof params === "string") {
-        text = params;
-      } else if (typeof params === "object" && params.text) {
-        text = params.text;
-      } else {
-        import_core.logger.warn("Invalid input format for embedding");
-        const fallbackVector = Array(embeddingDimension).fill(0);
-        fallbackVector[0] = 0.2;
-        return fallbackVector;
-      }
-      if (!text.trim()) {
-        import_core.logger.warn("Empty text for embedding");
-        const emptyVector = Array(embeddingDimension).fill(0);
-        emptyVector[0] = 0.3;
-        return emptyVector;
-      }
-      const embeddingBaseURL = getEmbeddingBaseURL(runtime);
-      try {
-        const response = await fetch(`${embeddingBaseURL}/embeddings`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime, true),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: embeddingModelName,
-            input: text
-          })
-        });
-        if (!response.ok) {
-          import_core.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.4;
-          return errorVector;
-        }
-        const data = await response.json();
-        if (!data?.data?.[0]?.embedding) {
-          import_core.logger.error("API returned invalid structure");
-          const errorVector = Array(embeddingDimension).fill(0);
-          errorVector[0] = 0.5;
-          return errorVector;
-        }
-        const embedding = data.data[0].embedding;
-        if (data.usage) {
-          const usage = {
-            inputTokens: data.usage.prompt_tokens,
-            outputTokens: 0,
-            totalTokens: data.usage.total_tokens
-          };
-          emitModelUsageEvent(runtime, import_core.ModelType.TEXT_EMBEDDING, text, usage);
-        }
-        import_core.logger.log(`Got valid embedding with length ${embedding.length}`);
-        return embedding;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error generating embedding: ${message}`);
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.6;
-        return errorVector;
-      }
+    [import_core13.ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
+      return handleTextEmbedding(runtime, params);
     },
-[
-return
+    [import_core13.ModelType.TEXT_TOKENIZER_ENCODE]: async (runtime, params) => {
+      return handleTokenizerEncode(runtime, params);
     },
-[
-return
+    [import_core13.ModelType.TEXT_TOKENIZER_DECODE]: async (runtime, params) => {
+      return handleTokenizerDecode(runtime, params);
     },
-[
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getSmallModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_SMALL, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_SMALL]: async (runtime, params) => {
+      return handleTextSmall(runtime, params);
     },
-[
-
-      stopSequences = [],
-      maxTokens = 8192,
-      temperature = 0.7,
-      frequencyPenalty = 0.7,
-      presencePenalty = 0.7
-    }) => {
-      const openai = createOpenAIClient(runtime);
-      const modelName = getLargeModel(runtime);
-      const experimentalTelemetry = getExperimentalTelemetry(runtime);
-      import_core.logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
-      import_core.logger.log(prompt);
-      const { text: openaiResponse, usage } = await import_ai.generateText({
-        model: openai.languageModel(modelName),
-        prompt,
-        system: runtime.character.system ?? undefined,
-        temperature,
-        maxOutputTokens: maxTokens,
-        frequencyPenalty,
-        presencePenalty,
-        stopSequences,
-        experimental_telemetry: {
-          isEnabled: experimentalTelemetry
-        }
-      });
-      if (usage) {
-        emitModelUsageEvent(runtime, import_core.ModelType.TEXT_LARGE, prompt, usage);
-      }
-      return openaiResponse;
+    [import_core13.ModelType.TEXT_LARGE]: async (runtime, params) => {
+      return handleTextLarge(runtime, params);
     },
-[
-
-      const size = params.size || "1024x1024";
-      const prompt = params.prompt;
-      const modelName = "gpt-image-1";
-      import_core.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      try {
-        const response = await fetch(`${baseURL}/images/generations`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime),
-            "Content-Type": "application/json"
-          },
-          body: JSON.stringify({
-            model: modelName,
-            prompt,
-            n,
-            size
-          })
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to generate image: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const typedData = data;
-        return typedData.data;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        throw error;
-      }
+    [import_core13.ModelType.IMAGE]: async (runtime, params) => {
+      return handleImageGeneration(runtime, params);
     },
-[
-
-      let promptText;
-      const modelName = getImageDescriptionModel(runtime);
-      import_core.logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
-      const maxTokens = Number.parseInt(getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192", 10);
-      if (typeof params === "string") {
-        imageUrl = params;
-        promptText = "Please analyze this image and provide a title and detailed description.";
-      } else {
-        imageUrl = params.imageUrl;
-        promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
-      }
-      const messages = [
-        {
-          role: "user",
-          content: [
-            { type: "text", text: promptText },
-            { type: "image_url", image_url: { url: imageUrl } }
-          ]
-        }
-      ];
-      const baseURL = getBaseURL(runtime);
-      try {
-        const requestBody = {
-          model: modelName,
-          messages,
-          max_tokens: maxTokens
-        };
-        const response = await fetch(`${baseURL}/chat/completions`, {
-          method: "POST",
-          headers: {
-            "Content-Type": "application/json",
-            ...getAuthHeader(runtime)
-          },
-          body: JSON.stringify(requestBody)
-        });
-        if (!response.ok) {
-          throw new Error(`OpenAI API error: ${response.status}`);
-        }
-        const result = await response.json();
-        const typedResult = result;
-        const content = typedResult.choices?.[0]?.message?.content;
-        if (typedResult.usage) {
-          emitModelUsageEvent(runtime, import_core.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
-            inputTokens: typedResult.usage.prompt_tokens,
-            outputTokens: typedResult.usage.completion_tokens,
-            totalTokens: typedResult.usage.total_tokens
-          });
-        }
-        if (!content) {
-          return {
-            title: "Failed to analyze image",
-            description: "No response from API"
-          };
-        }
-        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-        if (isCustomPrompt) {
-          return content;
-        }
-        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-        const title = titleMatch?.[1]?.trim() || "Image Analysis";
-        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-        const processedResult = { title, description };
-        return processedResult;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error analyzing image: ${message}`);
-        return {
-          title: "Failed to analyze image",
-          description: `Error: ${message}`
-        };
-      }
+    [import_core13.ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
+      return handleImageDescription(runtime, params);
     },
-[
-
-      import_core.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-      const baseURL = getBaseURL(runtime);
-      let blob;
-      let extraParams = null;
-      if (input instanceof Blob || input instanceof File) {
-        blob = input;
-      } else if (Buffer.isBuffer(input)) {
-        const detectedMimeType = detectAudioMimeType(input);
-        import_core.logger.debug(`Auto-detected audio MIME type: ${detectedMimeType}`);
-        blob = new Blob([input], { type: detectedMimeType });
-      } else if (typeof input === "object" && input !== null && input.audio != null) {
-        const params = input;
-        if (!(params.audio instanceof Blob) && !(params.audio instanceof File) && !Buffer.isBuffer(params.audio)) {
-          throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
-        }
-        if (Buffer.isBuffer(params.audio)) {
-          let mimeType = params.mimeType;
-          if (!mimeType) {
-            mimeType = detectAudioMimeType(params.audio);
-            import_core.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
-          } else {
-            import_core.logger.debug(`Using provided MIME type: ${mimeType}`);
-          }
-          blob = new Blob([params.audio], { type: mimeType });
-        } else {
-          blob = params.audio;
-        }
-        extraParams = params;
-        if (typeof params.model === "string" && params.model) {
-          modelName = params.model;
-        }
-      } else {
-        throw new Error("TRANSCRIPTION expects a Blob/File/Buffer or an object { audio: Blob/File/Buffer, mimeType?, language?, response_format?, timestampGranularities?, prompt?, temperature?, model? }");
-      }
-      const mime = blob.type || "audio/webm";
-      const filename = blob.name || (mime.includes("mp3") || mime.includes("mpeg") ? "recording.mp3" : mime.includes("ogg") ? "recording.ogg" : mime.includes("wav") ? "recording.wav" : mime.includes("webm") ? "recording.webm" : "recording.bin");
-      const formData = new FormData;
-      formData.append("file", blob, filename);
-      formData.append("model", String(modelName));
-      if (extraParams) {
-        if (typeof extraParams.language === "string") {
-          formData.append("language", String(extraParams.language));
-        }
-        if (typeof extraParams.response_format === "string") {
-          formData.append("response_format", String(extraParams.response_format));
-        }
-        if (typeof extraParams.prompt === "string") {
-          formData.append("prompt", String(extraParams.prompt));
-        }
-        if (typeof extraParams.temperature === "number") {
-          formData.append("temperature", String(extraParams.temperature));
-        }
-        if (Array.isArray(extraParams.timestampGranularities)) {
-          for (const g of extraParams.timestampGranularities) {
-            formData.append("timestamp_granularities[]", String(g));
-          }
-        }
-      }
-      try {
-        const response = await fetch(`${baseURL}/audio/transcriptions`, {
-          method: "POST",
-          headers: {
-            ...getAuthHeader(runtime)
-          },
-          body: formData
-        });
-        if (!response.ok) {
-          throw new Error(`Failed to transcribe audio: ${response.status} ${response.statusText}`);
-        }
-        const data = await response.json();
-        return data.text || "";
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`TRANSCRIPTION error: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TRANSCRIPTION]: async (runtime, input) => {
+      return handleTranscription(runtime, input);
     },
-[
-
-      const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-      import_core.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
-      try {
-        const speechStream = await fetchTextToSpeech(runtime, options);
-        return speechStream;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        import_core.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
-        throw error;
-      }
+    [import_core13.ModelType.TEXT_TO_SPEECH]: async (runtime, input) => {
+      return handleTextToSpeech(runtime, input);
     },
-[
-return
+    [import_core13.ModelType.OBJECT_SMALL]: async (runtime, params) => {
+      return handleObjectSmall(runtime, params);
     },
-[
-return
+    [import_core13.ModelType.OBJECT_LARGE]: async (runtime, params) => {
+      return handleObjectLarge(runtime, params);
     }
   },
   tests: [
@@ -702,12 +733,10 @@ var openaiPlugin = {
       fn: async (runtime) => {
         const baseURL = getBaseURL(runtime);
         const response = await fetch(`${baseURL}/models`, {
-headers:
-          Authorization: `Bearer ${getApiKey(runtime)}`
-          }
+          headers: getAuthHeader(runtime)
         });
         const data = await response.json();
-
+        import_core13.logger.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
         if (!response.ok) {
           throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
         }
@@ -717,13 +746,13 @@ var openaiPlugin = {
       name: "openai_test_text_embedding",
       fn: async (runtime) => {
         try {
-const embedding = await runtime.useModel(
+          const embedding = await runtime.useModel(import_core13.ModelType.TEXT_EMBEDDING, {
             text: "Hello, world!"
           });
-
+          import_core13.logger.log({ embedding }, "embedding");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_embedding: ${message}`);
           throw error;
         }
       }
@@ -732,16 +761,16 @@ var openaiPlugin = {
       name: "openai_test_text_large",
       fn: async (runtime) => {
         try {
-const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_large");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_large: ${message}`);
           throw error;
         }
       }
@@ -750,16 +779,16 @@ var openaiPlugin = {
       name: "openai_test_text_small",
       fn: async (runtime) => {
         try {
-const text = await runtime.useModel(
+          const text = await runtime.useModel(import_core13.ModelType.TEXT_SMALL, {
             prompt: "What is the nature of reality in 10 words?"
           });
           if (text.length === 0) {
             throw new Error("Failed to generate text");
           }
-
+          import_core13.logger.log({ text }, "generated with test_text_small");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_text_small: ${message}`);
           throw error;
         }
       }
@@ -767,17 +796,17 @@ var openaiPlugin = {
     {
       name: "openai_test_image_generation",
      fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_image_generation");
         try {
-const image = await runtime.useModel(
+          const image = await runtime.useModel(import_core13.ModelType.IMAGE, {
             prompt: "A beautiful sunset over a calm ocean",
-
+            count: 1,
             size: "1024x1024"
           });
-
+          import_core13.logger.log({ image }, "generated with test_image_generation");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_image_generation: ${message}`);
           throw error;
         }
       }
@@ -786,36 +815,36 @@ var openaiPlugin = {
       name: "image-description",
       fn: async (runtime) => {
         try {
-
+          import_core13.logger.log("openai_test_image_description");
           try {
-const result = await runtime.useModel(
+            const result = await runtime.useModel(import_core13.ModelType.IMAGE_DESCRIPTION, "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");
             if (result && typeof result === "object" && "title" in result && "description" in result) {
-
+              import_core13.logger.log({ result }, "Image description");
             } else {
-
+              import_core13.logger.error("Invalid image description result format:", result);
             }
           } catch (e) {
             const message = e instanceof Error ? e.message : String(e);
-
+            import_core13.logger.error(`Error in image description test: ${message}`);
           }
         } catch (e) {
           const message = e instanceof Error ? e.message : String(e);
-
+          import_core13.logger.error(`Error in openai_test_image_description: ${message}`);
         }
       }
     },
     {
       name: "openai_test_transcription",
       fn: async (runtime) => {
-
+        import_core13.logger.log("openai_test_transcription");
         try {
           const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
           const arrayBuffer = await response.arrayBuffer();
-const transcription = await runtime.useModel(
-
+          const transcription = await runtime.useModel(import_core13.ModelType.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
+          import_core13.logger.log({ transcription }, "generated with test_transcription");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in test_transcription: ${message}`);
           throw error;
         }
       }
@@ -824,39 +853,85 @@ var openaiPlugin = {
       name: "openai_test_text_tokenizer_encode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer encode!";
-const tokens = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt, modelType: import_core13.ModelType.TEXT_SMALL });
         if (!Array.isArray(tokens) || tokens.length === 0) {
           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
         }
-
+        import_core13.logger.log({ tokens }, "Tokenized output");
       }
     },
     {
       name: "openai_test_text_tokenizer_decode",
       fn: async (runtime) => {
         const prompt = "Hello tokenizer decode!";
-const tokens = await runtime.useModel(
-const decodedText = await runtime.useModel(
+        const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt, modelType: import_core13.ModelType.TEXT_SMALL });
+        const decodedText = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_DECODE, {
+          tokens,
+          modelType: import_core13.ModelType.TEXT_SMALL
+        });
         if (decodedText !== prompt) {
           throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
         }
-
+        import_core13.logger.log({ decodedText }, "Decoded text");
       }
     },
     {
       name: "openai_test_text_to_speech",
       fn: async (runtime) => {
         try {
-const response = await
+          const response = await runtime.useModel(import_core13.ModelType.TEXT_TO_SPEECH, {
            text: "Hello, this is a test for text-to-speech."
          });
          if (!response) {
            throw new Error("Failed to generate speech");
          }
-
+          import_core13.logger.log("Generated speech successfully");
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          import_core13.logger.error(`Error in openai_test_text_to_speech: ${message}`);
+          throw error;
+        }
+      }
+    },
+    {
+      name: "openai_test_text_generation_large",
+      fn: async (runtime) => {
+        try {
+          const result = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
+            prompt: "Say hello in 5 words."
+          });
+          if (!result || result.length === 0) {
+            throw new Error("Text generation returned empty result");
+          }
+          import_core13.logger.log({ result }, "Text generation test completed");
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          import_core13.logger.error(`Error in openai_test_text_generation_large: ${message}`);
+          throw error;
+        }
+      }
+    },
+    {
+      name: "openai_test_streaming",
+      fn: async (runtime) => {
+        try {
+          const chunks = [];
+          const result = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
+            prompt: "Count from 1 to 5.",
+            onStreamChunk: (chunk) => {
+              chunks.push(chunk);
+            }
+          });
+          if (!result || result.length === 0) {
+            throw new Error("Streaming returned empty result");
+          }
+          if (chunks.length === 0) {
+            throw new Error("No streaming chunks received");
+          }
+          import_core13.logger.log({ chunks: chunks.length, result: result.substring(0, 50) }, "Streaming test completed");
         } catch (error) {
           const message = error instanceof Error ? error.message : String(error);
-
+          import_core13.logger.error(`Error in openai_test_streaming: ${message}`);
           throw error;
         }
       }
@@ -867,4 +942,4 @@ var openaiPlugin = {
 };
 var src_default = openaiPlugin;
 
-//# debugId=
+//# debugId=F03D6434951BC08A64756E2164756E21