@elizaos/plugin-openai 1.6.0 → 2.0.0-alpha.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/browser/index.browser.js +2 -2
- package/dist/browser/index.browser.js.map +18 -17
- package/dist/build.d.ts +13 -0
- package/dist/build.d.ts.map +1 -0
- package/dist/cjs/index.node.cjs +1027 -658
- package/dist/cjs/index.node.js.map +18 -17
- package/dist/generated/specs/specs.d.ts +55 -0
- package/dist/generated/specs/specs.d.ts.map +1 -0
- package/dist/index.browser.d.ts +1 -0
- package/dist/index.browser.d.ts.map +1 -0
- package/dist/index.d.ts +1 -5
- package/dist/index.d.ts.map +1 -0
- package/dist/index.node.d.ts +1 -0
- package/dist/index.node.d.ts.map +1 -0
- package/dist/init.d.ts +4 -5
- package/dist/init.d.ts.map +1 -0
- package/dist/models/audio.d.ts +9 -10
- package/dist/models/audio.d.ts.map +1 -0
- package/dist/models/embedding.d.ts +1 -3
- package/dist/models/embedding.d.ts.map +1 -0
- package/dist/models/image.d.ts +4 -13
- package/dist/models/image.d.ts.map +1 -0
- package/dist/models/index.d.ts +7 -5
- package/dist/models/index.d.ts.map +1 -0
- package/dist/models/object.d.ts +4 -9
- package/dist/models/object.d.ts.map +1 -0
- package/dist/models/research.d.ts +34 -0
- package/dist/models/research.d.ts.map +1 -0
- package/dist/models/text.d.ts +22 -3
- package/dist/models/text.d.ts.map +1 -0
- package/dist/models/tokenizer.d.ts +4 -9
- package/dist/models/tokenizer.d.ts.map +1 -0
- package/dist/node/index.node.js +1016 -644
- package/dist/node/index.node.js.map +18 -17
- package/dist/providers/index.d.ts +2 -1
- package/dist/providers/index.d.ts.map +1 -0
- package/dist/providers/openai.d.ts +3 -7
- package/dist/providers/openai.d.ts.map +1 -0
- package/dist/types/index.d.ts +329 -10
- package/dist/types/index.d.ts.map +1 -0
- package/dist/utils/audio.d.ts +6 -12
- package/dist/utils/audio.d.ts.map +1 -0
- package/dist/utils/config.d.ts +16 -59
- package/dist/utils/config.d.ts.map +1 -0
- package/dist/utils/events.d.ts +20 -9
- package/dist/utils/events.d.ts.map +1 -0
- package/dist/utils/index.d.ts +2 -1
- package/dist/utils/index.d.ts.map +1 -0
- package/dist/utils/json.d.ts +9 -6
- package/dist/utils/json.d.ts.map +1 -0
- package/dist/utils/tokenization.d.ts +5 -16
- package/dist/utils/tokenization.d.ts.map +1 -0
- package/package.json +37 -29
- package/LICENSE +0 -21
- package/README.md +0 -160
package/dist/cjs/index.node.cjs
CHANGED
|
@@ -26,384 +26,173 @@ var __export = (target, all) => {
|
|
|
26
26
|
});
|
|
27
27
|
};
|
|
28
28
|
|
|
29
|
-
//
|
|
29
|
+
// index.node.ts
|
|
30
30
|
var exports_index_node = {};
|
|
31
31
|
__export(exports_index_node, {
|
|
32
32
|
openaiPlugin: () => openaiPlugin,
|
|
33
|
-
default: () =>
|
|
33
|
+
default: () => typescript_default
|
|
34
34
|
});
|
|
35
35
|
module.exports = __toCommonJS(exports_index_node);
|
|
36
36
|
|
|
37
|
-
//
|
|
38
|
-
var
|
|
37
|
+
// index.ts
|
|
38
|
+
var import_core14 = require("@elizaos/core");
|
|
39
39
|
|
|
40
|
-
//
|
|
40
|
+
// init.ts
|
|
41
41
|
var import_core2 = require("@elizaos/core");
|
|
42
42
|
|
|
43
|
-
//
|
|
43
|
+
// utils/config.ts
|
|
44
44
|
var import_core = require("@elizaos/core");
|
|
45
|
+
function getEnvValue(key) {
|
|
46
|
+
if (typeof process === "undefined" || !process.env) {
|
|
47
|
+
return;
|
|
48
|
+
}
|
|
49
|
+
const value = process.env[key];
|
|
50
|
+
return value === undefined ? undefined : String(value);
|
|
51
|
+
}
|
|
45
52
|
function getSetting(runtime, key, defaultValue) {
|
|
46
53
|
const value = runtime.getSetting(key);
|
|
47
54
|
if (value !== undefined && value !== null) {
|
|
48
55
|
return String(value);
|
|
49
56
|
}
|
|
50
|
-
return
|
|
57
|
+
return getEnvValue(key) ?? defaultValue;
|
|
58
|
+
}
|
|
59
|
+
function getNumericSetting(runtime, key, defaultValue) {
|
|
60
|
+
const value = getSetting(runtime, key);
|
|
61
|
+
if (value === undefined) {
|
|
62
|
+
return defaultValue;
|
|
63
|
+
}
|
|
64
|
+
const parsed = Number.parseInt(value, 10);
|
|
65
|
+
if (!Number.isFinite(parsed)) {
|
|
66
|
+
throw new Error(`Setting '${key}' must be a valid integer, got: ${value}`);
|
|
67
|
+
}
|
|
68
|
+
return parsed;
|
|
69
|
+
}
|
|
70
|
+
function getBooleanSetting(runtime, key, defaultValue) {
|
|
71
|
+
const value = getSetting(runtime, key);
|
|
72
|
+
if (value === undefined) {
|
|
73
|
+
return defaultValue;
|
|
74
|
+
}
|
|
75
|
+
const normalized = value.toLowerCase();
|
|
76
|
+
return normalized === "true" || normalized === "1" || normalized === "yes";
|
|
51
77
|
}
|
|
52
78
|
function isBrowser() {
|
|
53
|
-
return typeof globalThis !== "undefined" &&
|
|
79
|
+
return typeof globalThis !== "undefined" && typeof globalThis.document !== "undefined";
|
|
54
80
|
}
|
|
55
81
|
function isProxyMode(runtime) {
|
|
56
82
|
return isBrowser() && !!getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
|
|
57
83
|
}
|
|
84
|
+
function getApiKey(runtime) {
|
|
85
|
+
return getSetting(runtime, "OPENAI_API_KEY");
|
|
86
|
+
}
|
|
87
|
+
function getEmbeddingApiKey(runtime) {
|
|
88
|
+
const embeddingApiKey = getSetting(runtime, "OPENAI_EMBEDDING_API_KEY");
|
|
89
|
+
if (embeddingApiKey) {
|
|
90
|
+
import_core.logger.debug("[OpenAI] Using specific embedding API key");
|
|
91
|
+
return embeddingApiKey;
|
|
92
|
+
}
|
|
93
|
+
import_core.logger.debug("[OpenAI] Falling back to general API key for embeddings");
|
|
94
|
+
return getApiKey(runtime);
|
|
95
|
+
}
|
|
58
96
|
function getAuthHeader(runtime, forEmbedding = false) {
|
|
59
|
-
if (isBrowser())
|
|
97
|
+
if (isBrowser() && !getBooleanSetting(runtime, "OPENAI_ALLOW_BROWSER_API_KEY", false)) {
|
|
60
98
|
return {};
|
|
99
|
+
}
|
|
61
100
|
const key = forEmbedding ? getEmbeddingApiKey(runtime) : getApiKey(runtime);
|
|
62
101
|
return key ? { Authorization: `Bearer ${key}` } : {};
|
|
63
102
|
}
|
|
64
103
|
function getBaseURL(runtime) {
|
|
65
104
|
const browserURL = getSetting(runtime, "OPENAI_BROWSER_BASE_URL");
|
|
66
|
-
const baseURL = isBrowser() && browserURL ? browserURL : getSetting(runtime, "OPENAI_BASE_URL"
|
|
67
|
-
import_core.logger.debug(`[OpenAI]
|
|
105
|
+
const baseURL = isBrowser() && browserURL ? browserURL : getSetting(runtime, "OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
|
|
106
|
+
import_core.logger.debug(`[OpenAI] Base URL: ${baseURL}`);
|
|
68
107
|
return baseURL;
|
|
69
108
|
}
|
|
70
109
|
function getEmbeddingBaseURL(runtime) {
|
|
71
|
-
const embeddingURL = isBrowser() ? getSetting(runtime, "OPENAI_BROWSER_EMBEDDING_URL")
|
|
110
|
+
const embeddingURL = isBrowser() ? getSetting(runtime, "OPENAI_BROWSER_EMBEDDING_URL") ?? getSetting(runtime, "OPENAI_BROWSER_BASE_URL") : getSetting(runtime, "OPENAI_EMBEDDING_URL");
|
|
72
111
|
if (embeddingURL) {
|
|
73
|
-
import_core.logger.debug(`[OpenAI] Using
|
|
112
|
+
import_core.logger.debug(`[OpenAI] Using embedding base URL: ${embeddingURL}`);
|
|
74
113
|
return embeddingURL;
|
|
75
114
|
}
|
|
76
|
-
import_core.logger.debug("[OpenAI] Falling back to general base URL for embeddings
|
|
115
|
+
import_core.logger.debug("[OpenAI] Falling back to general base URL for embeddings");
|
|
77
116
|
return getBaseURL(runtime);
|
|
78
117
|
}
|
|
79
|
-
function getApiKey(runtime) {
|
|
80
|
-
return getSetting(runtime, "OPENAI_API_KEY");
|
|
81
|
-
}
|
|
82
|
-
function getEmbeddingApiKey(runtime) {
|
|
83
|
-
const embeddingApiKey = getSetting(runtime, "OPENAI_EMBEDDING_API_KEY");
|
|
84
|
-
if (embeddingApiKey) {
|
|
85
|
-
import_core.logger.debug("[OpenAI] Using specific embedding API key (present)");
|
|
86
|
-
return embeddingApiKey;
|
|
87
|
-
}
|
|
88
|
-
import_core.logger.debug("[OpenAI] Falling back to general API key for embeddings.");
|
|
89
|
-
return getApiKey(runtime);
|
|
90
|
-
}
|
|
91
118
|
function getSmallModel(runtime) {
|
|
92
|
-
return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL"
|
|
119
|
+
return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL") ?? "gpt-5-mini";
|
|
93
120
|
}
|
|
94
121
|
function getLargeModel(runtime) {
|
|
95
|
-
return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL"
|
|
122
|
+
return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL") ?? "gpt-5";
|
|
123
|
+
}
|
|
124
|
+
function getEmbeddingModel(runtime) {
|
|
125
|
+
return getSetting(runtime, "OPENAI_EMBEDDING_MODEL") ?? "text-embedding-3-small";
|
|
96
126
|
}
|
|
97
127
|
function getImageDescriptionModel(runtime) {
|
|
98
|
-
return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL"
|
|
128
|
+
return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL") ?? "gpt-5-mini";
|
|
99
129
|
}
|
|
100
|
-
function
|
|
101
|
-
|
|
102
|
-
const normalizedSetting = String(setting).toLowerCase();
|
|
103
|
-
const result = normalizedSetting === "true";
|
|
104
|
-
import_core.logger.debug(`[OpenAI] Experimental telemetry in function: "${setting}" (type: ${typeof setting}, normalized: "${normalizedSetting}", result: ${result})`);
|
|
105
|
-
return result;
|
|
130
|
+
function getTranscriptionModel(runtime) {
|
|
131
|
+
return getSetting(runtime, "OPENAI_TRANSCRIPTION_MODEL") ?? "gpt-5-mini-transcribe";
|
|
106
132
|
}
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
function initializeOpenAI(_config, runtime) {
|
|
110
|
-
(async () => {
|
|
111
|
-
try {
|
|
112
|
-
if (!getApiKey(runtime) && !isBrowser()) {
|
|
113
|
-
import_core2.logger.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");
|
|
114
|
-
return;
|
|
115
|
-
}
|
|
116
|
-
try {
|
|
117
|
-
const baseURL = getBaseURL(runtime);
|
|
118
|
-
const response = await fetch(`${baseURL}/models`, {
|
|
119
|
-
headers: getAuthHeader(runtime)
|
|
120
|
-
});
|
|
121
|
-
if (!response.ok) {
|
|
122
|
-
import_core2.logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
|
|
123
|
-
import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
|
|
124
|
-
} else {
|
|
125
|
-
import_core2.logger.log("OpenAI API key validated successfully");
|
|
126
|
-
}
|
|
127
|
-
} catch (fetchError) {
|
|
128
|
-
const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
|
|
129
|
-
import_core2.logger.warn(`Error validating OpenAI API key: ${message}`);
|
|
130
|
-
import_core2.logger.warn("OpenAI functionality will be limited until a valid API key is provided");
|
|
131
|
-
}
|
|
132
|
-
} catch (error) {
|
|
133
|
-
const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
|
|
134
|
-
import_core2.logger.warn(`OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`);
|
|
135
|
-
}
|
|
136
|
-
})();
|
|
133
|
+
function getTTSModel(runtime) {
|
|
134
|
+
return getSetting(runtime, "OPENAI_TTS_MODEL") ?? "tts-1";
|
|
137
135
|
}
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
var import_core4 = require("@elizaos/core");
|
|
141
|
-
var import_ai = require("ai");
|
|
142
|
-
|
|
143
|
-
// src/providers/openai.ts
|
|
144
|
-
var import_openai = require("@ai-sdk/openai");
|
|
145
|
-
function createOpenAIClient(runtime) {
|
|
146
|
-
const baseURL = getBaseURL(runtime);
|
|
147
|
-
const apiKey = getApiKey(runtime) ?? (isProxyMode(runtime) ? "sk-proxy" : undefined);
|
|
148
|
-
return import_openai.createOpenAI({ apiKey: apiKey ?? "", baseURL });
|
|
136
|
+
function getTTSVoice(runtime) {
|
|
137
|
+
return getSetting(runtime, "OPENAI_TTS_VOICE") ?? "nova";
|
|
149
138
|
}
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
var import_core3 = require("@elizaos/core");
|
|
153
|
-
function emitModelUsageEvent(runtime, type, prompt, usage) {
|
|
154
|
-
const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
|
|
155
|
-
const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
|
|
156
|
-
const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
|
|
157
|
-
const truncatedPrompt = typeof prompt === "string" ? prompt.length > 200 ? `${prompt.slice(0, 200)}…` : prompt : "";
|
|
158
|
-
runtime.emitEvent(import_core3.EventType.MODEL_USED, {
|
|
159
|
-
runtime,
|
|
160
|
-
source: "openai",
|
|
161
|
-
provider: "openai",
|
|
162
|
-
type,
|
|
163
|
-
prompt: truncatedPrompt,
|
|
164
|
-
tokens: {
|
|
165
|
-
prompt: promptTokens,
|
|
166
|
-
completion: completionTokens,
|
|
167
|
-
total: totalTokens
|
|
168
|
-
}
|
|
169
|
-
});
|
|
139
|
+
function getTTSInstructions(runtime) {
|
|
140
|
+
return getSetting(runtime, "OPENAI_TTS_INSTRUCTIONS") ?? "";
|
|
170
141
|
}
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
async function generateTextByModelType(runtime, params, modelType, getModelFn) {
|
|
174
|
-
const openai = createOpenAIClient(runtime);
|
|
175
|
-
const modelName = getModelFn(runtime);
|
|
176
|
-
import_core4.logger.debug(`[OpenAI] ${modelType} model: ${modelName}`);
|
|
177
|
-
const generateParams = {
|
|
178
|
-
model: openai.languageModel(modelName),
|
|
179
|
-
prompt: params.prompt,
|
|
180
|
-
system: runtime.character.system ?? undefined,
|
|
181
|
-
temperature: params.temperature ?? 0.7,
|
|
182
|
-
maxOutputTokens: params.maxTokens ?? 8192,
|
|
183
|
-
frequencyPenalty: params.frequencyPenalty ?? 0.7,
|
|
184
|
-
presencePenalty: params.presencePenalty ?? 0.7,
|
|
185
|
-
stopSequences: params.stopSequences ?? [],
|
|
186
|
-
experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) }
|
|
187
|
-
};
|
|
188
|
-
if (params.stream) {
|
|
189
|
-
const result = import_ai.streamText(generateParams);
|
|
190
|
-
return {
|
|
191
|
-
textStream: result.textStream,
|
|
192
|
-
text: result.text,
|
|
193
|
-
usage: result.usage.then((u) => u ? {
|
|
194
|
-
promptTokens: u.inputTokens ?? 0,
|
|
195
|
-
completionTokens: u.outputTokens ?? 0,
|
|
196
|
-
totalTokens: (u.inputTokens ?? 0) + (u.outputTokens ?? 0)
|
|
197
|
-
} : undefined),
|
|
198
|
-
finishReason: result.finishReason
|
|
199
|
-
};
|
|
200
|
-
}
|
|
201
|
-
const { text, usage } = await import_ai.generateText(generateParams);
|
|
202
|
-
if (usage)
|
|
203
|
-
emitModelUsageEvent(runtime, modelType, params.prompt, usage);
|
|
204
|
-
return text;
|
|
142
|
+
function getImageModel(runtime) {
|
|
143
|
+
return getSetting(runtime, "OPENAI_IMAGE_MODEL") ?? "dall-e-3";
|
|
205
144
|
}
|
|
206
|
-
|
|
207
|
-
return
|
|
145
|
+
function getExperimentalTelemetry(runtime) {
|
|
146
|
+
return getBooleanSetting(runtime, "OPENAI_EXPERIMENTAL_TELEMETRY", false);
|
|
208
147
|
}
|
|
209
|
-
|
|
210
|
-
return
|
|
148
|
+
function getEmbeddingDimensions(runtime) {
|
|
149
|
+
return getNumericSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", 1536);
|
|
211
150
|
}
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
async function handleTextEmbedding(runtime, params) {
|
|
215
|
-
const embeddingModelName = getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small");
|
|
216
|
-
const embeddingDimension = Number.parseInt(getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536", 10);
|
|
217
|
-
if (!Object.values(import_core5.VECTOR_DIMS).includes(embeddingDimension)) {
|
|
218
|
-
const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(import_core5.VECTOR_DIMS).join(", ")}`;
|
|
219
|
-
import_core5.logger.error(errorMsg);
|
|
220
|
-
throw new Error(errorMsg);
|
|
221
|
-
}
|
|
222
|
-
if (params === null) {
|
|
223
|
-
import_core5.logger.debug("Creating test embedding for initialization");
|
|
224
|
-
const testVector = Array(embeddingDimension).fill(0);
|
|
225
|
-
testVector[0] = 0.1;
|
|
226
|
-
return testVector;
|
|
227
|
-
}
|
|
228
|
-
let text;
|
|
229
|
-
if (typeof params === "string") {
|
|
230
|
-
text = params;
|
|
231
|
-
} else if (typeof params === "object" && params.text) {
|
|
232
|
-
text = params.text;
|
|
233
|
-
} else {
|
|
234
|
-
const errorMsg = "Invalid input format for embedding";
|
|
235
|
-
import_core5.logger.warn(errorMsg);
|
|
236
|
-
const fallbackVector = Array(embeddingDimension).fill(0);
|
|
237
|
-
fallbackVector[0] = 0.2;
|
|
238
|
-
return fallbackVector;
|
|
239
|
-
}
|
|
240
|
-
if (!text.trim()) {
|
|
241
|
-
const errorMsg = "Empty text for embedding";
|
|
242
|
-
import_core5.logger.warn(errorMsg);
|
|
243
|
-
const fallbackVector = Array(embeddingDimension).fill(0);
|
|
244
|
-
fallbackVector[0] = 0.3;
|
|
245
|
-
return fallbackVector;
|
|
246
|
-
}
|
|
247
|
-
const embeddingBaseURL = getEmbeddingBaseURL(runtime);
|
|
248
|
-
try {
|
|
249
|
-
const response = await fetch(`${embeddingBaseURL}/embeddings`, {
|
|
250
|
-
method: "POST",
|
|
251
|
-
headers: {
|
|
252
|
-
...getAuthHeader(runtime, true),
|
|
253
|
-
"Content-Type": "application/json"
|
|
254
|
-
},
|
|
255
|
-
body: JSON.stringify({
|
|
256
|
-
model: embeddingModelName,
|
|
257
|
-
input: text
|
|
258
|
-
})
|
|
259
|
-
});
|
|
260
|
-
if (!response.ok) {
|
|
261
|
-
import_core5.logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
|
|
262
|
-
throw new Error(`OpenAI API error: ${response.status} - ${response.statusText}`);
|
|
263
|
-
}
|
|
264
|
-
const data = await response.json();
|
|
265
|
-
if (!data?.data?.[0]?.embedding) {
|
|
266
|
-
import_core5.logger.error("API returned invalid structure");
|
|
267
|
-
throw new Error("API returned invalid structure");
|
|
268
|
-
}
|
|
269
|
-
const embedding = data.data[0].embedding;
|
|
270
|
-
if (!Array.isArray(embedding) || embedding.length !== embeddingDimension) {
|
|
271
|
-
const errorMsg = `Embedding length ${embedding?.length ?? 0} does not match configured dimension ${embeddingDimension}`;
|
|
272
|
-
import_core5.logger.error(errorMsg);
|
|
273
|
-
const fallbackVector = Array(embeddingDimension).fill(0);
|
|
274
|
-
fallbackVector[0] = 0.4;
|
|
275
|
-
return fallbackVector;
|
|
276
|
-
}
|
|
277
|
-
if (data.usage) {
|
|
278
|
-
const usage = {
|
|
279
|
-
inputTokens: data.usage.prompt_tokens,
|
|
280
|
-
outputTokens: 0,
|
|
281
|
-
totalTokens: data.usage.total_tokens
|
|
282
|
-
};
|
|
283
|
-
emitModelUsageEvent(runtime, import_core5.ModelType.TEXT_EMBEDDING, text, usage);
|
|
284
|
-
}
|
|
285
|
-
import_core5.logger.log(`Got valid embedding with length ${embedding.length}`);
|
|
286
|
-
return embedding;
|
|
287
|
-
} catch (error) {
|
|
288
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
289
|
-
import_core5.logger.error(`Error generating embedding: ${message}`);
|
|
290
|
-
throw error instanceof Error ? error : new Error(message);
|
|
291
|
-
}
|
|
151
|
+
function getImageDescriptionMaxTokens(runtime) {
|
|
152
|
+
return getNumericSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", 8192);
|
|
292
153
|
}
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
async function handleImageGeneration(runtime, params) {
|
|
296
|
-
const n = params.count || 1;
|
|
297
|
-
const size = params.size || "1024x1024";
|
|
298
|
-
const prompt = params.prompt;
|
|
299
|
-
const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
|
|
300
|
-
import_core6.logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
|
|
301
|
-
const baseURL = getBaseURL(runtime);
|
|
302
|
-
try {
|
|
303
|
-
const response = await fetch(`${baseURL}/images/generations`, {
|
|
304
|
-
method: "POST",
|
|
305
|
-
headers: {
|
|
306
|
-
...getAuthHeader(runtime),
|
|
307
|
-
"Content-Type": "application/json"
|
|
308
|
-
},
|
|
309
|
-
body: JSON.stringify({
|
|
310
|
-
model: modelName,
|
|
311
|
-
prompt,
|
|
312
|
-
n,
|
|
313
|
-
size
|
|
314
|
-
})
|
|
315
|
-
});
|
|
316
|
-
if (!response.ok) {
|
|
317
|
-
throw new Error(`Failed to generate image: ${response.statusText}`);
|
|
318
|
-
}
|
|
319
|
-
const data = await response.json();
|
|
320
|
-
const typedData = data;
|
|
321
|
-
return typedData.data;
|
|
322
|
-
} catch (error) {
|
|
323
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
324
|
-
throw error;
|
|
325
|
-
}
|
|
154
|
+
function getResearchModel(runtime) {
|
|
155
|
+
return getSetting(runtime, "OPENAI_RESEARCH_MODEL") ?? "o3-deep-research";
|
|
326
156
|
}
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
157
|
+
function getResearchTimeout(runtime) {
|
|
158
|
+
return getNumericSetting(runtime, "OPENAI_RESEARCH_TIMEOUT", 3600000);
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
// init.ts
|
|
162
|
+
globalThis.AI_SDK_LOG_WARNINGS ??= false;
|
|
163
|
+
function initializeOpenAI(_config, runtime) {
|
|
164
|
+
validateOpenAIConfiguration(runtime);
|
|
165
|
+
}
|
|
166
|
+
async function validateOpenAIConfiguration(runtime) {
|
|
167
|
+
if (isBrowser()) {
|
|
168
|
+
import_core2.logger.debug("[OpenAI] Skipping API validation in browser environment");
|
|
169
|
+
return;
|
|
170
|
+
}
|
|
171
|
+
const apiKey = getApiKey(runtime);
|
|
172
|
+
if (!apiKey) {
|
|
173
|
+
import_core2.logger.warn("[OpenAI] OPENAI_API_KEY is not configured. " + "OpenAI functionality will fail until a valid API key is provided.");
|
|
174
|
+
return;
|
|
340
175
|
}
|
|
341
|
-
const messages = [
|
|
342
|
-
{
|
|
343
|
-
role: "user",
|
|
344
|
-
content: [
|
|
345
|
-
{ type: "text", text: promptText },
|
|
346
|
-
{ type: "image_url", image_url: { url: imageUrl } }
|
|
347
|
-
]
|
|
348
|
-
}
|
|
349
|
-
];
|
|
350
|
-
const baseURL = getBaseURL(runtime);
|
|
351
176
|
try {
|
|
352
|
-
const
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
max_tokens: maxTokens
|
|
356
|
-
};
|
|
357
|
-
const response = await fetch(`${baseURL}/chat/completions`, {
|
|
358
|
-
method: "POST",
|
|
359
|
-
headers: {
|
|
360
|
-
"Content-Type": "application/json",
|
|
361
|
-
...getAuthHeader(runtime)
|
|
362
|
-
},
|
|
363
|
-
body: JSON.stringify(requestBody)
|
|
177
|
+
const baseURL = getBaseURL(runtime);
|
|
178
|
+
const response = await fetch(`${baseURL}/models`, {
|
|
179
|
+
headers: getAuthHeader(runtime)
|
|
364
180
|
});
|
|
365
181
|
if (!response.ok) {
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
const result = await response.json();
|
|
369
|
-
const typedResult = result;
|
|
370
|
-
const content = typedResult.choices?.[0]?.message?.content;
|
|
371
|
-
if (typedResult.usage) {
|
|
372
|
-
emitModelUsageEvent(runtime, import_core6.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt || "", {
|
|
373
|
-
inputTokens: typedResult.usage.prompt_tokens,
|
|
374
|
-
outputTokens: typedResult.usage.completion_tokens,
|
|
375
|
-
totalTokens: typedResult.usage.total_tokens
|
|
376
|
-
});
|
|
182
|
+
import_core2.logger.warn(`[OpenAI] API key validation failed: ${response.status} ${response.statusText}. ` + "Please verify your OPENAI_API_KEY is correct.");
|
|
183
|
+
return;
|
|
377
184
|
}
|
|
378
|
-
if (!content) {
|
|
379
|
-
return {
|
|
380
|
-
title: "Failed to analyze image",
|
|
381
|
-
description: "No response from API"
|
|
382
|
-
};
|
|
383
|
-
}
|
|
384
|
-
const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
|
|
385
|
-
const title = titleMatch?.[1]?.trim();
|
|
386
|
-
if (!title) {
|
|
387
|
-
import_core6.logger.warn("Could not extract title from image description response");
|
|
388
|
-
}
|
|
389
|
-
const finalTitle = title || "Image Analysis";
|
|
390
|
-
const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
|
|
391
|
-
const processedResult = { title: finalTitle, description };
|
|
392
|
-
return processedResult;
|
|
393
185
|
} catch (error) {
|
|
394
186
|
const message = error instanceof Error ? error.message : String(error);
|
|
395
|
-
|
|
396
|
-
return {
|
|
397
|
-
title: "Failed to analyze image",
|
|
398
|
-
description: `Error: ${message}`
|
|
399
|
-
};
|
|
187
|
+
import_core2.logger.warn(`[OpenAI] API validation error: ${message}. OpenAI functionality may be limited.`);
|
|
400
188
|
}
|
|
401
189
|
}
|
|
402
|
-
// src/models/audio.ts
|
|
403
|
-
var import_core8 = require("@elizaos/core");
|
|
404
190
|
|
|
405
|
-
//
|
|
406
|
-
var
|
|
191
|
+
// models/audio.ts
|
|
192
|
+
var import_core4 = require("@elizaos/core");
|
|
193
|
+
|
|
194
|
+
// utils/audio.ts
|
|
195
|
+
var import_core3 = require("@elizaos/core");
|
|
407
196
|
var MAGIC_BYTES = {
|
|
408
197
|
WAV: {
|
|
409
198
|
HEADER: [82, 73, 70, 70],
|
|
@@ -415,21 +204,26 @@ var MAGIC_BYTES = {
|
|
|
415
204
|
FTYP: [102, 116, 121, 112],
|
|
416
205
|
WEBM_EBML: [26, 69, 223, 163]
|
|
417
206
|
};
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
207
|
+
var MIN_DETECTION_BUFFER_SIZE = 12;
|
|
208
|
+
function matchBytes(buffer, offset, expected) {
|
|
209
|
+
for (let i = 0;i < expected.length; i++) {
|
|
210
|
+
const expectedByte = expected[i];
|
|
211
|
+
if (expectedByte === undefined || buffer[offset + i] !== expectedByte) {
|
|
421
212
|
return false;
|
|
213
|
+
}
|
|
422
214
|
}
|
|
423
215
|
return true;
|
|
424
216
|
}
|
|
425
217
|
function detectAudioMimeType(buffer) {
|
|
426
|
-
if (buffer.length <
|
|
218
|
+
if (buffer.length < MIN_DETECTION_BUFFER_SIZE) {
|
|
427
219
|
return "application/octet-stream";
|
|
428
220
|
}
|
|
429
221
|
if (matchBytes(buffer, 0, MAGIC_BYTES.WAV.HEADER) && matchBytes(buffer, 8, MAGIC_BYTES.WAV.IDENTIFIER)) {
|
|
430
222
|
return "audio/wav";
|
|
431
223
|
}
|
|
432
|
-
|
|
224
|
+
const firstByte = buffer[0];
|
|
225
|
+
const secondByte = buffer[1];
|
|
226
|
+
if (matchBytes(buffer, 0, MAGIC_BYTES.MP3_ID3) || firstByte === 255 && secondByte !== undefined && (secondByte & 224) === 224) {
|
|
433
227
|
return "audio/mpeg";
|
|
434
228
|
}
|
|
435
229
|
if (matchBytes(buffer, 0, MAGIC_BYTES.OGG)) {
|
|
@@ -444,284 +238,910 @@ function detectAudioMimeType(buffer) {
|
|
|
444
238
|
if (matchBytes(buffer, 0, MAGIC_BYTES.WEBM_EBML)) {
|
|
445
239
|
return "audio/webm";
|
|
446
240
|
}
|
|
447
|
-
|
|
241
|
+
import_core3.logger.warn("Could not detect audio format from buffer, using generic binary type");
|
|
448
242
|
return "application/octet-stream";
|
|
449
243
|
}
|
|
244
|
+
function getExtensionForMimeType(mimeType) {
|
|
245
|
+
switch (mimeType) {
|
|
246
|
+
case "audio/wav":
|
|
247
|
+
return "wav";
|
|
248
|
+
case "audio/mpeg":
|
|
249
|
+
return "mp3";
|
|
250
|
+
case "audio/ogg":
|
|
251
|
+
return "ogg";
|
|
252
|
+
case "audio/flac":
|
|
253
|
+
return "flac";
|
|
254
|
+
case "audio/mp4":
|
|
255
|
+
return "m4a";
|
|
256
|
+
case "audio/webm":
|
|
257
|
+
return "webm";
|
|
258
|
+
case "application/octet-stream":
|
|
259
|
+
return "bin";
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
function getFilenameForMimeType(mimeType) {
|
|
263
|
+
const ext = getExtensionForMimeType(mimeType);
|
|
264
|
+
return `recording.${ext}`;
|
|
265
|
+
}
|
|
450
266
|
|
|
451
|
-
//
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
|
|
458
|
-
|
|
459
|
-
|
|
460
|
-
|
|
461
|
-
|
|
462
|
-
|
|
463
|
-
|
|
464
|
-
|
|
465
|
-
|
|
466
|
-
|
|
467
|
-
|
|
468
|
-
},
|
|
469
|
-
body: JSON.stringify({
|
|
470
|
-
model,
|
|
471
|
-
voice,
|
|
472
|
-
input: options.text,
|
|
473
|
-
format,
|
|
474
|
-
...instructions && { instructions }
|
|
475
|
-
})
|
|
476
|
-
});
|
|
477
|
-
if (!res.ok) {
|
|
478
|
-
const err = await res.text();
|
|
479
|
-
throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
|
|
480
|
-
}
|
|
481
|
-
return await res.arrayBuffer();
|
|
482
|
-
} catch (err) {
|
|
483
|
-
const message = err instanceof Error ? err.message : String(err);
|
|
484
|
-
throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
|
|
267
|
+
// models/audio.ts
|
|
268
|
+
function isBlobOrFile(value) {
|
|
269
|
+
return value instanceof Blob || value instanceof File;
|
|
270
|
+
}
|
|
271
|
+
function isBuffer(value) {
|
|
272
|
+
return Buffer.isBuffer(value);
|
|
273
|
+
}
|
|
274
|
+
function isLocalTranscriptionParams(value) {
|
|
275
|
+
return typeof value === "object" && value !== null && "audio" in value && (isBlobOrFile(value.audio) || isBuffer(value.audio));
|
|
276
|
+
}
|
|
277
|
+
function isCoreTranscriptionParams(value) {
|
|
278
|
+
return typeof value === "object" && value !== null && "audioUrl" in value && typeof value.audioUrl === "string";
|
|
279
|
+
}
|
|
280
|
+
async function fetchAudioFromUrl(url) {
|
|
281
|
+
const response = await fetch(url);
|
|
282
|
+
if (!response.ok) {
|
|
283
|
+
throw new Error(`Failed to fetch audio from URL: ${response.status}`);
|
|
485
284
|
}
|
|
285
|
+
return response.blob();
|
|
486
286
|
}
|
|
487
287
|
async function handleTranscription(runtime, input) {
|
|
488
|
-
let modelName =
|
|
489
|
-
import_core8.logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
|
|
490
|
-
const baseURL = getBaseURL(runtime);
|
|
288
|
+
let modelName = getTranscriptionModel(runtime);
|
|
491
289
|
let blob;
|
|
492
|
-
let extraParams =
|
|
493
|
-
if (
|
|
290
|
+
let extraParams = {};
|
|
291
|
+
if (typeof input === "string") {
|
|
292
|
+
import_core4.logger.debug(`[OpenAI] Fetching audio from URL: ${input}`);
|
|
293
|
+
blob = await fetchAudioFromUrl(input);
|
|
294
|
+
} else if (isBlobOrFile(input)) {
|
|
494
295
|
blob = input;
|
|
495
|
-
} else if (
|
|
496
|
-
const
|
|
497
|
-
|
|
498
|
-
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
|
|
502
|
-
|
|
503
|
-
throw new Error("TRANSCRIPTION param 'audio' must be a Blob/File/Buffer.");
|
|
296
|
+
} else if (isBuffer(input)) {
|
|
297
|
+
const mimeType2 = detectAudioMimeType(input);
|
|
298
|
+
import_core4.logger.debug(`[OpenAI] Auto-detected audio MIME type: ${mimeType2}`);
|
|
299
|
+
blob = new Blob([new Uint8Array(input)], { type: mimeType2 });
|
|
300
|
+
} else if (isLocalTranscriptionParams(input)) {
|
|
301
|
+
extraParams = input;
|
|
302
|
+
if (input.model) {
|
|
303
|
+
modelName = input.model;
|
|
504
304
|
}
|
|
505
|
-
if (
|
|
506
|
-
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
import_core8.logger.debug(`Auto-detected audio MIME type: ${mimeType}`);
|
|
510
|
-
} else {
|
|
511
|
-
import_core8.logger.debug(`Using provided MIME type: ${mimeType}`);
|
|
512
|
-
}
|
|
513
|
-
const uint8Array = new Uint8Array(params.audio);
|
|
514
|
-
blob = new Blob([uint8Array], { type: mimeType });
|
|
305
|
+
if (isBuffer(input.audio)) {
|
|
306
|
+
const mimeType2 = input.mimeType ?? detectAudioMimeType(input.audio);
|
|
307
|
+
import_core4.logger.debug(`[OpenAI] Using MIME type: ${mimeType2}`);
|
|
308
|
+
blob = new Blob([new Uint8Array(input.audio)], { type: mimeType2 });
|
|
515
309
|
} else {
|
|
516
|
-
blob =
|
|
517
|
-
}
|
|
518
|
-
extraParams = params;
|
|
519
|
-
if (typeof params.model === "string" && params.model) {
|
|
520
|
-
modelName = params.model;
|
|
310
|
+
blob = input.audio;
|
|
521
311
|
}
|
|
312
|
+
} else if (isCoreTranscriptionParams(input)) {
|
|
313
|
+
import_core4.logger.debug(`[OpenAI] Fetching audio from URL: ${input.audioUrl}`);
|
|
314
|
+
blob = await fetchAudioFromUrl(input.audioUrl);
|
|
315
|
+
extraParams = { prompt: input.prompt };
|
|
522
316
|
} else {
|
|
523
|
-
throw new Error("TRANSCRIPTION expects
|
|
317
|
+
throw new Error("TRANSCRIPTION expects Blob, File, Buffer, URL string, or TranscriptionParams object");
|
|
524
318
|
}
|
|
525
|
-
|
|
526
|
-
const
|
|
319
|
+
import_core4.logger.debug(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
|
|
320
|
+
const mimeType = blob.type || "audio/webm";
|
|
321
|
+
const filename = blob.name || getFilenameForMimeType(mimeType.startsWith("audio/") ? mimeType : "audio/webm");
|
|
527
322
|
const formData = new FormData;
|
|
528
323
|
formData.append("file", blob, filename);
|
|
529
|
-
formData.append("model",
|
|
530
|
-
if (extraParams) {
|
|
531
|
-
|
|
532
|
-
|
|
324
|
+
formData.append("model", modelName);
|
|
325
|
+
if (extraParams.language) {
|
|
326
|
+
formData.append("language", extraParams.language);
|
|
327
|
+
}
|
|
328
|
+
if (extraParams.responseFormat) {
|
|
329
|
+
formData.append("response_format", extraParams.responseFormat);
|
|
330
|
+
}
|
|
331
|
+
if (extraParams.prompt) {
|
|
332
|
+
formData.append("prompt", extraParams.prompt);
|
|
333
|
+
}
|
|
334
|
+
if (extraParams.temperature !== undefined) {
|
|
335
|
+
formData.append("temperature", String(extraParams.temperature));
|
|
336
|
+
}
|
|
337
|
+
if (extraParams.timestampGranularities) {
|
|
338
|
+
for (const granularity of extraParams.timestampGranularities) {
|
|
339
|
+
formData.append("timestamp_granularities[]", granularity);
|
|
533
340
|
}
|
|
534
|
-
|
|
535
|
-
|
|
341
|
+
}
|
|
342
|
+
const baseURL = getBaseURL(runtime);
|
|
343
|
+
const response = await fetch(`${baseURL}/audio/transcriptions`, {
|
|
344
|
+
method: "POST",
|
|
345
|
+
headers: getAuthHeader(runtime),
|
|
346
|
+
body: formData
|
|
347
|
+
});
|
|
348
|
+
if (!response.ok) {
|
|
349
|
+
const errorText = await response.text().catch(() => "Unknown error");
|
|
350
|
+
throw new Error(`OpenAI transcription failed: ${response.status} ${response.statusText} - ${errorText}`);
|
|
351
|
+
}
|
|
352
|
+
const data = await response.json();
|
|
353
|
+
return data.text;
|
|
354
|
+
}
|
|
355
|
+
/**
 * TEXT_TO_SPEECH handler: converts text to audio via OpenAI's /audio/speech endpoint.
 * Accepts either a plain string (the text) or an options object
 * ({ text, voice?, format?, model?, instructions? }).
 * Returns the raw audio as an ArrayBuffer. Throws on empty/oversized text,
 * unknown voice, or a non-2xx API response.
 */
async function handleTextToSpeech(runtime, input) {
  let text;
  let voice;
  let format = "mp3"; // default container; also drives the Accept header below
  let model;
  let instructions;
  if (typeof input === "string") {
    text = input;
    voice = undefined; // resolved from runtime settings below
  } else {
    text = input.text;
    voice = input.voice;
    // "in" guards tolerate both narrow and extended param object shapes.
    if ("format" in input && input.format) {
      format = input.format;
    }
    if ("model" in input && input.model) {
      model = input.model;
    }
    if ("instructions" in input && input.instructions) {
      instructions = input.instructions;
    }
  }
  // Fall back to runtime-configured defaults for anything not supplied.
  model = model ?? getTTSModel(runtime);
  voice = voice ?? getTTSVoice(runtime);
  instructions = instructions ?? getTTSInstructions(runtime);
  import_core4.logger.debug(`[OpenAI] Using TEXT_TO_SPEECH model: ${model}`);
  if (!text || text.trim().length === 0) {
    throw new Error("TEXT_TO_SPEECH requires non-empty text");
  }
  // The speech endpoint caps input at 4096 characters.
  if (text.length > 4096) {
    throw new Error("TEXT_TO_SPEECH text exceeds 4096 character limit");
  }
  // NOTE(review): this is the original six-voice list; newer OpenAI voices
  // (e.g. "ash", "coral", "sage") would be rejected by this client-side check —
  // confirm against the current voice catalog before relying on it.
  const validVoices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"];
  if (voice && !validVoices.includes(voice)) {
    throw new Error(`Invalid voice: ${voice}. Must be one of: ${validVoices.join(", ")}`);
  }
  const baseURL = getBaseURL(runtime);
  const requestBody = {
    model,
    voice,
    input: text,
    response_format: format
  };
  // "instructions" is only meaningful for instruction-capable TTS models; omit when empty.
  if (instructions && instructions.length > 0) {
    requestBody.instructions = instructions;
  }
  const response = await fetch(`${baseURL}/audio/speech`, {
    method: "POST",
    headers: {
      ...getAuthHeader(runtime),
      "Content-Type": "application/json",
      ...format === "mp3" ? { Accept: "audio/mpeg" } : {}
    },
    body: JSON.stringify(requestBody)
  });
  if (!response.ok) {
    // Body read is best-effort: .text() can itself fail on aborted streams.
    const errorText = await response.text().catch(() => "Unknown error");
    throw new Error(`OpenAI TTS failed: ${response.status} ${response.statusText} - ${errorText}`);
  }
  return response.arrayBuffer();
}
|
|
416
|
+
// models/embedding.ts
|
|
417
|
+
var import_core6 = require("@elizaos/core");
|
|
418
|
+
|
|
419
|
+
// utils/events.ts
|
|
420
|
+
var import_core5 = require("@elizaos/core");
|
|
421
|
+
var MAX_PROMPT_LENGTH = 200;
/**
 * Caps a prompt at MAX_PROMPT_LENGTH characters for event payloads,
 * appending a single ellipsis character when it was cut.
 */
function truncatePrompt(prompt) {
  const needsTruncation = prompt.length > MAX_PROMPT_LENGTH;
  return needsTruncation ? `${prompt.slice(0, MAX_PROMPT_LENGTH)}…` : prompt;
}
|
|
428
|
+
/**
 * Normalizes token-usage reports from either AI SDK shape into a single
 * { promptTokens, completionTokens, totalTokens, cachedPromptTokens? } record.
 * Unknown shapes yield zeros rather than guessing.
 */
function normalizeUsage(usage) {
  // Legacy shape: { promptTokens, completionTokens, ... }
  if ("promptTokens" in usage) {
    const prompt = usage.promptTokens ?? 0;
    const completion = usage.completionTokens ?? 0;
    const details = "promptTokensDetails" in usage ? usage.promptTokensDetails : undefined;
    return {
      promptTokens: prompt,
      completionTokens: completion,
      totalTokens: usage.totalTokens ?? prompt + completion,
      cachedPromptTokens: usage.cachedPromptTokens ?? details?.cachedTokens
    };
  }
  // Newer shape: { inputTokens, outputTokens, ... }
  if ("inputTokens" in usage || "outputTokens" in usage) {
    const inTok = usage.inputTokens ?? 0;
    const outTok = usage.outputTokens ?? 0;
    return {
      promptTokens: inTok,
      completionTokens: outTok,
      totalTokens: usage.totalTokens ?? inTok + outTok,
      cachedPromptTokens: usage.cachedInputTokens
    };
  }
  return {
    promptTokens: 0,
    completionTokens: 0,
    totalTokens: 0
  };
}
|
|
456
|
+
/**
 * Emits a MODEL_USED event with a truncated prompt and normalized token counts.
 * The "cached" token field is only attached when the provider reported it.
 */
function emitModelUsageEvent(runtime, type, prompt, usage) {
  const { promptTokens, completionTokens, totalTokens, cachedPromptTokens } = normalizeUsage(usage);
  const tokens = {
    prompt: promptTokens,
    completion: completionTokens,
    total: totalTokens
  };
  if (cachedPromptTokens !== undefined) {
    tokens.cached = cachedPromptTokens;
  }
  runtime.emitEvent(import_core5.EventType.MODEL_USED, {
    runtime,
    source: "openai",
    provider: "openai",
    type,
    prompt: truncatePrompt(prompt),
    tokens
  });
}
|
|
473
|
+
|
|
474
|
+
// models/embedding.ts
|
|
475
|
+
/**
 * Asserts that the configured embedding dimension is one of the sizes the
 * core vector store supports, returning it unchanged when valid.
 */
function validateDimension(dimension) {
  const validDimensions = Object.values(import_core6.VECTOR_DIMS);
  if (validDimensions.includes(dimension)) {
    return dimension;
  }
  throw new Error(`Invalid embedding dimension: ${dimension}. Must be one of: ${validDimensions.join(", ")}`);
}
|
|
482
|
+
/**
 * Extracts the embedding input text from the accepted param shapes:
 * a raw string, { text: string }, or null (null signals a warm-up request).
 * Anything else is rejected.
 */
function extractText(params) {
  if (params === null) {
    return null;
  }
  switch (typeof params) {
    case "string":
      return params;
    case "object":
      if (typeof params.text === "string") {
        return params.text;
      }
      break;
    default:
      break;
  }
  throw new Error("Invalid embedding params: expected string, { text: string }, or null");
}
|
|
494
|
+
/**
 * TEXT_EMBEDDING handler: generates an embedding vector via /embeddings.
 * A null param returns a deterministic test vector (used during init).
 * Throws on empty text, API errors, malformed responses, or a dimension
 * mismatch against the configured OPENAI_EMBEDDING_DIMENSIONS.
 */
async function handleTextEmbedding(runtime, params) {
  const embeddingModel = getEmbeddingModel(runtime);
  const embeddingDimension = validateDimension(getEmbeddingDimensions(runtime));
  const text = extractText(params);
  if (text === null) {
    // Initialization probe: return a fixed vector without calling the API.
    import_core6.logger.debug("[OpenAI] Creating test embedding for initialization");
    const testVector = new Array(embeddingDimension).fill(0);
    testVector[0] = 0.1;
    return testVector;
  }
  let trimmedText = text.trim();
  if (trimmedText.length === 0) {
    throw new Error("Cannot generate embedding for empty text");
  }
  // Rough token guard: ~4 chars per token, so cap at ~8000 tokens of input.
  const maxChars = 8000 * 4;
  if (trimmedText.length > maxChars) {
    import_core6.logger.warn(`[OpenAI] Embedding input too long (~${Math.ceil(trimmedText.length / 4)} tokens), truncating to ~8000 tokens`);
    trimmedText = trimmedText.slice(0, maxChars);
  }
  const baseURL = getEmbeddingBaseURL(runtime);
  const url = `${baseURL}/embeddings`;
  import_core6.logger.debug(`[OpenAI] Generating embedding with model: ${embeddingModel}`);
  // NOTE(review): the request omits the "dimensions" field, so any mismatch
  // with the configured dimension only surfaces as the error check below —
  // confirm whether sending "dimensions" is intended here.
  const response = await fetch(url, {
    method: "POST",
    headers: {
      ...getAuthHeader(runtime, true),
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      model: embeddingModel,
      input: trimmedText
    })
  });
  if (!response.ok) {
    const errorText = await response.text().catch(() => "Unknown error");
    throw new Error(`OpenAI embedding API error: ${response.status} ${response.statusText} - ${errorText}`);
  }
  const data = await response.json();
  const firstResult = data?.data?.[0];
  if (!firstResult || !firstResult.embedding) {
    throw new Error("OpenAI API returned invalid embedding response structure");
  }
  const embedding = firstResult.embedding;
  if (embedding.length !== embeddingDimension) {
    throw new Error(`Embedding dimension mismatch: got ${embedding.length}, expected ${embeddingDimension}. ` + `Check OPENAI_EMBEDDING_DIMENSIONS setting.`);
  }
  // Embeddings have no completion tokens; report prompt/total only.
  if (data.usage) {
    emitModelUsageEvent(runtime, import_core6.ModelType.TEXT_EMBEDDING, trimmedText, {
      promptTokens: data.usage.prompt_tokens,
      completionTokens: 0,
      totalTokens: data.usage.total_tokens
    });
  }
  import_core6.logger.debug(`[OpenAI] Generated embedding with ${embedding.length} dimensions`);
  return embedding;
}
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
|
|
571
|
-
|
|
572
|
-
|
|
573
|
-
|
|
574
|
-
|
|
575
|
-
|
|
576
|
-
|
|
577
|
-
|
|
550
|
+
// models/image.ts
|
|
551
|
+
var import_core7 = require("@elizaos/core");
|
|
552
|
+
var DEFAULT_IMAGE_DESCRIPTION_PROMPT = "Please analyze this image and provide a title and detailed description.";
|
|
553
|
+
/**
 * IMAGE generation handler: calls /images/generations and returns an array of
 * { url, revisedPrompt } for each generated image.
 * Throws on an empty prompt, out-of-range count, API errors, or an empty result.
 */
async function handleImageGeneration(runtime, params) {
  const modelName = getImageModel(runtime);
  const count = params.count ?? 1;
  const size = params.size ?? "1024x1024";
  // Optional extended fields (quality/style) may ride along on params.
  const extendedParams = params;
  import_core7.logger.debug(`[OpenAI] Using IMAGE model: ${modelName}`);
  if (!params.prompt || params.prompt.trim().length === 0) {
    throw new Error("IMAGE generation requires a non-empty prompt");
  }
  // API limit: n must be between 1 and 10 per request.
  if (count < 1 || count > 10) {
    throw new Error("IMAGE count must be between 1 and 10");
  }
  const baseURL = getBaseURL(runtime);
  const requestBody = {
    model: modelName,
    prompt: params.prompt,
    n: count,
    size
  };
  if (extendedParams.quality) {
    requestBody.quality = extendedParams.quality;
  }
  if (extendedParams.style) {
    requestBody.style = extendedParams.style;
  }
  const response = await fetch(`${baseURL}/images/generations`, {
    method: "POST",
    headers: {
      ...getAuthHeader(runtime),
      "Content-Type": "application/json"
    },
    body: JSON.stringify(requestBody)
  });
  if (!response.ok) {
    const errorText = await response.text().catch(() => "Unknown error");
    throw new Error(`OpenAI image generation failed: ${response.status} ${response.statusText} - ${errorText}`);
  }
  const data = await response.json();
  if (!data.data || data.data.length === 0) {
    throw new Error("OpenAI API returned no images");
  }
  // Map API snake_case to the plugin's camelCase result shape.
  return data.data.map((item) => ({
    url: item.url,
    revisedPrompt: item.revised_prompt
  }));
}
|
|
580
|
-
|
|
581
|
-
|
|
582
|
-
|
|
583
|
-
|
|
584
|
-
|
|
599
|
+
/**
 * Pulls a "Title: ..." line out of a model response, falling back to
 * "Image Analysis" when no title heading is present.
 */
function parseTitleFromResponse(content) {
  const match = /title[:\s]+(.+?)(?:\n|$)/i.exec(content);
  const extracted = match?.[1]?.trim();
  return extracted ?? "Image Analysis";
}
|
|
603
|
+
/**
 * Strips the first "Title: ..." line from a model response and returns the
 * trimmed remainder as the description.
 */
function parseDescriptionFromResponse(content) {
  const titleLine = /title[:\s]+(.+?)(?:\n|$)/i;
  const withoutTitle = content.replace(titleLine, "");
  return withoutTitle.trim();
}
|
|
606
|
+
/**
 * IMAGE_DESCRIPTION handler: asks a vision-capable chat model to describe an
 * image. Accepts a raw image URL string or { imageUrl, prompt? }.
 * Returns { title, description } parsed from the model's reply.
 */
async function handleImageDescription(runtime, params) {
  const modelName = getImageDescriptionModel(runtime);
  const maxTokens = getImageDescriptionMaxTokens(runtime);
  import_core7.logger.debug(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
  let imageUrl;
  let promptText;
  if (typeof params === "string") {
    imageUrl = params;
    promptText = DEFAULT_IMAGE_DESCRIPTION_PROMPT;
  } else {
    imageUrl = params.imageUrl;
    promptText = params.prompt ?? DEFAULT_IMAGE_DESCRIPTION_PROMPT;
  }
  if (!imageUrl || imageUrl.trim().length === 0) {
    throw new Error("IMAGE_DESCRIPTION requires a valid image URL");
  }
  const baseURL = getBaseURL(runtime);
  // Vision request: one user message mixing text and an image_url part.
  const requestBody = {
    model: modelName,
    messages: [
      {
        role: "user",
        content: [
          { type: "text", text: promptText },
          { type: "image_url", image_url: { url: imageUrl } }
        ]
      }
    ],
    max_tokens: maxTokens
  };
  const response = await fetch(`${baseURL}/chat/completions`, {
    method: "POST",
    headers: {
      ...getAuthHeader(runtime),
      "Content-Type": "application/json"
    },
    body: JSON.stringify(requestBody)
  });
  if (!response.ok) {
    const errorText = await response.text().catch(() => "Unknown error");
    throw new Error(`OpenAI image description failed: ${response.status} ${response.statusText} - ${errorText}`);
  }
  const data = await response.json();
  // Usage is reported before validating content so token accounting survives
  // an empty-content failure below.
  if (data.usage) {
    emitModelUsageEvent(runtime, import_core7.ModelType.IMAGE_DESCRIPTION, typeof params === "string" ? params : params.prompt ?? "", {
      promptTokens: data.usage.prompt_tokens,
      completionTokens: data.usage.completion_tokens,
      totalTokens: data.usage.total_tokens
    });
  }
  const firstChoice = data.choices?.[0];
  const content = firstChoice?.message?.content;
  if (!content) {
    throw new Error("OpenAI API returned empty image description");
  }
  return {
    title: parseTitleFromResponse(content),
    description: parseDescriptionFromResponse(content)
  };
}
|
|
666
|
+
// models/object.ts
|
|
585
667
|
var import_core9 = require("@elizaos/core");
|
|
586
668
|
var import_ai2 = require("ai");
|
|
669
|
+
|
|
670
|
+
// providers/openai.ts
|
|
671
|
+
var import_openai = require("@ai-sdk/openai");
|
|
672
|
+
// Placeholder key used when requests are routed through an authenticating proxy.
var PROXY_API_KEY = "sk-proxy";
/**
 * Builds an AI SDK OpenAI client for the configured base URL.
 * Without an API key, a proxy-mode client is returned when proxy mode is on;
 * otherwise a missing key is a hard error.
 */
function createOpenAIClient(runtime) {
  const baseURL = getBaseURL(runtime);
  const apiKey = getApiKey(runtime);
  if (apiKey) {
    return import_openai.createOpenAI({ apiKey, baseURL });
  }
  if (isProxyMode(runtime)) {
    // The proxy injects real credentials; the SDK just needs a non-empty key.
    return import_openai.createOpenAI({ apiKey: PROXY_API_KEY, baseURL });
  }
  throw new Error("OPENAI_API_KEY is required. Set it in your environment variables or runtime settings.");
}
|
|
690
|
+
// utils/json.ts
|
|
691
|
+
var import_core8 = require("@elizaos/core");
|
|
692
|
+
var import_ai = require("ai");
|
|
693
|
+
// Regexes used to scrub common LLM output artifacts before JSON parsing.
var JSON_CLEANUP_PATTERNS = {
  MARKDOWN_JSON: /```json\n|\n```|```/g,
  WHITESPACE: /^\s+|\s+$/g
};
/**
 * Returns an AI SDK experimental_repairText callback that retries a failed
 * JSON parse after stripping markdown code fences. Yields the repaired text,
 * or null when the failure is not a parse error or stripping does not help.
 */
function getJsonRepairFunction() {
  return async ({ text, error }) => {
    if (error instanceof import_ai.JSONParseError) {
      try {
        const cleanedText = text.replace(JSON_CLEANUP_PATTERNS.MARKDOWN_JSON, "");
        JSON.parse(cleanedText); // validate before handing back
        import_core8.logger.debug("[JSON Repair] Successfully repaired JSON by removing markdown wrappers");
        return cleanedText;
      } catch {
        import_core8.logger.warn("[JSON Repair] Unable to repair JSON text");
        return null;
      }
    }
    return null;
  };
}
|
|
603
713
|
|
|
604
|
-
//
|
|
714
|
+
// models/object.ts
|
|
605
715
|
/**
 * Shared OBJECT_SMALL/OBJECT_LARGE pipeline: generates a JSON object from a
 * prompt via the AI SDK in "no-schema" mode (any params.schema is logged but
 * not enforced). Emits a usage event and validates the result is an object.
 */
async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
  const openai = createOpenAIClient(runtime);
  const modelName = getModelFn(runtime);
  import_core9.logger.debug(`[OpenAI] Using ${modelType} model: ${modelName}`);
  if (!params.prompt || params.prompt.trim().length === 0) {
    throw new Error("Object generation requires a non-empty prompt");
  }
  if (params.schema) {
    // Schema is intentionally not passed through — structure must come from the prompt.
    import_core9.logger.debug("[OpenAI] Schema provided but using no-schema mode. " + "Structure is determined by prompt instructions.");
  }
  const model = openai.chat(modelName);
  const { object, usage } = await import_ai2.generateObject({
    model,
    output: "no-schema",
    prompt: params.prompt,
    // Retry parse failures after stripping markdown fences.
    experimental_repairText: getJsonRepairFunction()
  });
  if (usage) {
    emitModelUsageEvent(runtime, modelType, params.prompt, usage);
  }
  // no-schema mode can legally yield primitives; callers expect an object.
  if (typeof object !== "object" || object === null) {
    throw new Error(`Object generation returned ${typeof object}, expected object`);
  }
  return object;
}
|
|
632
740
|
/** OBJECT_SMALL handler: object generation bound to the small model tier. */
async function handleObjectSmall(runtime, params) {
  const { OBJECT_SMALL } = import_core9.ModelType;
  return generateObjectByModelType(runtime, params, OBJECT_SMALL, getSmallModel);
}
/** OBJECT_LARGE handler: object generation bound to the large model tier. */
async function handleObjectLarge(runtime, params) {
  const { OBJECT_LARGE } = import_core9.ModelType;
  return generateObjectByModelType(runtime, params, OBJECT_LARGE, getLargeModel);
}
|
|
638
|
-
//
|
|
639
|
-
var
|
|
640
|
-
|
|
641
|
-
|
|
746
|
+
// models/research.ts
|
|
747
|
+
var import_core10 = require("@elizaos/core");
|
|
748
|
+
/**
 * Maps a plugin research-tool descriptor (camelCase) to the OpenAI Responses
 * API wire format (snake_case). Throws on unrecognized tool types.
 */
function convertToolToApi(tool) {
  if (tool.type === "web_search_preview") {
    return { type: "web_search_preview" };
  }
  if (tool.type === "file_search") {
    return {
      type: "file_search",
      vector_store_ids: tool.vectorStoreIds
    };
  }
  if (tool.type === "code_interpreter") {
    // Container defaults to auto-provisioned when not specified.
    return {
      type: "code_interpreter",
      container: tool.container ?? { type: "auto" }
    };
  }
  if (tool.type === "mcp") {
    return {
      type: "mcp",
      server_label: tool.serverLabel,
      server_url: tool.serverUrl,
      require_approval: tool.requireApproval ?? "never"
    };
  }
  throw new Error(`Unknown research tool type: ${tool.type}`);
}
|
|
773
|
+
/**
 * Converts one Responses-API output item (snake_case) into the plugin's
 * camelCase result shape. Unknown item types yield null so callers can
 * filter them out.
 */
function convertOutputItem(item) {
  const id = item.id ?? "";
  const status = item.status ?? "completed";
  if (item.type === "web_search_call") {
    return {
      id,
      type: "web_search_call",
      status,
      action: {
        type: item.action?.type ?? "search",
        query: item.action?.query,
        url: item.action?.url
      }
    };
  }
  if (item.type === "file_search_call") {
    return {
      id,
      type: "file_search_call",
      status,
      query: item.query ?? "",
      results: item.results?.map((entry) => ({
        fileId: entry.file_id,
        fileName: entry.file_name,
        score: entry.score
      }))
    };
  }
  if (item.type === "code_interpreter_call") {
    return {
      id,
      type: "code_interpreter_call",
      status,
      code: item.code ?? "",
      output: item.output
    };
  }
  if (item.type === "mcp_tool_call") {
    return {
      id,
      type: "mcp_tool_call",
      status,
      serverLabel: item.server_label ?? "",
      toolName: item.tool_name ?? "",
      arguments: item.arguments ?? {},
      result: item.result
    };
  }
  if (item.type === "message") {
    const content = item.content?.map((part) => ({
      type: "output_text",
      text: part.text,
      annotations: part.annotations?.map((note) => ({
        url: note.url,
        title: note.title,
        startIndex: note.start_index,
        endIndex: note.end_index
      })) ?? []
    })) ?? [];
    return { type: "message", content };
  }
  return null;
}
|
|
834
|
+
/**
 * Gathers url-citation annotations from a Responses-API output array,
 * converting snake_case indices to camelCase.
 */
function collectResearchAnnotations(output) {
  const annotations = [];
  if (!output) {
    return annotations;
  }
  for (const item of output) {
    if (item.type !== "message" || !item.content) {
      continue;
    }
    for (const content of item.content) {
      if (!content.annotations) {
        continue;
      }
      for (const ann of content.annotations) {
        annotations.push({
          url: ann.url,
          title: ann.title,
          startIndex: ann.start_index,
          endIndex: ann.end_index
        });
      }
    }
  }
  return annotations;
}
/**
 * Extracts the research result text and its annotations from a Responses-API
 * payload. Prefers the convenience field response.output_text; otherwise
 * concatenates the text of all message content parts.
 * Fixes: the annotation-walking loop was duplicated verbatim in both branches
 * (now shared via collectResearchAnnotations), and `text += content.text`
 * appended the literal string "undefined" for parts without text (now guarded).
 */
function extractTextAndAnnotations(response) {
  const annotations = collectResearchAnnotations(response.output);
  if (response.output_text) {
    return { text: response.output_text, annotations };
  }
  let text = "";
  if (response.output) {
    for (const item of response.output) {
      if (item.type === "message" && item.content) {
        for (const content of item.content) {
          text += content.text ?? "";
        }
      }
    }
  }
  return { text, annotations };
}
|
|
880
|
+
/**
 * RESEARCH handler: runs an OpenAI deep-research request against /responses.
 * Requires a real API key (proxy mode is not supported here). Defaults to a
 * web_search_preview tool when no data-source tool was supplied, honors the
 * configured timeout via AbortSignal, and returns
 * { id, text, annotations, outputItems, status }.
 */
async function handleResearch(runtime, params) {
  const apiKey = getApiKey(runtime);
  if (!apiKey) {
    throw new Error("OPENAI_API_KEY is required for deep research. Set it in your environment variables or runtime settings.");
  }
  const baseURL = getBaseURL(runtime);
  const modelName = params.model ?? getResearchModel(runtime);
  const timeout = getResearchTimeout(runtime);
  import_core10.logger.debug(`[OpenAI] Starting deep research with model: ${modelName}`);
  import_core10.logger.debug(`[OpenAI] Research input: ${params.input.substring(0, 100)}...`);
  // Deep research needs at least one data-source tool; web search is the default.
  const dataSourceTools = params.tools?.filter((t) => t.type === "web_search_preview" || t.type === "file_search" || t.type === "mcp");
  if (!dataSourceTools || dataSourceTools.length === 0) {
    import_core10.logger.debug("[OpenAI] No data source tools specified, defaulting to web_search_preview");
    // NOTE(review): this mutates the caller's params object — confirm callers
    // do not rely on params.tools being untouched.
    params.tools = [{ type: "web_search_preview" }, ...params.tools ?? []];
  }
  const requestBody = {
    model: modelName,
    input: params.input
  };
  if (params.instructions) {
    requestBody.instructions = params.instructions;
  }
  if (params.background !== undefined) {
    requestBody.background = params.background;
  }
  if (params.tools && params.tools.length > 0) {
    requestBody.tools = params.tools.map(convertToolToApi);
  }
  if (params.maxToolCalls !== undefined) {
    requestBody.max_tool_calls = params.maxToolCalls;
  }
  if (params.reasoningSummary) {
    requestBody.reasoning = { summary: params.reasoningSummary };
  }
  import_core10.logger.debug(`[OpenAI] Research request body: ${JSON.stringify(requestBody, null, 2)}`);
  const response = await fetch(`${baseURL}/responses`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify(requestBody),
    // Deep research can run for minutes; abort at the configured ceiling.
    signal: AbortSignal.timeout(timeout)
  });
  if (!response.ok) {
    const errorText = await response.text();
    import_core10.logger.error(`[OpenAI] Research request failed: ${response.status} ${errorText}`);
    throw new Error(`Deep research request failed: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  // A 2xx response can still carry an embedded error object.
  if (data.error) {
    import_core10.logger.error(`[OpenAI] Research API error: ${data.error.message}`);
    throw new Error(`Deep research error: ${data.error.message}`);
  }
  import_core10.logger.debug(`[OpenAI] Research response received. Status: ${data.status ?? "completed"}`);
  const { text, annotations } = extractTextAndAnnotations(data);
  // Convert raw output items, dropping types convertOutputItem does not know.
  const outputItems = [];
  if (data.output) {
    for (const item of data.output) {
      const converted = convertOutputItem(item);
      if (converted) {
        outputItems.push(converted);
      }
    }
  }
  const result = {
    id: data.id,
    text,
    annotations,
    outputItems,
    status: data.status
  };
  import_core10.logger.info(`[OpenAI] Research completed. Text length: ${text.length}, Annotations: ${annotations.length}, Output items: ${outputItems.length}`);
  return result;
}
|
|
955
|
+
// models/text.ts
|
|
642
956
|
var import_core11 = require("@elizaos/core");
|
|
957
|
+
var import_ai3 = require("ai");
|
|
958
|
+
/**
 * Converts AI SDK usage ({ inputTokens, outputTokens, cachedInputTokens? })
 * into the plugin's prompt/completion naming. Returns undefined when the SDK
 * reported no usage.
 */
function convertUsage(usage) {
  if (!usage) {
    return;
  }
  const inTokens = usage.inputTokens ?? 0;
  const outTokens = usage.outputTokens ?? 0;
  return {
    promptTokens: inTokens,
    completionTokens: outTokens,
    totalTokens: inTokens + outTokens,
    cachedPromptTokens: usage.cachedInputTokens
  };
}
|
|
972
|
+
/**
 * Reads optional OpenAI prompt-cache options from
 * params.providerOptions.openai, tolerating any missing level of nesting.
 */
function resolvePromptCacheOptions(params) {
  const openaiOptions = params.providerOptions?.openai;
  return {
    promptCacheKey: openaiOptions?.promptCacheKey,
    promptCacheRetention: openaiOptions?.promptCacheRetention
  };
}
|
|
979
|
+
/**
 * Shared TEXT_SMALL/TEXT_LARGE pipeline. Non-streaming calls return the text
 * and emit a usage event; params.stream returns promise-wrapped stream fields
 * ({ textStream, text, usage, finishReason }) instead.
 */
async function generateTextByModelType(runtime, params, modelType, getModelFn) {
  const openai = createOpenAIClient(runtime);
  const modelName = getModelFn(runtime);
  import_core11.logger.debug(`[OpenAI] Using ${modelType} model: ${modelName}`);
  const promptCacheOptions = resolvePromptCacheOptions(params);
  // The character's system prompt, when configured, is passed on every call.
  const systemPrompt = runtime.character.system ?? undefined;
  const model = openai.chat(modelName);
  const generateParams = {
    model,
    prompt: params.prompt,
    system: systemPrompt,
    maxOutputTokens: params.maxTokens ?? 8192,
    experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) },
    // providerOptions is only attached when at least one cache option is set.
    ...promptCacheOptions.promptCacheKey || promptCacheOptions.promptCacheRetention ? {
      providerOptions: {
        openai: {
          ...promptCacheOptions.promptCacheKey ? { promptCacheKey: promptCacheOptions.promptCacheKey } : {},
          ...promptCacheOptions.promptCacheRetention ? { promptCacheRetention: promptCacheOptions.promptCacheRetention } : {}
        }
      }
    } : {}
  };
  if (params.stream) {
    // NOTE(review): the streaming path never emits a model-usage event, unlike
    // the non-streaming path below — confirm this asymmetry is intentional.
    const result = import_ai3.streamText(generateParams);
    return {
      textStream: result.textStream,
      text: Promise.resolve(result.text),
      usage: Promise.resolve(result.usage).then(convertUsage),
      finishReason: Promise.resolve(result.finishReason).then((r) => r)
    };
  }
  const { text, usage } = await import_ai3.generateText(generateParams);
  if (usage) {
    emitModelUsageEvent(runtime, modelType, params.prompt, usage);
  }
  return text;
}
|
|
1016
|
+
/** TEXT_SMALL handler: text generation bound to the small model tier. */
async function handleTextSmall(runtime, params) {
  const { TEXT_SMALL } = import_core11.ModelType;
  return generateTextByModelType(runtime, params, TEXT_SMALL, getSmallModel);
}
/** TEXT_LARGE handler: text generation bound to the large model tier. */
async function handleTextLarge(runtime, params) {
  const { TEXT_LARGE } = import_core11.ModelType;
  return generateTextByModelType(runtime, params, TEXT_LARGE, getLargeModel);
}
|
|
1022
|
+
// models/tokenizer.ts
|
|
1023
|
+
var import_core13 = require("@elizaos/core");
|
|
1024
|
+
|
|
1025
|
+
// utils/tokenization.ts
|
|
1026
|
+
var import_core12 = require("@elizaos/core");
|
|
643
1027
|
var import_js_tiktoken = require("js-tiktoken");
|
|
644
1028
|
function resolveTokenizerEncoding(modelName) {
|
|
645
1029
|
const normalized = modelName.toLowerCase();
|
|
646
1030
|
const fallbackEncoding = normalized.includes("4o") ? "o200k_base" : "cl100k_base";
|
|
647
1031
|
try {
|
|
648
1032
|
return import_js_tiktoken.encodingForModel(modelName);
|
|
649
|
-
} catch
|
|
1033
|
+
} catch {
|
|
650
1034
|
return import_js_tiktoken.getEncoding(fallbackEncoding);
|
|
651
1035
|
}
|
|
652
1036
|
}
|
|
653
|
-
|
|
654
|
-
|
|
655
|
-
|
|
656
|
-
|
|
1037
|
+
function getModelName(runtime, modelType) {
|
|
1038
|
+
if (modelType === import_core12.ModelType.TEXT_SMALL) {
|
|
1039
|
+
return getSmallModel(runtime);
|
|
1040
|
+
}
|
|
1041
|
+
return getLargeModel(runtime);
|
|
1042
|
+
}
|
|
1043
|
+
function tokenizeText(runtime, modelType, text) {
|
|
1044
|
+
const modelName = getModelName(runtime, modelType);
|
|
1045
|
+
const encoder = resolveTokenizerEncoding(modelName);
|
|
1046
|
+
return encoder.encode(text);
|
|
657
1047
|
}
|
|
658
|
-
|
|
659
|
-
const modelName =
|
|
660
|
-
|
|
1048
|
+
function detokenizeText(runtime, modelType, tokens) {
|
|
1049
|
+
const modelName = getModelName(runtime, modelType);
|
|
1050
|
+
const encoder = resolveTokenizerEncoding(modelName);
|
|
1051
|
+
return encoder.decode(tokens);
|
|
661
1052
|
}
|
|
662
1053
|
|
|
663
|
-
//
|
|
664
|
-
async function handleTokenizerEncode(runtime,
|
|
665
|
-
|
|
1054
|
+
// models/tokenizer.ts
|
|
1055
|
+
async function handleTokenizerEncode(runtime, params) {
|
|
1056
|
+
if (!params.prompt) {
|
|
1057
|
+
throw new Error("Tokenization requires a non-empty prompt");
|
|
1058
|
+
}
|
|
1059
|
+
const modelType = params.modelType ?? import_core13.ModelType.TEXT_LARGE;
|
|
1060
|
+
return tokenizeText(runtime, modelType, params.prompt);
|
|
1061
|
+
}
|
|
1062
|
+
async function handleTokenizerDecode(runtime, params) {
|
|
1063
|
+
if (!params.tokens || !Array.isArray(params.tokens)) {
|
|
1064
|
+
throw new Error("Detokenization requires a valid tokens array");
|
|
1065
|
+
}
|
|
1066
|
+
if (params.tokens.length === 0) {
|
|
1067
|
+
return "";
|
|
1068
|
+
}
|
|
1069
|
+
for (let i = 0;i < params.tokens.length; i++) {
|
|
1070
|
+
const token = params.tokens[i];
|
|
1071
|
+
if (typeof token !== "number" || !Number.isFinite(token)) {
|
|
1072
|
+
throw new Error(`Invalid token at index ${i}: expected number`);
|
|
1073
|
+
}
|
|
1074
|
+
}
|
|
1075
|
+
const modelType = params.modelType ?? import_core13.ModelType.TEXT_LARGE;
|
|
1076
|
+
return detokenizeText(runtime, modelType, params.tokens);
|
|
666
1077
|
}
|
|
667
|
-
|
|
668
|
-
|
|
1078
|
+
// index.ts
|
|
1079
|
+
function getProcessEnv() {
|
|
1080
|
+
if (typeof process === "undefined") {
|
|
1081
|
+
return {};
|
|
1082
|
+
}
|
|
1083
|
+
return process.env;
|
|
669
1084
|
}
|
|
670
|
-
|
|
1085
|
+
var env = getProcessEnv();
|
|
671
1086
|
var openaiPlugin = {
|
|
672
1087
|
name: "openai",
|
|
673
|
-
description: "OpenAI
|
|
1088
|
+
description: "OpenAI API integration for text, image, audio, and embedding models",
|
|
674
1089
|
config: {
|
|
675
|
-
OPENAI_API_KEY:
|
|
676
|
-
OPENAI_BASE_URL:
|
|
677
|
-
OPENAI_SMALL_MODEL:
|
|
678
|
-
OPENAI_LARGE_MODEL:
|
|
679
|
-
SMALL_MODEL:
|
|
680
|
-
LARGE_MODEL:
|
|
681
|
-
OPENAI_EMBEDDING_MODEL:
|
|
682
|
-
OPENAI_EMBEDDING_API_KEY:
|
|
683
|
-
OPENAI_EMBEDDING_URL:
|
|
684
|
-
OPENAI_EMBEDDING_DIMENSIONS:
|
|
685
|
-
OPENAI_IMAGE_DESCRIPTION_MODEL:
|
|
686
|
-
OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS:
|
|
687
|
-
OPENAI_EXPERIMENTAL_TELEMETRY:
|
|
1090
|
+
OPENAI_API_KEY: env.OPENAI_API_KEY ?? null,
|
|
1091
|
+
OPENAI_BASE_URL: env.OPENAI_BASE_URL ?? null,
|
|
1092
|
+
OPENAI_SMALL_MODEL: env.OPENAI_SMALL_MODEL ?? null,
|
|
1093
|
+
OPENAI_LARGE_MODEL: env.OPENAI_LARGE_MODEL ?? null,
|
|
1094
|
+
SMALL_MODEL: env.SMALL_MODEL ?? null,
|
|
1095
|
+
LARGE_MODEL: env.LARGE_MODEL ?? null,
|
|
1096
|
+
OPENAI_EMBEDDING_MODEL: env.OPENAI_EMBEDDING_MODEL ?? null,
|
|
1097
|
+
OPENAI_EMBEDDING_API_KEY: env.OPENAI_EMBEDDING_API_KEY ?? null,
|
|
1098
|
+
OPENAI_EMBEDDING_URL: env.OPENAI_EMBEDDING_URL ?? null,
|
|
1099
|
+
OPENAI_EMBEDDING_DIMENSIONS: env.OPENAI_EMBEDDING_DIMENSIONS ?? null,
|
|
1100
|
+
OPENAI_IMAGE_DESCRIPTION_MODEL: env.OPENAI_IMAGE_DESCRIPTION_MODEL ?? null,
|
|
1101
|
+
OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS: env.OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS ?? null,
|
|
1102
|
+
OPENAI_EXPERIMENTAL_TELEMETRY: env.OPENAI_EXPERIMENTAL_TELEMETRY ?? null,
|
|
1103
|
+
OPENAI_RESEARCH_MODEL: env.OPENAI_RESEARCH_MODEL ?? null,
|
|
1104
|
+
OPENAI_RESEARCH_TIMEOUT: env.OPENAI_RESEARCH_TIMEOUT ?? null
|
|
688
1105
|
},
|
|
689
|
-
async init(
|
|
690
|
-
initializeOpenAI(
|
|
1106
|
+
async init(config, runtime) {
|
|
1107
|
+
initializeOpenAI(config, runtime);
|
|
691
1108
|
},
|
|
692
1109
|
models: {
|
|
693
|
-
[
|
|
1110
|
+
[import_core14.ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
|
|
694
1111
|
return handleTextEmbedding(runtime, params);
|
|
695
1112
|
},
|
|
696
|
-
[
|
|
1113
|
+
[import_core14.ModelType.TEXT_TOKENIZER_ENCODE]: async (runtime, params) => {
|
|
697
1114
|
return handleTokenizerEncode(runtime, params);
|
|
698
1115
|
},
|
|
699
|
-
[
|
|
1116
|
+
[import_core14.ModelType.TEXT_TOKENIZER_DECODE]: async (runtime, params) => {
|
|
700
1117
|
return handleTokenizerDecode(runtime, params);
|
|
701
1118
|
},
|
|
702
|
-
[
|
|
1119
|
+
[import_core14.ModelType.TEXT_SMALL]: async (runtime, params) => {
|
|
703
1120
|
return handleTextSmall(runtime, params);
|
|
704
1121
|
},
|
|
705
|
-
[
|
|
1122
|
+
[import_core14.ModelType.TEXT_LARGE]: async (runtime, params) => {
|
|
706
1123
|
return handleTextLarge(runtime, params);
|
|
707
1124
|
},
|
|
708
|
-
[
|
|
1125
|
+
[import_core14.ModelType.IMAGE]: async (runtime, params) => {
|
|
709
1126
|
return handleImageGeneration(runtime, params);
|
|
710
1127
|
},
|
|
711
|
-
[
|
|
1128
|
+
[import_core14.ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
|
|
712
1129
|
return handleImageDescription(runtime, params);
|
|
713
1130
|
},
|
|
714
|
-
[
|
|
1131
|
+
[import_core14.ModelType.TRANSCRIPTION]: async (runtime, input) => {
|
|
715
1132
|
return handleTranscription(runtime, input);
|
|
716
1133
|
},
|
|
717
|
-
[
|
|
1134
|
+
[import_core14.ModelType.TEXT_TO_SPEECH]: async (runtime, input) => {
|
|
718
1135
|
return handleTextToSpeech(runtime, input);
|
|
719
1136
|
},
|
|
720
|
-
[
|
|
1137
|
+
[import_core14.ModelType.OBJECT_SMALL]: async (runtime, params) => {
|
|
721
1138
|
return handleObjectSmall(runtime, params);
|
|
722
1139
|
},
|
|
723
|
-
[
|
|
1140
|
+
[import_core14.ModelType.OBJECT_LARGE]: async (runtime, params) => {
|
|
724
1141
|
return handleObjectLarge(runtime, params);
|
|
1142
|
+
},
|
|
1143
|
+
[import_core14.ModelType.RESEARCH]: async (runtime, params) => {
|
|
1144
|
+
return handleResearch(runtime, params);
|
|
725
1145
|
}
|
|
726
1146
|
},
|
|
727
1147
|
tests: [
|
|
@@ -729,217 +1149,166 @@ var openaiPlugin = {
|
|
|
729
1149
|
name: "openai_plugin_tests",
|
|
730
1150
|
tests: [
|
|
731
1151
|
{
|
|
732
|
-
name: "
|
|
1152
|
+
name: "openai_test_api_connectivity",
|
|
733
1153
|
fn: async (runtime) => {
|
|
734
1154
|
const baseURL = getBaseURL(runtime);
|
|
735
1155
|
const response = await fetch(`${baseURL}/models`, {
|
|
736
1156
|
headers: getAuthHeader(runtime)
|
|
737
1157
|
});
|
|
738
|
-
const data = await response.json();
|
|
739
|
-
import_core13.logger.log({ data: data?.data?.length ?? "N/A" }, "Models Available");
|
|
740
1158
|
if (!response.ok) {
|
|
741
|
-
throw new Error(`
|
|
1159
|
+
throw new Error(`API connectivity test failed: ${response.status} ${response.statusText}`);
|
|
742
1160
|
}
|
|
1161
|
+
const data = await response.json();
|
|
1162
|
+
import_core14.logger.info(`[OpenAI Test] API connected. ${data.data?.length ?? 0} models available.`);
|
|
743
1163
|
}
|
|
744
1164
|
},
|
|
745
1165
|
{
|
|
746
1166
|
name: "openai_test_text_embedding",
|
|
747
1167
|
fn: async (runtime) => {
|
|
748
|
-
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
} catch (error) {
|
|
754
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
755
|
-
import_core13.logger.error(`Error in test_text_embedding: ${message}`);
|
|
756
|
-
throw error;
|
|
1168
|
+
const embedding = await runtime.useModel(import_core14.ModelType.TEXT_EMBEDDING, {
|
|
1169
|
+
text: "Hello, world!"
|
|
1170
|
+
});
|
|
1171
|
+
if (!Array.isArray(embedding) || embedding.length === 0) {
|
|
1172
|
+
throw new Error("Embedding should return a non-empty array");
|
|
757
1173
|
}
|
|
1174
|
+
import_core14.logger.info(`[OpenAI Test] Generated embedding with ${embedding.length} dimensions`);
|
|
758
1175
|
}
|
|
759
1176
|
},
|
|
760
1177
|
{
|
|
761
|
-
name: "
|
|
1178
|
+
name: "openai_test_text_small",
|
|
762
1179
|
fn: async (runtime) => {
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
throw new Error("Failed to generate text");
|
|
769
|
-
}
|
|
770
|
-
import_core13.logger.log({ text }, "generated with test_text_large");
|
|
771
|
-
} catch (error) {
|
|
772
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
773
|
-
import_core13.logger.error(`Error in test_text_large: ${message}`);
|
|
774
|
-
throw error;
|
|
1180
|
+
const text = await runtime.useModel(import_core14.ModelType.TEXT_SMALL, {
|
|
1181
|
+
prompt: "Say hello in exactly 5 words."
|
|
1182
|
+
});
|
|
1183
|
+
if (typeof text !== "string" || text.length === 0) {
|
|
1184
|
+
throw new Error("TEXT_SMALL should return non-empty string");
|
|
775
1185
|
}
|
|
1186
|
+
import_core14.logger.info(`[OpenAI Test] TEXT_SMALL generated: "${text.substring(0, 50)}..."`);
|
|
776
1187
|
}
|
|
777
1188
|
},
|
|
778
1189
|
{
|
|
779
|
-
name: "
|
|
1190
|
+
name: "openai_test_text_large",
|
|
780
1191
|
fn: async (runtime) => {
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
throw new Error("Failed to generate text");
|
|
787
|
-
}
|
|
788
|
-
import_core13.logger.log({ text }, "generated with test_text_small");
|
|
789
|
-
} catch (error) {
|
|
790
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
791
|
-
import_core13.logger.error(`Error in test_text_small: ${message}`);
|
|
792
|
-
throw error;
|
|
1192
|
+
const text = await runtime.useModel(import_core14.ModelType.TEXT_LARGE, {
|
|
1193
|
+
prompt: "Explain quantum computing in 2 sentences."
|
|
1194
|
+
});
|
|
1195
|
+
if (typeof text !== "string" || text.length === 0) {
|
|
1196
|
+
throw new Error("TEXT_LARGE should return non-empty string");
|
|
793
1197
|
}
|
|
1198
|
+
import_core14.logger.info(`[OpenAI Test] TEXT_LARGE generated: "${text.substring(0, 50)}..."`);
|
|
794
1199
|
}
|
|
795
1200
|
},
|
|
796
1201
|
{
|
|
797
|
-
name: "
|
|
1202
|
+
name: "openai_test_tokenizer_roundtrip",
|
|
798
1203
|
fn: async (runtime) => {
|
|
799
|
-
|
|
800
|
-
|
|
801
|
-
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
809
|
-
|
|
810
|
-
|
|
1204
|
+
const originalText = "Hello, tokenizer test!";
|
|
1205
|
+
const tokens = await runtime.useModel(import_core14.ModelType.TEXT_TOKENIZER_ENCODE, {
|
|
1206
|
+
prompt: originalText,
|
|
1207
|
+
modelType: import_core14.ModelType.TEXT_SMALL
|
|
1208
|
+
});
|
|
1209
|
+
if (!Array.isArray(tokens) || tokens.length === 0) {
|
|
1210
|
+
throw new Error("Tokenization should return non-empty token array");
|
|
1211
|
+
}
|
|
1212
|
+
const decodedText = await runtime.useModel(import_core14.ModelType.TEXT_TOKENIZER_DECODE, {
|
|
1213
|
+
tokens,
|
|
1214
|
+
modelType: import_core14.ModelType.TEXT_SMALL
|
|
1215
|
+
});
|
|
1216
|
+
if (decodedText !== originalText) {
|
|
1217
|
+
throw new Error(`Tokenizer roundtrip failed: expected "${originalText}", got "${decodedText}"`);
|
|
811
1218
|
}
|
|
1219
|
+
import_core14.logger.info(`[OpenAI Test] Tokenizer roundtrip successful (${tokens.length} tokens)`);
|
|
812
1220
|
}
|
|
813
1221
|
},
|
|
814
1222
|
{
|
|
815
|
-
name: "
|
|
1223
|
+
name: "openai_test_streaming",
|
|
816
1224
|
fn: async (runtime) => {
|
|
817
|
-
|
|
818
|
-
|
|
819
|
-
|
|
820
|
-
|
|
821
|
-
|
|
822
|
-
|
|
823
|
-
} else {
|
|
824
|
-
import_core13.logger.error("Invalid image description result format:", result);
|
|
825
|
-
}
|
|
826
|
-
} catch (e) {
|
|
827
|
-
const message = e instanceof Error ? e.message : String(e);
|
|
828
|
-
import_core13.logger.error(`Error in image description test: ${message}`);
|
|
1225
|
+
const chunks = [];
|
|
1226
|
+
const result = await runtime.useModel(import_core14.ModelType.TEXT_LARGE, {
|
|
1227
|
+
prompt: "Count from 1 to 5, one number per line.",
|
|
1228
|
+
stream: true,
|
|
1229
|
+
onStreamChunk: (chunk) => {
|
|
1230
|
+
chunks.push(chunk);
|
|
829
1231
|
}
|
|
830
|
-
}
|
|
831
|
-
|
|
832
|
-
|
|
1232
|
+
});
|
|
1233
|
+
if (typeof result !== "string" || result.length === 0) {
|
|
1234
|
+
throw new Error("Streaming should return non-empty result");
|
|
833
1235
|
}
|
|
834
|
-
|
|
835
|
-
|
|
836
|
-
{
|
|
837
|
-
name: "openai_test_transcription",
|
|
838
|
-
fn: async (runtime) => {
|
|
839
|
-
import_core13.logger.log("openai_test_transcription");
|
|
840
|
-
try {
|
|
841
|
-
const response = await fetch("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg");
|
|
842
|
-
const arrayBuffer = await response.arrayBuffer();
|
|
843
|
-
const transcription = await runtime.useModel(import_core13.ModelType.TRANSCRIPTION, Buffer.from(new Uint8Array(arrayBuffer)));
|
|
844
|
-
import_core13.logger.log({ transcription }, "generated with test_transcription");
|
|
845
|
-
} catch (error) {
|
|
846
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
847
|
-
import_core13.logger.error(`Error in test_transcription: ${message}`);
|
|
848
|
-
throw error;
|
|
1236
|
+
if (chunks.length === 0) {
|
|
1237
|
+
throw new Error("No streaming chunks received");
|
|
849
1238
|
}
|
|
1239
|
+
import_core14.logger.info(`[OpenAI Test] Streaming test: ${chunks.length} chunks received`);
|
|
850
1240
|
}
|
|
851
1241
|
},
|
|
852
1242
|
{
|
|
853
|
-
name: "
|
|
1243
|
+
name: "openai_test_image_description",
|
|
854
1244
|
fn: async (runtime) => {
|
|
855
|
-
const
|
|
856
|
-
const
|
|
857
|
-
if (!
|
|
858
|
-
throw new Error("
|
|
1245
|
+
const testImageUrl = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a7/Camponotus_flavomarginatus_ant.jpg/440px-Camponotus_flavomarginatus_ant.jpg";
|
|
1246
|
+
const result = await runtime.useModel(import_core14.ModelType.IMAGE_DESCRIPTION, testImageUrl);
|
|
1247
|
+
if (!result || typeof result !== "object" || !("title" in result) || !("description" in result)) {
|
|
1248
|
+
throw new Error("Image description should return { title, description }");
|
|
859
1249
|
}
|
|
860
|
-
|
|
1250
|
+
import_core14.logger.info(`[OpenAI Test] Image described: "${result.title}"`);
|
|
861
1251
|
}
|
|
862
1252
|
},
|
|
863
1253
|
{
|
|
864
|
-
name: "
|
|
1254
|
+
name: "openai_test_transcription",
|
|
865
1255
|
fn: async (runtime) => {
|
|
866
|
-
const
|
|
867
|
-
const
|
|
868
|
-
const
|
|
869
|
-
|
|
870
|
-
|
|
871
|
-
|
|
872
|
-
|
|
873
|
-
throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
|
|
1256
|
+
const audioUrl = "https://upload.wikimedia.org/wikipedia/commons/2/25/En-Open_Source.ogg";
|
|
1257
|
+
const response = await fetch(audioUrl);
|
|
1258
|
+
const arrayBuffer = await response.arrayBuffer();
|
|
1259
|
+
const audioBuffer = Buffer.from(new Uint8Array(arrayBuffer));
|
|
1260
|
+
const transcription = await runtime.useModel(import_core14.ModelType.TRANSCRIPTION, audioBuffer);
|
|
1261
|
+
if (typeof transcription !== "string") {
|
|
1262
|
+
throw new Error("Transcription should return a string");
|
|
874
1263
|
}
|
|
875
|
-
|
|
1264
|
+
import_core14.logger.info(`[OpenAI Test] Transcription: "${transcription.substring(0, 50)}..."`);
|
|
876
1265
|
}
|
|
877
1266
|
},
|
|
878
1267
|
{
|
|
879
1268
|
name: "openai_test_text_to_speech",
|
|
880
1269
|
fn: async (runtime) => {
|
|
881
|
-
|
|
882
|
-
|
|
883
|
-
|
|
884
|
-
|
|
885
|
-
|
|
886
|
-
throw new Error("Failed to generate speech");
|
|
887
|
-
}
|
|
888
|
-
import_core13.logger.log("Generated speech successfully");
|
|
889
|
-
} catch (error) {
|
|
890
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
891
|
-
import_core13.logger.error(`Error in openai_test_text_to_speech: ${message}`);
|
|
892
|
-
throw error;
|
|
1270
|
+
const audioData = await runtime.useModel(import_core14.ModelType.TEXT_TO_SPEECH, {
|
|
1271
|
+
text: "Hello, this is a text-to-speech test."
|
|
1272
|
+
});
|
|
1273
|
+
if (!(audioData instanceof ArrayBuffer) || audioData.byteLength === 0) {
|
|
1274
|
+
throw new Error("TTS should return non-empty ArrayBuffer");
|
|
893
1275
|
}
|
|
1276
|
+
import_core14.logger.info(`[OpenAI Test] TTS generated ${audioData.byteLength} bytes of audio`);
|
|
894
1277
|
}
|
|
895
1278
|
},
|
|
896
1279
|
{
|
|
897
|
-
name: "
|
|
1280
|
+
name: "openai_test_object_generation",
|
|
898
1281
|
fn: async (runtime) => {
|
|
899
|
-
|
|
900
|
-
|
|
901
|
-
|
|
902
|
-
|
|
903
|
-
|
|
904
|
-
throw new Error("Text generation returned empty result");
|
|
905
|
-
}
|
|
906
|
-
import_core13.logger.log({ result }, "Text generation test completed");
|
|
907
|
-
} catch (error) {
|
|
908
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
909
|
-
import_core13.logger.error(`Error in openai_test_text_generation_large: ${message}`);
|
|
910
|
-
throw error;
|
|
1282
|
+
const result = await runtime.useModel(import_core14.ModelType.OBJECT_SMALL, {
|
|
1283
|
+
prompt: "Return a JSON object with exactly these fields: name (string), age (number), active (boolean)"
|
|
1284
|
+
});
|
|
1285
|
+
if (!result || typeof result !== "object") {
|
|
1286
|
+
throw new Error("Object generation should return an object");
|
|
911
1287
|
}
|
|
1288
|
+
import_core14.logger.info(`[OpenAI Test] Object generated: ${JSON.stringify(result).substring(0, 100)}`);
|
|
912
1289
|
}
|
|
913
1290
|
},
|
|
914
1291
|
{
|
|
915
|
-
name: "
|
|
1292
|
+
name: "openai_test_research",
|
|
916
1293
|
fn: async (runtime) => {
|
|
917
|
-
|
|
918
|
-
|
|
919
|
-
|
|
920
|
-
|
|
921
|
-
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
|
|
927
|
-
}
|
|
928
|
-
if (chunks.length === 0) {
|
|
929
|
-
throw new Error("No streaming chunks received");
|
|
930
|
-
}
|
|
931
|
-
import_core13.logger.log({ chunks: chunks.length, result: result.substring(0, 50) }, "Streaming test completed");
|
|
932
|
-
} catch (error) {
|
|
933
|
-
const message = error instanceof Error ? error.message : String(error);
|
|
934
|
-
import_core13.logger.error(`Error in openai_test_streaming: ${message}`);
|
|
935
|
-
throw error;
|
|
1294
|
+
const result = await runtime.useModel(import_core14.ModelType.RESEARCH, {
|
|
1295
|
+
input: "What is the current date and time?",
|
|
1296
|
+
tools: [{ type: "web_search_preview" }],
|
|
1297
|
+
maxToolCalls: 3
|
|
1298
|
+
});
|
|
1299
|
+
if (!result || typeof result !== "object" || !("text" in result)) {
|
|
1300
|
+
throw new Error("Research should return an object with text property");
|
|
1301
|
+
}
|
|
1302
|
+
if (typeof result.text !== "string" || result.text.length === 0) {
|
|
1303
|
+
throw new Error("Research result text should be a non-empty string");
|
|
936
1304
|
}
|
|
1305
|
+
import_core14.logger.info(`[OpenAI Test] Research completed. Text length: ${result.text.length}, Annotations: ${result.annotations?.length ?? 0}`);
|
|
937
1306
|
}
|
|
938
1307
|
}
|
|
939
1308
|
]
|
|
940
1309
|
}
|
|
941
1310
|
]
|
|
942
1311
|
};
|
|
943
|
-
var
|
|
1312
|
+
var typescript_default = openaiPlugin;
|
|
944
1313
|
|
|
945
|
-
//# debugId=
|
|
1314
|
+
//# debugId=063EB1323839B08A64756E2164756E21
|