@elizaos/plugin-openai 1.0.0-beta.5 → 1.0.0-beta.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,11 +1,609 @@
  // src/index.ts
  import { createOpenAI } from "@ai-sdk/openai";
  import {
+ EventType,
+ logger,
  ModelType,
- logger
+ safeReplacer,
+ ServiceType,
+ VECTOR_DIMS
  } from "@elizaos/core";
- import { generateObject, generateText } from "ai";
+
+ // node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
+ var _globalThis = typeof globalThis === "object" ? globalThis : global;
+
+ // node_modules/@opentelemetry/api/build/esm/version.js
+ var VERSION = "1.9.0";
+
+ // node_modules/@opentelemetry/api/build/esm/internal/semver.js
+ var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
+ function _makeCompatibilityCheck(ownVersion) {
+ var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
+ var rejectedVersions = /* @__PURE__ */ new Set();
+ var myVersionMatch = ownVersion.match(re);
+ if (!myVersionMatch) {
+ return function() {
+ return false;
+ };
+ }
+ var ownVersionParsed = {
+ major: +myVersionMatch[1],
+ minor: +myVersionMatch[2],
+ patch: +myVersionMatch[3],
+ prerelease: myVersionMatch[4]
+ };
+ if (ownVersionParsed.prerelease != null) {
+ return function isExactmatch(globalVersion) {
+ return globalVersion === ownVersion;
+ };
+ }
+ function _reject(v) {
+ rejectedVersions.add(v);
+ return false;
+ }
+ function _accept(v) {
+ acceptedVersions.add(v);
+ return true;
+ }
+ return function isCompatible2(globalVersion) {
+ if (acceptedVersions.has(globalVersion)) {
+ return true;
+ }
+ if (rejectedVersions.has(globalVersion)) {
+ return false;
+ }
+ var globalVersionMatch = globalVersion.match(re);
+ if (!globalVersionMatch) {
+ return _reject(globalVersion);
+ }
+ var globalVersionParsed = {
+ major: +globalVersionMatch[1],
+ minor: +globalVersionMatch[2],
+ patch: +globalVersionMatch[3],
+ prerelease: globalVersionMatch[4]
+ };
+ if (globalVersionParsed.prerelease != null) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major !== globalVersionParsed.major) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major === 0) {
+ if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.minor <= globalVersionParsed.minor) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ };
+ }
+ var isCompatible = _makeCompatibilityCheck(VERSION);
+
+ // node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
+ var major = VERSION.split(".")[0];
+ var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
+ var _global = _globalThis;
+ function registerGlobal(type, instance, diag, allowOverride) {
+ var _a;
+ if (allowOverride === void 0) {
+ allowOverride = false;
+ }
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
+ version: VERSION
+ };
+ if (!allowOverride && api[type]) {
+ var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ if (api.version !== VERSION) {
+ var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ api[type] = instance;
+ diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
+ return true;
+ }
+ function getGlobal(type) {
+ var _a, _b;
+ var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
+ if (!globalVersion || !isCompatible(globalVersion)) {
+ return;
+ }
+ return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
+ }
+ function unregisterGlobal(type, diag) {
+ diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
+ if (api) {
+ delete api[type];
+ }
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
+ var __read = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var DiagComponentLogger = (
+ /** @class */
+ function() {
+ function DiagComponentLogger2(props) {
+ this._namespace = props.namespace || "DiagComponentLogger";
+ }
+ DiagComponentLogger2.prototype.debug = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("debug", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.error = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("error", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.info = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("info", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.warn = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("warn", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.verbose = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("verbose", this._namespace, args);
+ };
+ return DiagComponentLogger2;
+ }()
+ );
+ function logProxy(funcName, namespace, args) {
+ var logger2 = getGlobal("diag");
+ if (!logger2) {
+ return;
+ }
+ args.unshift(namespace);
+ return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/diag/types.js
+ var DiagLogLevel;
+ (function(DiagLogLevel2) {
+ DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
+ DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
+ DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
+ DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
+ DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
+ DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
+ DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
+ })(DiagLogLevel || (DiagLogLevel = {}));
+
+ // node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
+ function createLogLevelDiagLogger(maxLevel, logger2) {
+ if (maxLevel < DiagLogLevel.NONE) {
+ maxLevel = DiagLogLevel.NONE;
+ } else if (maxLevel > DiagLogLevel.ALL) {
+ maxLevel = DiagLogLevel.ALL;
+ }
+ logger2 = logger2 || {};
+ function _filterFunc(funcName, theLevel) {
+ var theFunc = logger2[funcName];
+ if (typeof theFunc === "function" && maxLevel >= theLevel) {
+ return theFunc.bind(logger2);
+ }
+ return function() {
+ };
+ }
+ return {
+ error: _filterFunc("error", DiagLogLevel.ERROR),
+ warn: _filterFunc("warn", DiagLogLevel.WARN),
+ info: _filterFunc("info", DiagLogLevel.INFO),
+ debug: _filterFunc("debug", DiagLogLevel.DEBUG),
+ verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
+ };
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/api/diag.js
+ var __read2 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray2 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME = "diag";
+ var DiagAPI = (
+ /** @class */
+ function() {
+ function DiagAPI2() {
+ function _logProxy(funcName) {
+ return function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ var logger2 = getGlobal("diag");
+ if (!logger2)
+ return;
+ return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
+ };
+ }
+ var self = this;
+ var setLogger = function(logger2, optionsOrLogLevel) {
+ var _a, _b, _c;
+ if (optionsOrLogLevel === void 0) {
+ optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
+ }
+ if (logger2 === self) {
+ var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
+ self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
+ return false;
+ }
+ if (typeof optionsOrLogLevel === "number") {
+ optionsOrLogLevel = {
+ logLevel: optionsOrLogLevel
+ };
+ }
+ var oldLogger = getGlobal("diag");
+ var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
+ if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
+ var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
+ oldLogger.warn("Current logger will be overwritten from " + stack);
+ newLogger.warn("Current logger will overwrite one already registered from " + stack);
+ }
+ return registerGlobal("diag", newLogger, self, true);
+ };
+ self.setLogger = setLogger;
+ self.disable = function() {
+ unregisterGlobal(API_NAME, self);
+ };
+ self.createComponentLogger = function(options) {
+ return new DiagComponentLogger(options);
+ };
+ self.verbose = _logProxy("verbose");
+ self.debug = _logProxy("debug");
+ self.info = _logProxy("info");
+ self.warn = _logProxy("warn");
+ self.error = _logProxy("error");
+ }
+ DiagAPI2.instance = function() {
+ if (!this._instance) {
+ this._instance = new DiagAPI2();
+ }
+ return this._instance;
+ };
+ return DiagAPI2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/context/context.js
+ var BaseContext = (
+ /** @class */
+ /* @__PURE__ */ function() {
+ function BaseContext2(parentContext) {
+ var self = this;
+ self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
+ self.getValue = function(key) {
+ return self._currentContext.get(key);
+ };
+ self.setValue = function(key, value) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.set(key, value);
+ return context2;
+ };
+ self.deleteValue = function(key) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.delete(key);
+ return context2;
+ };
+ }
+ return BaseContext2;
+ }()
+ );
+ var ROOT_CONTEXT = new BaseContext();
+
+ // node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
+ var __read3 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray3 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var NoopContextManager = (
+ /** @class */
+ function() {
+ function NoopContextManager2() {
+ }
+ NoopContextManager2.prototype.active = function() {
+ return ROOT_CONTEXT;
+ };
+ NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
+ };
+ NoopContextManager2.prototype.bind = function(_context, target) {
+ return target;
+ };
+ NoopContextManager2.prototype.enable = function() {
+ return this;
+ };
+ NoopContextManager2.prototype.disable = function() {
+ return this;
+ };
+ return NoopContextManager2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/api/context.js
+ var __read4 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray4 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME2 = "context";
+ var NOOP_CONTEXT_MANAGER = new NoopContextManager();
+ var ContextAPI = (
+ /** @class */
+ function() {
+ function ContextAPI2() {
+ }
+ ContextAPI2.getInstance = function() {
+ if (!this._instance) {
+ this._instance = new ContextAPI2();
+ }
+ return this._instance;
+ };
+ ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
+ return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
+ };
+ ContextAPI2.prototype.active = function() {
+ return this._getContextManager().active();
+ };
+ ContextAPI2.prototype.with = function(context2, fn, thisArg) {
+ var _a;
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
+ };
+ ContextAPI2.prototype.bind = function(context2, target) {
+ return this._getContextManager().bind(context2, target);
+ };
+ ContextAPI2.prototype._getContextManager = function() {
+ return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
+ };
+ ContextAPI2.prototype.disable = function() {
+ this._getContextManager().disable();
+ unregisterGlobal(API_NAME2, DiagAPI.instance());
+ };
+ return ContextAPI2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/trace/status.js
+ var SpanStatusCode;
+ (function(SpanStatusCode2) {
+ SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
+ SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
+ SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
+ })(SpanStatusCode || (SpanStatusCode = {}));
+
+ // node_modules/@opentelemetry/api/build/esm/context-api.js
+ var context = ContextAPI.getInstance();
+
+ // src/index.ts
+ import {
+ generateObject,
+ generateText,
+ JSONParseError
+ } from "ai";
  import { encodingForModel } from "js-tiktoken";
+ import { fetch, FormData } from "undici";
+ function getTracer(runtime) {
+ const availableServices = Array.from(runtime.getAllServices().keys());
+ logger.debug(
+ `[getTracer] Available services: ${JSON.stringify(availableServices)}`
+ );
+ logger.debug(
+ `[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`
+ );
+ const instrumentationService = runtime.getService(
+ ServiceType.INSTRUMENTATION
+ );
+ if (!instrumentationService) {
+ logger.warn(
+ `[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`
+ );
+ return null;
+ }
+ if (!instrumentationService.isEnabled()) {
+ logger.debug("[getTracer] Instrumentation service found but is disabled.");
+ return null;
+ }
+ logger.debug(
+ "[getTracer] Successfully retrieved enabled instrumentation service."
+ );
+ return instrumentationService.getTracer("eliza.llm.openai");
+ }
+ async function startLlmSpan(runtime, spanName, attributes, fn) {
+ const tracer = getTracer(runtime);
+ if (!tracer) {
+ const dummySpan = {
+ setAttribute: () => {
+ },
+ setAttributes: () => {
+ },
+ addEvent: () => {
+ },
+ recordException: () => {
+ },
+ setStatus: () => {
+ },
+ end: () => {
+ },
+ spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
+ };
+ return fn(dummySpan);
+ }
+ const activeContext = context.active();
+ return tracer.startActiveSpan(
+ spanName,
+ { attributes },
+ activeContext,
+ async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ span.end();
+ return result;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ span.recordException(error);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ span.end();
+ throw error;
+ }
+ }
+ );
+ }
+ function getSetting(runtime, key, defaultValue) {
+ return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
+ }
+ function getBaseURL(runtime) {
+ const baseURL = getSetting(
+ runtime,
+ "OPENAI_BASE_URL",
+ "https://api.openai.com/v1"
+ );
+ logger.debug(`[OpenAI] Default base URL: ${baseURL}`);
+ return baseURL;
+ }
+ function getEmbeddingBaseURL(runtime) {
+ const embeddingURL = getSetting(runtime, "OPENAI_EMBEDDING_URL");
+ if (embeddingURL) {
+ logger.debug(`[OpenAI] Using specific embedding base URL: ${embeddingURL}`);
+ return embeddingURL;
+ }
+ logger.debug("[OpenAI] Falling back to general base URL for embeddings.");
+ return getBaseURL(runtime);
+ }
+ function getApiKey(runtime) {
+ return getSetting(runtime, "OPENAI_API_KEY");
+ }
+ function getSmallModel(runtime) {
+ return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
+ }
+ function getLargeModel(runtime) {
+ return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
+ }
+ function createOpenAIClient(runtime) {
+ return createOpenAI({
+ apiKey: getApiKey(runtime),
+ baseURL: getBaseURL(runtime)
+ });
+ }
  async function tokenizeText(model, prompt) {
  const modelName = model === ModelType.TEXT_SMALL ? process.env.OPENAI_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-4o-mini" : process.env.LARGE_MODEL ?? "gpt-4o";
  const encoding = encodingForModel(modelName);
@@ -17,6 +615,185 @@ async function detokenizeText(model, tokens) {
  const encoding = encodingForModel(modelName);
  return encoding.decode(tokens);
  }
+ async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+ const openai = createOpenAIClient(runtime);
+ const modelName = getModelFn(runtime);
+ logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+ const temperature = params.temperature ?? 0;
+ const schemaPresent = !!params.schema;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "object_generation",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.schema_present": schemaPresent
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.generateObject",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+ if (schemaPresent) {
+ span.addEvent("llm.request.schema", {
+ schema: JSON.stringify(params.schema, safeReplacer())
+ });
+ logger.info(
+ `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+ );
+ }
+ try {
+ const { object, usage } = await generateObject({
+ model: openai.languageModel(modelName),
+ output: "no-schema",
+ prompt: params.prompt,
+ temperature,
+ experimental_repairText: getJsonRepairFunction()
+ });
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(object, safeReplacer())
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(
+ runtime,
+ modelType,
+ params.prompt,
+ usage
+ );
+ }
+ return object;
+ } catch (error) {
+ if (error instanceof JSONParseError) {
+ logger.error(
+ `[generateObject] Failed to parse JSON: ${error.message}`
+ );
+ span.recordException(error);
+ span.addEvent("llm.error.json_parse", {
+ "error.message": error.message,
+ "error.text": error.text
+ });
+ span.addEvent("llm.repair.attempt");
+ const repairFunction = getJsonRepairFunction();
+ const repairedJsonString = await repairFunction({
+ text: error.text,
+ error
+ });
+ if (repairedJsonString) {
+ try {
+ const repairedObject = JSON.parse(repairedJsonString);
+ span.addEvent("llm.repair.success", {
+ repaired_object: JSON.stringify(repairedObject, safeReplacer())
+ });
+ logger.info("[generateObject] Successfully repaired JSON.");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "JSON parsing failed but was repaired"
+ });
+ return repairedObject;
+ } catch (repairParseError) {
+ const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+ logger.error(
+ `[generateObject] Failed to parse repaired JSON: ${message}`
+ );
+ const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+ span.recordException(exception);
+ span.addEvent("llm.repair.parse_error", {
+ "error.message": message
+ });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${message}`
+ });
+ throw repairParseError;
+ }
+ } else {
+ const errMsg = error instanceof Error ? error.message : String(error);
+ logger.error("[generateObject] JSON repair failed.");
+ span.addEvent("llm.repair.failed");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${errMsg}`
+ });
+ throw error;
+ }
+ } else {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`[generateObject] Unknown error: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message
+ });
+ throw error;
+ }
+ }
+ }
+ );
+ }
+ function getJsonRepairFunction() {
+ return async ({ text, error }) => {
+ try {
+ if (error instanceof JSONParseError) {
+ const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+ JSON.parse(cleanedText);
+ return cleanedText;
+ }
+ return null;
+ } catch (jsonError) {
+ const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+ logger.warn(`Failed to repair JSON text: ${message}`);
+ return null;
+ }
+ };
+ }
+ function emitModelUsageEvent(runtime, type, prompt, usage) {
+ runtime.emitEvent(EventType.MODEL_USED, {
+ provider: "openai",
+ type,
+ prompt,
+ tokens: {
+ prompt: usage.promptTokens,
+ completion: usage.completionTokens,
+ total: usage.totalTokens
+ }
+ });
+ }
+ async function fetchTextToSpeech(runtime, text) {
+ const apiKey = getApiKey(runtime);
+ const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+ const voice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
+ const instructions = getSetting(runtime, "OPENAI_TTS_INSTRUCTIONS", "");
+ const baseURL = getBaseURL(runtime);
+ try {
+ const res = await fetch(`${baseURL}/audio/speech`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model,
+ voice,
+ input: text,
+ ...instructions && { instructions }
+ })
+ });
+ if (!res.ok) {
+ const err = await res.text();
+ throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
+ }
+ return res.body;
+ } catch (err) {
+ const message = err instanceof Error ? err.message : String(err);
+ throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
+ }
+ }
  var openaiPlugin = {
  name: "openai",
  description: "OpenAI plugin",
@@ -26,41 +803,70 @@ var openaiPlugin = {
  OPENAI_SMALL_MODEL: process.env.OPENAI_SMALL_MODEL,
  OPENAI_LARGE_MODEL: process.env.OPENAI_LARGE_MODEL,
  SMALL_MODEL: process.env.SMALL_MODEL,
- LARGE_MODEL: process.env.LARGE_MODEL
+ LARGE_MODEL: process.env.LARGE_MODEL,
+ OPENAI_EMBEDDING_MODEL: process.env.OPENAI_EMBEDDING_MODEL,
+ OPENAI_EMBEDDING_URL: process.env.OPENAI_EMBEDDING_URL,
+ OPENAI_EMBEDDING_DIMENSIONS: process.env.OPENAI_EMBEDDING_DIMENSIONS
  },
- async init(config) {
+ async init(_config, runtime) {
  try {
- if (!process.env.OPENAI_API_KEY) {
+ if (!getApiKey(runtime)) {
  logger.warn(
  "OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited"
  );
  return;
  }
  try {
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
+ const baseURL = getBaseURL(runtime);
  const response = await fetch(`${baseURL}/models`, {
- headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }
+ headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
  });
  if (!response.ok) {
- logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ logger.warn(
+ `OpenAI API key validation failed: ${response.statusText}`
+ );
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  } else {
+ logger.log("OpenAI API key validated successfully");
  }
  } catch (fetchError) {
- logger.warn(`Error validating OpenAI API key: ${fetchError}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+ logger.warn(`Error validating OpenAI API key: ${message}`);
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  }
  } catch (error) {
+ const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
  logger.warn(
- `OpenAI plugin configuration issue: ${error.errors.map((e) => e.message).join(", ")} - You need to configure the OPENAI_API_KEY in your environment variables`
+ `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
  );
  }
  },
  models: {
- [ModelType.TEXT_EMBEDDING]: async (_runtime, params) => {
+ [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
+ const embeddingModelName = getSetting(
+ runtime,
+ "OPENAI_EMBEDDING_MODEL",
+ "text-embedding-3-small"
+ );
+ const embeddingDimension = Number.parseInt(
+ getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536",
+ 10
+ );
+ logger.debug(
+ `[OpenAI] Using embedding model: ${embeddingModelName} with dimension: ${embeddingDimension}`
+ );
+ if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
+ const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+ logger.error(errorMsg);
+ throw new Error(errorMsg);
+ }
  if (params === null) {
  logger.debug("Creating test embedding for initialization");
- const testVector = Array(1536).fill(0);
+ const testVector = Array(embeddingDimension).fill(0);
  testVector[0] = 0.1;
  return testVector;
  }
@@ -71,51 +877,115 @@ var openaiPlugin = {
  text = params.text;
  } else {
  logger.warn("Invalid input format for embedding");
- const fallbackVector = Array(1536).fill(0);
+ const fallbackVector = Array(embeddingDimension).fill(0);
  fallbackVector[0] = 0.2;
  return fallbackVector;
  }
  if (!text.trim()) {
  logger.warn("Empty text for embedding");
- const emptyVector = Array(1536).fill(0);
+ const emptyVector = Array(embeddingDimension).fill(0);
  emptyVector[0] = 0.3;
  return emptyVector;
  }
- try {
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
- const response = await fetch(`${baseURL}/embeddings`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: "text-embedding-3-small",
- input: text
- })
- });
- if (!response.ok) {
- logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
- }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger.error("API returned invalid structure");
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.5;
- return errorVector;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "embedding",
+ "llm.request.model": embeddingModelName,
+ "llm.request.embedding.dimensions": embeddingDimension,
+ "input.text.length": text.length
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.embedding",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: text
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ logger.error(
+ `OpenAI API error: ${response.status} - ${response.statusText}`
+ );
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.4;
+ return errorVector;
+ }
+ const data = await response.json();
+ if (!data?.data?.[0]?.embedding) {
+ logger.error("API returned invalid structure");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "API returned invalid structure"
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.5;
+ return errorVector;
+ }
+ const embedding = data.data[0].embedding;
+ span.setAttribute(
+ "llm.response.embedding.vector_length",
+ embedding.length
+ );
+ if (data.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+ "llm.usage.total_tokens": data.usage.total_tokens
+ });
+ const usage = {
+ promptTokens: data.usage.prompt_tokens,
+ completionTokens: 0,
+ totalTokens: data.usage.total_tokens
+ };
+ emitModelUsageEvent(
+ runtime,
+ ModelType.TEXT_EMBEDDING,
+ text,
+ usage
+ );
+ }
+ logger.log(`Got valid embedding with length ${embedding.length}`);
+ return embedding;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error generating embedding: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.6;
+ return errorVector;
+ }
  }
- const embedding = data.data[0].embedding;
- logger.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- logger.error("Error generating embedding:", error);
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.6;
- return errorVector;
- }
+ );
  },
  [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
  return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
@@ -128,25 +998,54 @@ var openaiPlugin = {
  const frequency_penalty = 0.7;
  const presence_penalty = 0.7;
  const max_response_length = 8192;
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
- const openai = createOpenAI({
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
- baseURL
- });
- const model = runtime.getSetting("OPENAI_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "gpt-4o-mini";
- logger.log("generating text");
+ const openai = createOpenAIClient(runtime);
+ const modelName = getSmallModel(runtime);
+ logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
  logger.log(prompt);
- const { text: openaiResponse } = await generateText({
- model: openai.languageModel(model),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens: max_response_length,
- frequencyPenalty: frequency_penalty,
- presencePenalty: presence_penalty,
- stopSequences
- });
- return openaiResponse;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "completion",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.max_tokens": max_response_length,
+ "llm.request.frequency_penalty": frequency_penalty,
+ "llm.request.presence_penalty": presence_penalty,
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.generateText",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens: max_response_length,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ stopSequences
+ });
+ span.setAttribute(
+ "llm.response.processed.length",
+ openaiResponse.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+ }
+ return openaiResponse;
+ }
+ );
  },
  [ModelType.TEXT_LARGE]: async (runtime, {
  prompt,
@@ -156,194 +1055,393 @@ var openaiPlugin = {
156
1055
  frequencyPenalty = 0.7,
157
1056
  presencePenalty = 0.7
158
1057
  }) => {
159
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
160
- const openai = createOpenAI({
161
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
162
- baseURL
163
- });
164
- const model = runtime.getSetting("OPENAI_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "gpt-4o";
165
- const { text: openaiResponse } = await generateText({
166
- model: openai.languageModel(model),
167
- prompt,
168
- system: runtime.character.system ?? void 0,
169
- temperature,
170
- maxTokens,
171
- frequencyPenalty,
172
- presencePenalty,
173
- stopSequences
174
- });
175
- return openaiResponse;
1058
+ const openai = createOpenAIClient(runtime);
1059
+ const modelName = getLargeModel(runtime);
1060
+ logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
1061
+ logger.log(prompt);
1062
+ const attributes = {
1063
+ "llm.vendor": "OpenAI",
1064
+ "llm.request.type": "completion",
1065
+ "llm.request.model": modelName,
1066
+ "llm.request.temperature": temperature,
1067
+ "llm.request.max_tokens": maxTokens,
1068
+ "llm.request.frequency_penalty": frequencyPenalty,
1069
+ "llm.request.presence_penalty": presencePenalty,
1070
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
1071
+ };
1072
+ return startLlmSpan(
1073
+ runtime,
1074
+ "LLM.generateText",
1075
+ attributes,
1076
+ async (span) => {
1077
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
1078
+ const { text: openaiResponse, usage } = await generateText({
1079
+ model: openai.languageModel(modelName),
1080
+ prompt,
1081
+ system: runtime.character.system ?? void 0,
1082
+ temperature,
1083
+ maxTokens,
1084
+ frequencyPenalty,
1085
+ presencePenalty,
1086
+ stopSequences
1087
+ });
1088
+ span.setAttribute(
1089
+ "llm.response.processed.length",
1090
+ openaiResponse.length
1091
+ );
1092
+ span.addEvent("llm.response.processed", {
1093
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
1094
+ });
1095
+ if (usage) {
1096
+ span.setAttributes({
1097
+ "llm.usage.prompt_tokens": usage.promptTokens,
1098
+ "llm.usage.completion_tokens": usage.completionTokens,
1099
+ "llm.usage.total_tokens": usage.totalTokens
1100
+ });
1101
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
1102
+ }
1103
+ return openaiResponse;
1104
+ }
1105
+ );
176
1106
  },
177
1107
  [ModelType.IMAGE]: async (runtime, params) => {
178
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
179
- const response = await fetch(`${baseURL}/images/generations`, {
180
- method: "POST",
181
- headers: {
182
- Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`,
183
- "Content-Type": "application/json"
184
- },
185
- body: JSON.stringify({
186
- prompt: params.prompt,
187
- n: params.n || 1,
188
- size: params.size || "1024x1024"
189
- })
190
- });
191
- if (!response.ok) {
192
- throw new Error(`Failed to generate image: ${response.statusText}`);
193
- }
194
- const data = await response.json();
195
- const typedData = data;
196
- return typedData.data;
1108
+ const n = params.n || 1;
1109
+ const size = params.size || "1024x1024";
1110
+ const prompt = params.prompt;
1111
+ const modelName = "dall-e-3";
1112
+ logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
1113
+ const attributes = {
1114
+ "llm.vendor": "OpenAI",
1115
+ "llm.request.type": "image_generation",
1116
+ "llm.request.image.size": size,
1117
+ "llm.request.image.count": n
1118
+ };
1119
+ return startLlmSpan(
1120
+ runtime,
1121
+ "LLM.imageGeneration",
1122
+ attributes,
1123
+ async (span) => {
1124
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
1125
+ const baseURL = getBaseURL(runtime);
1126
+ const apiKey = getApiKey(runtime);
1127
+ if (!apiKey) {
1128
+ span.setStatus({
1129
+ code: SpanStatusCode.ERROR,
1130
+ message: "OpenAI API key not configured"
1131
+ });
1132
+ throw new Error("OpenAI API key not configured");
1133
+ }
1134
+ try {
1135
+ const response = await fetch(`${baseURL}/images/generations`, {
1136
+ method: "POST",
1137
+ headers: {
1138
+ Authorization: `Bearer ${apiKey}`,
1139
+ "Content-Type": "application/json"
1140
+ },
1141
+ body: JSON.stringify({
1142
+ prompt,
1143
+ n,
1144
+ size
1145
+ })
1146
+ });
1147
+ const responseClone = response.clone();
1148
+ const rawResponseBody = await responseClone.text();
1149
+ span.addEvent("llm.response.raw", {
1150
+ "response.body": rawResponseBody
1151
+ });
1152
+ if (!response.ok) {
1153
+ span.setAttributes({ "error.api.status": response.status });
1154
+ span.setStatus({
1155
+ code: SpanStatusCode.ERROR,
1156
+ message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
1157
+ });
1158
+ throw new Error(
1159
+ `Failed to generate image: ${response.statusText}`
1160
+ );
1161
+ }
1162
+ const data = await response.json();
1163
+ const typedData = data;
1164
+ span.addEvent("llm.response.processed", {
1165
+ "response.urls": JSON.stringify(typedData.data)
1166
+ });
1167
+ return typedData.data;
1168
+ } catch (error) {
1169
+ const message = error instanceof Error ? error.message : String(error);
1170
+ const exception = error instanceof Error ? error : new Error(message);
1171
+ span.recordException(exception);
1172
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
1173
+ throw error;
1174
+ }
1175
+ }
1176
+ );
197
1177
  },
198
1178
  [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
199
1179
  let imageUrl;
200
- let prompt;
1180
+ let promptText;
1181
+ const modelName = "gpt-4o-mini";
1182
+ logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
1183
+ const maxTokens = 300;
201
1184
  if (typeof params === "string") {
202
1185
  imageUrl = params;
203
- prompt = void 0;
1186
+ promptText = "Please analyze this image and provide a title and detailed description.";
204
1187
  } else {
205
1188
  imageUrl = params.imageUrl;
206
- prompt = params.prompt;
1189
+ promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
207
1190
  }
208
- try {
209
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
210
- const apiKey = process.env.OPENAI_API_KEY;
211
- if (!apiKey) {
212
- logger.error("OpenAI API key not set");
213
- return {
214
- title: "Failed to analyze image",
215
- description: "API key not configured"
216
- };
217
- }
218
- const response = await fetch(`${baseURL}/chat/completions`, {
219
- method: "POST",
220
- headers: {
221
- "Content-Type": "application/json",
222
- Authorization: `Bearer ${apiKey}`
223
- },
224
- body: JSON.stringify({
225
- model: "gpt-4-vision-preview",
226
- messages: [
227
- {
228
- role: "user",
229
- content: [
230
- {
231
- type: "text",
232
- text: prompt || "Please analyze this image and provide a title and detailed description."
233
- },
234
- {
235
- type: "image_url",
236
- image_url: { url: imageUrl }
237
- }
238
- ]
239
- }
240
- ],
241
- max_tokens: 300
242
- })
243
- });
244
- if (!response.ok) {
245
- throw new Error(`OpenAI API error: ${response.status}`);
1191
+ const attributes = {
1192
+ "llm.vendor": "OpenAI",
1193
+ "llm.request.type": "chat",
1194
+ "llm.request.model": modelName,
1195
+ "llm.request.max_tokens": maxTokens,
1196
+ "llm.request.image.url": imageUrl
1197
+ };
1198
+ const messages = [
1199
+ {
1200
+ role: "user",
1201
+ content: [
1202
+ { type: "text", text: promptText },
1203
+ { type: "image_url", image_url: { url: imageUrl } }
1204
+ ]
246
1205
  }
247
- const result = await response.json();
248
- const content = result.choices?.[0]?.message?.content;
249
- if (!content) {
250
- return {
251
- title: "Failed to analyze image",
252
- description: "No response from API"
253
- };
1206
+ ];
1207
+ return startLlmSpan(
1208
+ runtime,
1209
+ "LLM.imageDescription",
1210
+ attributes,
1211
+ async (span) => {
1212
+ span.addEvent("llm.prompt", {
1213
+ "prompt.content": JSON.stringify(messages, safeReplacer())
1214
+ });
1215
+ const baseURL = getBaseURL(runtime);
1216
+ const apiKey = getApiKey(runtime);
1217
+ if (!apiKey) {
1218
+ logger.error("OpenAI API key not set");
1219
+ span.setStatus({
1220
+ code: SpanStatusCode.ERROR,
1221
+ message: "OpenAI API key not configured"
1222
+ });
1223
+ return {
1224
+ title: "Failed to analyze image",
1225
+ description: "API key not configured"
1226
+ };
1227
+ }
1228
+ try {
1229
+ const response = await fetch(`${baseURL}/chat/completions`, {
1230
+ method: "POST",
1231
+ headers: {
1232
+ "Content-Type": "application/json",
1233
+ Authorization: `Bearer ${apiKey}`
1234
+ },
1235
+ body: JSON.stringify({
1236
+ model: modelName,
1237
+ messages,
1238
+ max_tokens: maxTokens
1239
+ })
1240
+ });
1241
+ const responseClone = response.clone();
1242
+ const rawResponseBody = await responseClone.text();
1243
+ span.addEvent("llm.response.raw", {
1244
+ "response.body": rawResponseBody
1245
+ });
1246
+ if (!response.ok) {
1247
+ span.setAttributes({ "error.api.status": response.status });
1248
+ span.setStatus({
1249
+ code: SpanStatusCode.ERROR,
1250
+ message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
1251
+ });
1252
+ throw new Error(`OpenAI API error: ${response.status}`);
1253
+ }
1254
+ const result = await response.json();
1255
+ const typedResult = result;
1256
+ const content = typedResult.choices?.[0]?.message?.content;
1257
+ if (typedResult.usage) {
1258
+ span.setAttributes({
1259
+ "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
1260
+ "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
1261
+ "llm.usage.total_tokens": typedResult.usage.total_tokens
1262
+ });
1263
+ emitModelUsageEvent(
1264
+ runtime,
1265
+ ModelType.IMAGE_DESCRIPTION,
1266
+ typeof params === "string" ? params : params.prompt || "",
1267
+ {
1268
+ promptTokens: typedResult.usage.prompt_tokens,
1269
+ completionTokens: typedResult.usage.completion_tokens,
1270
+ totalTokens: typedResult.usage.total_tokens
1271
+ }
1272
+ );
1273
+ }
1274
+ if (typedResult.choices?.[0]?.finish_reason) {
1275
+ span.setAttribute(
1276
+ "llm.response.finish_reason",
1277
+ typedResult.choices[0].finish_reason
1278
+ );
1279
+ }
1280
+ if (!content) {
1281
+ span.setStatus({
1282
+ code: SpanStatusCode.ERROR,
1283
+ message: "No content in API response"
1284
+ });
1285
+ return {
1286
+ title: "Failed to analyze image",
1287
+ description: "No response from API"
1288
+ };
1289
+ }
1290
+ const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
1291
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
1292
+ const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
1293
+ const processedResult = { title, description };
1294
+ span.addEvent("llm.response.processed", {
1295
+ "response.object": JSON.stringify(
1296
+ processedResult,
1297
+ safeReplacer()
1298
+ )
1299
+ });
1300
+ return processedResult;
1301
+ } catch (error) {
1302
+ const message = error instanceof Error ? error.message : String(error);
1303
+ logger.error(`Error analyzing image: ${message}`);
1304
+ const exception = error instanceof Error ? error : new Error(message);
1305
+ span.recordException(exception);
1306
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
1307
+ return {
1308
+ title: "Failed to analyze image",
1309
+ description: `Error: ${message}`
1310
+ };
1311
+ }
254
1312
  }
255
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
256
- const title = titleMatch?.[1] || "Image Analysis";
257
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
258
- return { title, description };
259
- } catch (error) {
260
- logger.error("Error analyzing image:", error);
261
- return {
262
- title: "Failed to analyze image",
263
- description: `Error: ${error instanceof Error ? error.message : String(error)}`
264
- };
265
- }
1313
+ );
266
1314
  },
267
1315
  [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
268
1316
  logger.log("audioBuffer", audioBuffer);
269
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
270
- const formData = new FormData();
271
- formData.append("file", new Blob([audioBuffer], { type: "audio/mp3" }));
272
- formData.append("model", "whisper-1");
273
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
274
- method: "POST",
275
- headers: {
276
- Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`
277
- // Note: Do not set a Content-Type header—letting fetch set it for FormData is best
278
- },
279
- body: formData
280
- });
281
- logger.log("response", response);
282
- if (!response.ok) {
283
- throw new Error(`Failed to transcribe audio: ${response.statusText}`);
284
- }
285
- const data = await response.json();
286
- return data.text;
287
- },
288
- [ModelType.OBJECT_SMALL]: async (runtime, params) => {
289
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
290
- const openai = createOpenAI({
291
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
292
- baseURL
293
- });
294
- const model = runtime.getSetting("OPENAI_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "gpt-4o-mini";
295
- try {
296
- if (params.schema) {
297
- logger.info("Using OBJECT_SMALL without schema validation");
298
- const { object: object2 } = await generateObject({
299
- model: openai.languageModel(model),
300
- output: "no-schema",
301
- prompt: params.prompt,
302
- temperature: params.temperature
1317
+ const modelName = "whisper-1";
1318
+ logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
1319
+ const attributes = {
1320
+ "llm.vendor": "OpenAI",
1321
+ "llm.request.type": "transcription",
1322
+ "llm.request.model": modelName,
1323
+ "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
1324
+ };
1325
+ return startLlmSpan(
1326
+ runtime,
1327
+ "LLM.transcription",
1328
+ attributes,
1329
+ async (span) => {
1330
+ span.addEvent("llm.prompt", {
1331
+ "prompt.info": "Audio buffer for transcription"
303
1332
  });
304
- return object2;
1333
+ const baseURL = getBaseURL(runtime);
1334
+ const apiKey = getApiKey(runtime);
1335
+ if (!apiKey) {
1336
+ span.setStatus({
1337
+ code: SpanStatusCode.ERROR,
1338
+ message: "OpenAI API key not configured"
1339
+ });
1340
+ throw new Error(
1341
+ "OpenAI API key not configured - Cannot make request"
1342
+ );
1343
+ }
1344
+ if (!audioBuffer || audioBuffer.length === 0) {
1345
+ span.setStatus({
1346
+ code: SpanStatusCode.ERROR,
1347
+ message: "Audio buffer is empty or invalid"
1348
+ });
1349
+ throw new Error(
1350
+ "Audio buffer is empty or invalid for transcription"
1351
+ );
1352
+ }
1353
+ const formData = new FormData();
1354
+ formData.append("file", new Blob([audioBuffer]), "recording.mp3");
1355
+ formData.append("model", "whisper-1");
1356
+ try {
1357
+ const response = await fetch(`${baseURL}/audio/transcriptions`, {
1358
+ method: "POST",
1359
+ headers: {
1360
+ Authorization: `Bearer ${apiKey}`
1361
+ },
1362
+ body: formData
1363
+ });
1364
+ const responseClone = response.clone();
1365
+ const rawResponseBody = await responseClone.text();
1366
+ span.addEvent("llm.response.raw", {
1367
+ "response.body": rawResponseBody
1368
+ });
1369
+ logger.log("response", response);
1370
+ if (!response.ok) {
1371
+ span.setAttributes({ "error.api.status": response.status });
1372
+ span.setStatus({
1373
+ code: SpanStatusCode.ERROR,
1374
+ message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
1375
+ });
1376
+ throw new Error(
1377
+ `Failed to transcribe audio: ${response.statusText}`
1378
+ );
1379
+ }
1380
+ const data = await response.json();
1381
+ const processedText = data.text;
1382
+ span.setAttribute(
1383
+ "llm.response.processed.length",
1384
+ processedText.length
1385
+ );
1386
+ span.addEvent("llm.response.processed", {
1387
+ "response.text": processedText
1388
+ });
1389
+ return processedText;
1390
+ } catch (error) {
1391
+ const message = error instanceof Error ? error.message : String(error);
1392
+ const exception = error instanceof Error ? error : new Error(message);
1393
+ span.recordException(exception);
1394
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
1395
+ throw error;
1396
+ }
305
1397
  }
306
- const { object } = await generateObject({
307
- model: openai.languageModel(model),
308
- output: "no-schema",
309
- prompt: params.prompt,
310
- temperature: params.temperature
311
- });
312
- return object;
313
- } catch (error) {
314
- logger.error("Error generating object:", error);
315
- throw error;
316
- }
1398
+ );
317
1399
  },
318
- [ModelType.OBJECT_LARGE]: async (runtime, params) => {
319
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
320
- const openai = createOpenAI({
321
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
322
- baseURL
323
- });
324
- const model = runtime.getSetting("OPENAI_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "gpt-4o";
325
- try {
326
- if (params.schema) {
327
- logger.info("Using OBJECT_LARGE without schema validation");
328
- const { object: object2 } = await generateObject({
329
- model: openai.languageModel(model),
330
- output: "no-schema",
331
- prompt: params.prompt,
332
- temperature: params.temperature
1400
+ [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
1401
+ const ttsModelName = getSetting(
1402
+ runtime,
1403
+ "OPENAI_TTS_MODEL",
1404
+ "gpt-4o-mini-tts"
1405
+ );
1406
+ const attributes = {
1407
+ "llm.vendor": "OpenAI",
1408
+ "llm.request.type": "tts",
1409
+ "llm.request.model": ttsModelName,
1410
+ "input.text.length": text.length
1411
+ };
1412
+ return startLlmSpan(runtime, "LLM.tts", attributes, async (span) => {
1413
+ logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${ttsModelName}`);
1414
+ span.addEvent("llm.prompt", { "prompt.content": text });
1415
+ try {
1416
+ const speechStream = await fetchTextToSpeech(runtime, text);
1417
+ span.addEvent("llm.response.success", {
1418
+ info: "Speech stream generated"
333
1419
  });
334
- return object2;
1420
+ return speechStream;
1421
+ } catch (error) {
1422
+ const message = error instanceof Error ? error.message : String(error);
1423
+ const exception = error instanceof Error ? error : new Error(message);
1424
+ span.recordException(exception);
1425
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
1426
+ throw error;
335
1427
  }
336
- const { object } = await generateObject({
337
- model: openai.languageModel(model),
338
- output: "no-schema",
339
- prompt: params.prompt,
340
- temperature: params.temperature
341
- });
342
- return object;
343
- } catch (error) {
344
- logger.error("Error generating object:", error);
345
- throw error;
346
- }
1428
+ });
1429
+ },
1430
+ [ModelType.OBJECT_SMALL]: async (runtime, params) => {
1431
+ return generateObjectByModelType(
1432
+ runtime,
1433
+ params,
1434
+ ModelType.OBJECT_SMALL,
1435
+ getSmallModel
1436
+ );
1437
+ },
1438
+ [ModelType.OBJECT_LARGE]: async (runtime, params) => {
1439
+ return generateObjectByModelType(
1440
+ runtime,
1441
+ params,
1442
+ ModelType.OBJECT_LARGE,
1443
+ getLargeModel
1444
+ );
347
1445
  }
348
1446
  },
349
1447
  tests: [
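The hunk above replaces the previously duplicated OBJECT_SMALL/OBJECT_LARGE bodies (the removed createOpenAI + generateObject blocks) with calls to a shared generateObjectByModelType helper. That helper is defined earlier in the bundle and is outside this excerpt; the sketch below is reconstructed from its four call-site arguments and from the removed inline code, using the bundle's getBaseURL/getApiKey helpers (themselves sketched after the next hunk). It is not the shipped implementation; the real one presumably also wraps the call in an LLM span like the other handlers.

    // Reconstruction (assumed, not verbatim): the shared object-generation path.
    import { createOpenAI } from "@ai-sdk/openai";
    import { generateObject } from "ai";
    import type { IAgentRuntime } from "@elizaos/core";

    type ObjectParams = { prompt: string; temperature?: number; schema?: unknown };

    async function generateObjectByModelType(
      runtime: IAgentRuntime,
      params: ObjectParams,
      modelType: string,
      getModelFn: (runtime: IAgentRuntime) => string
    ) {
      const openai = createOpenAI({
        apiKey: getApiKey(runtime),
        baseURL: getBaseURL(runtime)
      });
      const modelName = getModelFn(runtime); // getSmallModel or getLargeModel
      const { object } = await generateObject({
        model: openai.languageModel(modelName),
        output: "no-schema", // schema validation was already skipped before the refactor
        prompt: params.prompt,
        temperature: params.temperature
      });
      return object;
    }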
@@ -353,16 +1451,21 @@ var openaiPlugin = {
        {
          name: "openai_test_url_and_api_key_validation",
          fn: async (runtime) => {
-           const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
+           const baseURL = getBaseURL(runtime);
            const response = await fetch(`${baseURL}/models`, {
              headers: {
-               Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`
+               Authorization: `Bearer ${getApiKey(runtime)}`
              }
            });
            const data = await response.json();
-           logger.log("Models Available:", data?.data.length);
+           logger.log(
+             "Models Available:",
+             data?.data?.length ?? "N/A"
+           );
            if (!response.ok) {
-             throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
+             throw new Error(
+               `Failed to validate OpenAI API key: ${response.statusText}`
+             );
            }
          }
        },
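This test now reads its configuration through getBaseURL and getApiKey instead of the inline runtime.getSetting calls it used before. Both helpers sit earlier in the bundle, outside this excerpt; judging from the removed lines they replace, they are plausibly thin wrappers like the following sketch (the default URL comes from the old inline code; the bodies are an assumption):

    import type { IAgentRuntime } from "@elizaos/core";

    // Assumed shape, inferred from the runtime.getSetting(...) calls removed above.
    function getBaseURL(runtime: IAgentRuntime): string {
      return runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
    }

    function getApiKey(runtime: IAgentRuntime): string | undefined {
      return runtime.getSetting("OPENAI_API_KEY");
    }

The optional-chaining fix in the same hunk (data?.data?.length ?? "N/A") also stops the test from crashing when the /models response carries no data array.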
@@ -370,12 +1473,16 @@ var openaiPlugin = {
370
1473
  name: "openai_test_text_embedding",
371
1474
  fn: async (runtime) => {
372
1475
  try {
373
- const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
374
- text: "Hello, world!"
375
- });
1476
+ const embedding = await runtime.useModel(
1477
+ ModelType.TEXT_EMBEDDING,
1478
+ {
1479
+ text: "Hello, world!"
1480
+ }
1481
+ );
376
1482
  logger.log("embedding", embedding);
377
1483
  } catch (error) {
378
- logger.error("Error in test_text_embedding:", error);
1484
+ const message = error instanceof Error ? error.message : String(error);
1485
+ logger.error(`Error in test_text_embedding: ${message}`);
379
1486
  throw error;
380
1487
  }
381
1488
  }
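The catch-block change here recurs in every test hunk below: because a JavaScript throw can carry any value, the code first normalizes the caught value to a string before interpolating it into the log line. The shipped bundle inlines those two lines each time; restated with a hypothetical helper (toErrorMessage and runEmbeddingTest are not names from the package), the pattern looks like this:

    import { logger, ModelType, type IAgentRuntime } from "@elizaos/core";

    // Hypothetical helper; the bundle repeats these two lines inline instead.
    function toErrorMessage(err: unknown): string {
      return err instanceof Error ? err.message : String(err);
    }

    // The embedding test above, restated with the helper:
    async function runEmbeddingTest(runtime: IAgentRuntime): Promise<void> {
      try {
        const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
          text: "Hello, world!"
        });
        logger.log("embedding", embedding);
      } catch (error) {
        logger.error(`Error in test_text_embedding: ${toErrorMessage(error)}`);
        throw error; // rethrow so the test still fails
      }
    }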
@@ -392,7 +1499,8 @@ var openaiPlugin = {
            }
            logger.log("generated with test_text_large:", text);
          } catch (error) {
-           logger.error("Error in test_text_large:", error);
+           const message = error instanceof Error ? error.message : String(error);
+           logger.error(`Error in test_text_large: ${message}`);
            throw error;
          }
        }
@@ -409,7 +1517,8 @@ var openaiPlugin = {
            }
            logger.log("generated with test_text_small:", text);
          } catch (error) {
-           logger.error("Error in test_text_small:", error);
+           const message = error instanceof Error ? error.message : String(error);
+           logger.error(`Error in test_text_small: ${message}`);
            throw error;
          }
        }
@@ -426,7 +1535,8 @@ var openaiPlugin = {
            });
            logger.log("generated with test_image_generation:", image);
          } catch (error) {
-           logger.error("Error in test_image_generation:", error);
+           const message = error instanceof Error ? error.message : String(error);
+           logger.error(`Error in test_image_generation: ${message}`);
            throw error;
          }
        }
@@ -444,13 +1554,20 @@ var openaiPlugin = {
            if (result && typeof result === "object" && "title" in result && "description" in result) {
              logger.log("Image description:", result);
            } else {
-             logger.error("Invalid image description result format:", result);
+             logger.error(
+               "Invalid image description result format:",
+               result
+             );
            }
          } catch (e) {
-           logger.error("Error in image description test:", e);
+           const message = e instanceof Error ? e.message : String(e);
+           logger.error(`Error in image description test: ${message}`);
          }
        } catch (e) {
-         logger.error("Error in openai_test_image_description:", e);
+         const message = e instanceof Error ? e.message : String(e);
+         logger.error(
+           `Error in openai_test_image_description: ${message}`
+         );
        }
      }
    },
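The validation in this test is a structural check on the IMAGE_DESCRIPTION result. The same check, restated as a reusable TypeScript type guard (an equivalent restatement for illustration, not code from the package):

    // Equivalent to the inline check above: the truthiness test plus
    // typeof === "object" already excludes null before the "in" checks.
    function isImageDescription(
      result: unknown
    ): result is { title: string; description: string } {
      return (
        result !== null &&
        typeof result === "object" &&
        "title" in result &&
        "description" in result
      );
    }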
@@ -469,7 +1586,8 @@ var openaiPlugin = {
          );
          logger.log("generated with test_transcription:", transcription);
        } catch (error) {
-         logger.error("Error in test_transcription:", error);
+         const message = error instanceof Error ? error.message : String(error);
+         logger.error(`Error in test_transcription: ${message}`);
          throw error;
        }
      }
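This test exercises the TRANSCRIPTION handler added in the first hunk: the handler receives a raw audio Buffer, posts it to `${baseURL}/audio/transcriptions` as multipart form data with the model hard-coded to whisper-1, and returns the text field of the response. A minimal caller under those assumptions (the file path is illustrative, not from the package):

    import { readFile } from "node:fs/promises";
    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    // Illustrative only; "sample.mp3" is a placeholder path.
    async function transcribeSample(runtime: IAgentRuntime): Promise<string> {
      const audioBuffer = await readFile("sample.mp3");
      // The handler itself rejects empty buffers and missing API keys
      // before making the HTTP request.
      return runtime.useModel(ModelType.TRANSCRIPTION, audioBuffer);
    }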
@@ -478,9 +1596,14 @@ var openaiPlugin = {
        name: "openai_test_text_tokenizer_encode",
        fn: async (runtime) => {
          const prompt = "Hello tokenizer encode!";
-         const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+         const tokens = await runtime.useModel(
+           ModelType.TEXT_TOKENIZER_ENCODE,
+           { prompt }
+         );
          if (!Array.isArray(tokens) || tokens.length === 0) {
-           throw new Error("Failed to tokenize text: expected non-empty array of tokens");
+           throw new Error(
+             "Failed to tokenize text: expected non-empty array of tokens"
+           );
          }
          logger.log("Tokenized output:", tokens);
        }
@@ -489,8 +1612,14 @@ var openaiPlugin = {
        name: "openai_test_text_tokenizer_decode",
        fn: async (runtime) => {
          const prompt = "Hello tokenizer decode!";
-         const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
-         const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
+         const tokens = await runtime.useModel(
+           ModelType.TEXT_TOKENIZER_ENCODE,
+           { prompt }
+         );
+         const decodedText = await runtime.useModel(
+           ModelType.TEXT_TOKENIZER_DECODE,
+           { tokens }
+         );
          if (decodedText !== prompt) {
            throw new Error(
              `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`
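The final hunk below completes this test. What it asserts is a lossless round-trip: encoding a prompt and decoding the resulting tokens must reproduce the original string exactly. The same property, extracted into a standalone check grounded only in the useModel calls shown above (the function name is illustrative):

    import { ModelType, type IAgentRuntime } from "@elizaos/core";

    // Round-trip property exercised by the two tokenizer tests in this diff.
    async function assertTokenizerRoundTrip(
      runtime: IAgentRuntime,
      prompt: string
    ): Promise<void> {
      const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
      const decoded = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
      if (decoded !== prompt) {
        throw new Error(`Round-trip mismatch: "${prompt}" !== "${decoded}"`);
      }
    }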
@@ -498,6 +1627,23 @@ var openaiPlugin = {
          }
          logger.log("Decoded text:", decodedText);
        }
+     },
+     {
+       name: "openai_test_text_to_speech",
+       fn: async (runtime) => {
+         try {
+           const text = "Hello, this is a test for text-to-speech.";
+           const response = await fetchTextToSpeech(runtime, text);
+           if (!response) {
+             throw new Error("Failed to generate speech");
+           }
+           logger.log("Generated speech successfully");
+         } catch (error) {
+           const message = error instanceof Error ? error.message : String(error);
+           logger.error(`Error in openai_test_text_to_speech: ${message}`);
+           throw error;
+         }
+       }
      }
    ]
  }
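The new openai_test_text_to_speech test, like the TEXT_TO_SPEECH handler earlier in the diff, delegates the HTTP work to fetchTextToSpeech, which is defined outside this excerpt. Given that the handler awaits it and returns the result as a speech stream, and that the default model is gpt-4o-mini-tts, it plausibly wraps OpenAI's /audio/speech endpoint along these lines; this is a sketch under those assumptions, not the shipped code, and the voice choice and error text are invented for illustration:

    import type { IAgentRuntime } from "@elizaos/core";

    // Assumed implementation of fetchTextToSpeech, inferred from its call
    // sites in this diff; getSetting/getBaseURL/getApiKey are the bundle's
    // own config helpers used elsewhere in the hunks above.
    async function fetchTextToSpeech(runtime: IAgentRuntime, text: string) {
      const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
      const response = await fetch(`${getBaseURL(runtime)}/audio/speech`, {
        method: "POST",
        headers: {
          Authorization: `Bearer ${getApiKey(runtime)}`,
          "Content-Type": "application/json"
        },
        // "voice" is required by the endpoint; the value here is assumed.
        body: JSON.stringify({ model, input: text, voice: "nova" })
      });
      if (!response.ok) {
        throw new Error(`Failed to generate speech: ${response.statusText}`);
      }
      return response.body; // ReadableStream of audio bytes
    }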