@elizaos/plugin-openai 1.0.0-beta.6 → 1.0.0-beta.71

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
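The most consequential change visible in this range is that configuration is now resolved through the agent runtime rather than process.env alone: the new bundle's getSetting helper checks the runtime's settings first, then the environment variable of the same name, then a default, and the model helpers build on it. Below is a minimal TypeScript sketch of that resolution order; the Runtime interface here is a simplified stand-in for the real @elizaos/core runtime type, which has more members.

// Sketch only: "Runtime" is a simplified stand-in, not the real @elizaos/core type.
interface Runtime {
  getSetting(key: string): string | undefined;
}

// Same fallback order as the bundle's getSetting helper:
// runtime setting, then environment variable, then default.
function getSetting(runtime: Runtime, key: string, defaultValue?: string): string | undefined {
  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
}

// Example: small-model selection mirroring the new bundle's getSmallModel.
function getSmallModel(runtime: Runtime): string {
  return (
    getSetting(runtime, "OPENAI_SMALL_MODEL") ??
    getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini") ??
    "gpt-4o-mini"
  );
}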
package/dist/index.js CHANGED
@@ -1,11 +1,612 @@
  // src/index.ts
  import { createOpenAI } from "@ai-sdk/openai";
  import {
+ EventType,
+ logger,
  ModelType,
- logger
+ safeReplacer,
+ ServiceType,
+ VECTOR_DIMS
  } from "@elizaos/core";
- import { generateObject, generateText } from "ai";
+
+ // node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
+ var _globalThis = typeof globalThis === "object" ? globalThis : global;
+
+ // node_modules/@opentelemetry/api/build/esm/version.js
+ var VERSION = "1.9.0";
+
+ // node_modules/@opentelemetry/api/build/esm/internal/semver.js
+ var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
+ function _makeCompatibilityCheck(ownVersion) {
+ var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
+ var rejectedVersions = /* @__PURE__ */ new Set();
+ var myVersionMatch = ownVersion.match(re);
+ if (!myVersionMatch) {
+ return function() {
+ return false;
+ };
+ }
+ var ownVersionParsed = {
+ major: +myVersionMatch[1],
+ minor: +myVersionMatch[2],
+ patch: +myVersionMatch[3],
+ prerelease: myVersionMatch[4]
+ };
+ if (ownVersionParsed.prerelease != null) {
+ return function isExactmatch(globalVersion) {
+ return globalVersion === ownVersion;
+ };
+ }
+ function _reject(v) {
+ rejectedVersions.add(v);
+ return false;
+ }
+ function _accept(v) {
+ acceptedVersions.add(v);
+ return true;
+ }
+ return function isCompatible2(globalVersion) {
+ if (acceptedVersions.has(globalVersion)) {
+ return true;
+ }
+ if (rejectedVersions.has(globalVersion)) {
+ return false;
+ }
+ var globalVersionMatch = globalVersion.match(re);
+ if (!globalVersionMatch) {
+ return _reject(globalVersion);
+ }
+ var globalVersionParsed = {
+ major: +globalVersionMatch[1],
+ minor: +globalVersionMatch[2],
+ patch: +globalVersionMatch[3],
+ prerelease: globalVersionMatch[4]
+ };
+ if (globalVersionParsed.prerelease != null) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major !== globalVersionParsed.major) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major === 0) {
+ if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.minor <= globalVersionParsed.minor) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ };
+ }
+ var isCompatible = _makeCompatibilityCheck(VERSION);
+
+ // node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
+ var major = VERSION.split(".")[0];
+ var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
+ var _global = _globalThis;
+ function registerGlobal(type, instance, diag, allowOverride) {
+ var _a;
+ if (allowOverride === void 0) {
+ allowOverride = false;
+ }
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
+ version: VERSION
+ };
+ if (!allowOverride && api[type]) {
+ var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ if (api.version !== VERSION) {
+ var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ api[type] = instance;
+ diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
+ return true;
+ }
+ function getGlobal(type) {
+ var _a, _b;
+ var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
+ if (!globalVersion || !isCompatible(globalVersion)) {
+ return;
+ }
+ return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
+ }
+ function unregisterGlobal(type, diag) {
+ diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
+ if (api) {
+ delete api[type];
+ }
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
+ var __read = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var DiagComponentLogger = (
+ /** @class */
+ function() {
+ function DiagComponentLogger2(props) {
+ this._namespace = props.namespace || "DiagComponentLogger";
+ }
+ DiagComponentLogger2.prototype.debug = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("debug", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.error = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("error", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.info = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("info", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.warn = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("warn", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.verbose = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("verbose", this._namespace, args);
+ };
+ return DiagComponentLogger2;
+ }()
+ );
+ function logProxy(funcName, namespace, args) {
+ var logger2 = getGlobal("diag");
+ if (!logger2) {
+ return;
+ }
+ args.unshift(namespace);
+ return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/diag/types.js
+ var DiagLogLevel;
+ (function(DiagLogLevel2) {
+ DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
+ DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
+ DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
+ DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
+ DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
+ DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
+ DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
+ })(DiagLogLevel || (DiagLogLevel = {}));
+
+ // node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
+ function createLogLevelDiagLogger(maxLevel, logger2) {
+ if (maxLevel < DiagLogLevel.NONE) {
+ maxLevel = DiagLogLevel.NONE;
+ } else if (maxLevel > DiagLogLevel.ALL) {
+ maxLevel = DiagLogLevel.ALL;
+ }
+ logger2 = logger2 || {};
+ function _filterFunc(funcName, theLevel) {
+ var theFunc = logger2[funcName];
+ if (typeof theFunc === "function" && maxLevel >= theLevel) {
+ return theFunc.bind(logger2);
+ }
+ return function() {
+ };
+ }
+ return {
+ error: _filterFunc("error", DiagLogLevel.ERROR),
+ warn: _filterFunc("warn", DiagLogLevel.WARN),
+ info: _filterFunc("info", DiagLogLevel.INFO),
+ debug: _filterFunc("debug", DiagLogLevel.DEBUG),
+ verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
+ };
+ }
+
+ // node_modules/@opentelemetry/api/build/esm/api/diag.js
+ var __read2 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray2 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME = "diag";
+ var DiagAPI = (
+ /** @class */
+ function() {
+ function DiagAPI2() {
+ function _logProxy(funcName) {
+ return function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ var logger2 = getGlobal("diag");
+ if (!logger2)
+ return;
+ return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
+ };
+ }
+ var self = this;
+ var setLogger = function(logger2, optionsOrLogLevel) {
+ var _a, _b, _c;
+ if (optionsOrLogLevel === void 0) {
+ optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
+ }
+ if (logger2 === self) {
+ var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
+ self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
+ return false;
+ }
+ if (typeof optionsOrLogLevel === "number") {
+ optionsOrLogLevel = {
+ logLevel: optionsOrLogLevel
+ };
+ }
+ var oldLogger = getGlobal("diag");
+ var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
+ if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
+ var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
+ oldLogger.warn("Current logger will be overwritten from " + stack);
+ newLogger.warn("Current logger will overwrite one already registered from " + stack);
+ }
+ return registerGlobal("diag", newLogger, self, true);
+ };
+ self.setLogger = setLogger;
+ self.disable = function() {
+ unregisterGlobal(API_NAME, self);
+ };
+ self.createComponentLogger = function(options) {
+ return new DiagComponentLogger(options);
+ };
+ self.verbose = _logProxy("verbose");
+ self.debug = _logProxy("debug");
+ self.info = _logProxy("info");
+ self.warn = _logProxy("warn");
+ self.error = _logProxy("error");
+ }
+ DiagAPI2.instance = function() {
+ if (!this._instance) {
+ this._instance = new DiagAPI2();
+ }
+ return this._instance;
+ };
+ return DiagAPI2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/context/context.js
+ var BaseContext = (
+ /** @class */
+ /* @__PURE__ */ function() {
+ function BaseContext2(parentContext) {
+ var self = this;
+ self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
+ self.getValue = function(key) {
+ return self._currentContext.get(key);
+ };
+ self.setValue = function(key, value) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.set(key, value);
+ return context2;
+ };
+ self.deleteValue = function(key) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.delete(key);
+ return context2;
+ };
+ }
+ return BaseContext2;
+ }()
+ );
+ var ROOT_CONTEXT = new BaseContext();
+
+ // node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
+ var __read3 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray3 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var NoopContextManager = (
+ /** @class */
+ function() {
+ function NoopContextManager2() {
+ }
+ NoopContextManager2.prototype.active = function() {
+ return ROOT_CONTEXT;
+ };
+ NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
+ };
+ NoopContextManager2.prototype.bind = function(_context, target) {
+ return target;
+ };
+ NoopContextManager2.prototype.enable = function() {
+ return this;
+ };
+ NoopContextManager2.prototype.disable = function() {
+ return this;
+ };
+ return NoopContextManager2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/api/context.js
+ var __read4 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray4 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME2 = "context";
+ var NOOP_CONTEXT_MANAGER = new NoopContextManager();
+ var ContextAPI = (
+ /** @class */
+ function() {
+ function ContextAPI2() {
+ }
+ ContextAPI2.getInstance = function() {
+ if (!this._instance) {
+ this._instance = new ContextAPI2();
+ }
+ return this._instance;
+ };
+ ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
+ return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
+ };
+ ContextAPI2.prototype.active = function() {
+ return this._getContextManager().active();
+ };
+ ContextAPI2.prototype.with = function(context2, fn, thisArg) {
+ var _a;
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
+ };
+ ContextAPI2.prototype.bind = function(context2, target) {
+ return this._getContextManager().bind(context2, target);
+ };
+ ContextAPI2.prototype._getContextManager = function() {
+ return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
+ };
+ ContextAPI2.prototype.disable = function() {
+ this._getContextManager().disable();
+ unregisterGlobal(API_NAME2, DiagAPI.instance());
+ };
+ return ContextAPI2;
+ }()
+ );
+
+ // node_modules/@opentelemetry/api/build/esm/trace/status.js
+ var SpanStatusCode;
+ (function(SpanStatusCode2) {
+ SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
+ SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
+ SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
+ })(SpanStatusCode || (SpanStatusCode = {}));
+
+ // node_modules/@opentelemetry/api/build/esm/context-api.js
+ var context = ContextAPI.getInstance();
+
+ // src/index.ts
+ import {
+ generateObject,
+ generateText,
+ JSONParseError
+ } from "ai";
  import { encodingForModel } from "js-tiktoken";
+ import { fetch, FormData } from "undici";
+ function getTracer(runtime) {
+ const availableServices = Array.from(runtime.getAllServices().keys());
+ logger.debug(
+ `[getTracer] Available services: ${JSON.stringify(availableServices)}`
+ );
+ logger.debug(
+ `[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`
+ );
+ const instrumentationService = runtime.getService(
+ ServiceType.INSTRUMENTATION
+ );
+ if (!instrumentationService) {
+ logger.warn(
+ `[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`
+ );
+ return null;
+ }
+ if (!instrumentationService.isEnabled()) {
+ logger.debug("[getTracer] Instrumentation service found but is disabled.");
+ return null;
+ }
+ logger.debug(
+ "[getTracer] Successfully retrieved enabled instrumentation service."
+ );
+ return instrumentationService.getTracer("eliza.llm.openai");
+ }
+ async function startLlmSpan(runtime, spanName, attributes, fn) {
+ const tracer = getTracer(runtime);
+ if (!tracer) {
+ const dummySpan = {
+ setAttribute: () => {
+ },
+ setAttributes: () => {
+ },
+ addEvent: () => {
+ },
+ recordException: () => {
+ },
+ setStatus: () => {
+ },
+ end: () => {
+ },
+ spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
+ };
+ return fn(dummySpan);
+ }
+ const activeContext = context.active();
+ return tracer.startActiveSpan(
+ spanName,
+ { attributes },
+ activeContext,
+ async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ span.end();
+ return result;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ span.recordException(error);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ span.end();
+ throw error;
+ }
+ }
+ );
+ }
+ function getSetting(runtime, key, defaultValue) {
+ return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
+ }
+ function getBaseURL(runtime) {
+ const baseURL = getSetting(
+ runtime,
+ "OPENAI_BASE_URL",
+ "https://api.openai.com/v1"
+ );
+ logger.debug(`[OpenAI] Default base URL: ${baseURL}`);
+ return baseURL;
+ }
+ function getEmbeddingBaseURL(runtime) {
+ const embeddingURL = getSetting(runtime, "OPENAI_EMBEDDING_URL");
+ if (embeddingURL) {
+ logger.debug(`[OpenAI] Using specific embedding base URL: ${embeddingURL}`);
+ return embeddingURL;
+ }
+ logger.debug("[OpenAI] Falling back to general base URL for embeddings.");
+ return getBaseURL(runtime);
+ }
+ function getApiKey(runtime) {
+ return getSetting(runtime, "OPENAI_API_KEY");
+ }
+ function getSmallModel(runtime) {
+ return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
+ }
+ function getLargeModel(runtime) {
+ return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
+ }
+ function getImageDescriptionModel(runtime) {
+ return getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MODEL", "gpt-4o-mini") ?? "gpt-4o-mini";
+ }
+ function createOpenAIClient(runtime) {
+ return createOpenAI({
+ apiKey: getApiKey(runtime),
+ baseURL: getBaseURL(runtime)
+ });
+ }
  async function tokenizeText(model, prompt) {
  const modelName = model === ModelType.TEXT_SMALL ? process.env.OPENAI_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-4o-mini" : process.env.LARGE_MODEL ?? "gpt-4o";
  const encoding = encodingForModel(modelName);
@@ -17,6 +618,185 @@ async function detokenizeText(model, tokens) {
  const encoding = encodingForModel(modelName);
  return encoding.decode(tokens);
  }
+ async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
+ const openai = createOpenAIClient(runtime);
+ const modelName = getModelFn(runtime);
+ logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
+ const temperature = params.temperature ?? 0;
+ const schemaPresent = !!params.schema;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "object_generation",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.schema_present": schemaPresent
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.generateObject",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+ if (schemaPresent) {
+ span.addEvent("llm.request.schema", {
+ schema: JSON.stringify(params.schema, safeReplacer())
+ });
+ logger.info(
+ `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+ );
+ }
+ try {
+ const { object, usage } = await generateObject({
+ model: openai.languageModel(modelName),
+ output: "no-schema",
+ prompt: params.prompt,
+ temperature,
+ experimental_repairText: getJsonRepairFunction()
+ });
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(object, safeReplacer())
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(
+ runtime,
+ modelType,
+ params.prompt,
+ usage
+ );
+ }
+ return object;
+ } catch (error) {
+ if (error instanceof JSONParseError) {
+ logger.error(
+ `[generateObject] Failed to parse JSON: ${error.message}`
+ );
+ span.recordException(error);
+ span.addEvent("llm.error.json_parse", {
+ "error.message": error.message,
+ "error.text": error.text
+ });
+ span.addEvent("llm.repair.attempt");
+ const repairFunction = getJsonRepairFunction();
+ const repairedJsonString = await repairFunction({
+ text: error.text,
+ error
+ });
+ if (repairedJsonString) {
+ try {
+ const repairedObject = JSON.parse(repairedJsonString);
+ span.addEvent("llm.repair.success", {
+ repaired_object: JSON.stringify(repairedObject, safeReplacer())
+ });
+ logger.info("[generateObject] Successfully repaired JSON.");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "JSON parsing failed but was repaired"
+ });
+ return repairedObject;
+ } catch (repairParseError) {
+ const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+ logger.error(
+ `[generateObject] Failed to parse repaired JSON: ${message}`
+ );
+ const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+ span.recordException(exception);
+ span.addEvent("llm.repair.parse_error", {
+ "error.message": message
+ });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${message}`
+ });
+ throw repairParseError;
+ }
+ } else {
+ const errMsg = error instanceof Error ? error.message : String(error);
+ logger.error("[generateObject] JSON repair failed.");
+ span.addEvent("llm.repair.failed");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${errMsg}`
+ });
+ throw error;
+ }
+ } else {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`[generateObject] Unknown error: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message
+ });
+ throw error;
+ }
+ }
+ }
+ );
+ }
+ function getJsonRepairFunction() {
+ return async ({ text, error }) => {
+ try {
+ if (error instanceof JSONParseError) {
+ const cleanedText = text.replace(/```json\n|\n```|```/g, "");
+ JSON.parse(cleanedText);
+ return cleanedText;
+ }
+ return null;
+ } catch (jsonError) {
+ const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+ logger.warn(`Failed to repair JSON text: ${message}`);
+ return null;
+ }
+ };
+ }
+ function emitModelUsageEvent(runtime, type, prompt, usage) {
+ runtime.emitEvent(EventType.MODEL_USED, {
+ provider: "openai",
+ type,
+ prompt,
+ tokens: {
+ prompt: usage.promptTokens,
+ completion: usage.completionTokens,
+ total: usage.totalTokens
+ }
+ });
+ }
+ async function fetchTextToSpeech(runtime, text) {
+ const apiKey = getApiKey(runtime);
+ const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
+ const voice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
+ const instructions = getSetting(runtime, "OPENAI_TTS_INSTRUCTIONS", "");
+ const baseURL = getBaseURL(runtime);
+ try {
+ const res = await fetch(`${baseURL}/audio/speech`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model,
+ voice,
+ input: text,
+ ...instructions && { instructions }
+ })
+ });
+ if (!res.ok) {
+ const err = await res.text();
+ throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
+ }
+ return res.body;
+ } catch (err) {
+ const message = err instanceof Error ? err.message : String(err);
+ throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
+ }
+ }
  var openaiPlugin = {
  name: "openai",
  description: "OpenAI plugin",
@@ -26,41 +806,72 @@ var openaiPlugin = {
  OPENAI_SMALL_MODEL: process.env.OPENAI_SMALL_MODEL,
  OPENAI_LARGE_MODEL: process.env.OPENAI_LARGE_MODEL,
  SMALL_MODEL: process.env.SMALL_MODEL,
- LARGE_MODEL: process.env.LARGE_MODEL
+ LARGE_MODEL: process.env.LARGE_MODEL,
+ OPENAI_EMBEDDING_MODEL: process.env.OPENAI_EMBEDDING_MODEL,
+ OPENAI_EMBEDDING_URL: process.env.OPENAI_EMBEDDING_URL,
+ OPENAI_EMBEDDING_DIMENSIONS: process.env.OPENAI_EMBEDDING_DIMENSIONS,
+ OPENAI_IMAGE_DESCRIPTION_MODEL: process.env.OPENAI_IMAGE_DESCRIPTION_MODEL,
+ OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS: process.env.OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS
  },
- async init(config) {
+ async init(_config, runtime) {
  try {
- if (!process.env.OPENAI_API_KEY) {
+ if (!getApiKey(runtime)) {
  logger.warn(
  "OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited"
  );
  return;
  }
  try {
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
+ const baseURL = getBaseURL(runtime);
  const response = await fetch(`${baseURL}/models`, {
- headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` }
+ headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
  });
  if (!response.ok) {
- logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ logger.warn(
+ `OpenAI API key validation failed: ${response.statusText}`
+ );
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  } else {
+ logger.log("OpenAI API key validated successfully");
  }
  } catch (fetchError) {
- logger.warn(`Error validating OpenAI API key: ${fetchError}`);
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+ const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+ logger.warn(`Error validating OpenAI API key: ${message}`);
+ logger.warn(
+ "OpenAI functionality will be limited until a valid API key is provided"
+ );
  }
  } catch (error) {
+ const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
  logger.warn(
- `OpenAI plugin configuration issue: ${error.errors.map((e) => e.message).join(", ")} - You need to configure the OPENAI_API_KEY in your environment variables`
+ `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
  );
  }
  },
  models: {
- [ModelType.TEXT_EMBEDDING]: async (_runtime, params) => {
+ [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
+ const embeddingModelName = getSetting(
+ runtime,
+ "OPENAI_EMBEDDING_MODEL",
+ "text-embedding-3-small"
+ );
+ const embeddingDimension = Number.parseInt(
+ getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536",
+ 10
+ );
+ logger.debug(
+ `[OpenAI] Using embedding model: ${embeddingModelName} with dimension: ${embeddingDimension}`
+ );
+ if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
+ const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+ logger.error(errorMsg);
+ throw new Error(errorMsg);
+ }
  if (params === null) {
  logger.debug("Creating test embedding for initialization");
- const testVector = Array(1536).fill(0);
+ const testVector = Array(embeddingDimension).fill(0);
  testVector[0] = 0.1;
  return testVector;
  }
@@ -71,51 +882,115 @@ var openaiPlugin = {
  text = params.text;
  } else {
  logger.warn("Invalid input format for embedding");
- const fallbackVector = Array(1536).fill(0);
+ const fallbackVector = Array(embeddingDimension).fill(0);
  fallbackVector[0] = 0.2;
  return fallbackVector;
  }
  if (!text.trim()) {
  logger.warn("Empty text for embedding");
- const emptyVector = Array(1536).fill(0);
+ const emptyVector = Array(embeddingDimension).fill(0);
  emptyVector[0] = 0.3;
  return emptyVector;
  }
- try {
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
- const response = await fetch(`${baseURL}/embeddings`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: "text-embedding-3-small",
- input: text
- })
- });
- if (!response.ok) {
- logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
- }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger.error("API returned invalid structure");
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.5;
- return errorVector;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "embedding",
+ "llm.request.model": embeddingModelName,
+ "llm.request.embedding.dimensions": embeddingDimension,
+ "input.text.length": text.length
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.embedding",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: text
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ logger.error(
+ `OpenAI API error: ${response.status} - ${response.statusText}`
+ );
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.4;
+ return errorVector;
+ }
+ const data = await response.json();
+ if (!data?.data?.[0]?.embedding) {
+ logger.error("API returned invalid structure");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "API returned invalid structure"
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.5;
+ return errorVector;
+ }
+ const embedding = data.data[0].embedding;
+ span.setAttribute(
+ "llm.response.embedding.vector_length",
+ embedding.length
+ );
+ if (data.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+ "llm.usage.total_tokens": data.usage.total_tokens
+ });
+ const usage = {
+ promptTokens: data.usage.prompt_tokens,
+ completionTokens: 0,
+ totalTokens: data.usage.total_tokens
+ };
+ emitModelUsageEvent(
+ runtime,
+ ModelType.TEXT_EMBEDDING,
+ text,
+ usage
+ );
+ }
+ logger.log(`Got valid embedding with length ${embedding.length}`);
+ return embedding;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error generating embedding: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.6;
+ return errorVector;
+ }
  }
- const embedding = data.data[0].embedding;
- logger.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- logger.error("Error generating embedding:", error);
- const errorVector = Array(1536).fill(0);
- errorVector[0] = 0.6;
- return errorVector;
- }
+ );
  },
  [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
  return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
@@ -128,25 +1003,54 @@ var openaiPlugin = {
  const frequency_penalty = 0.7;
  const presence_penalty = 0.7;
  const max_response_length = 8192;
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
- const openai = createOpenAI({
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
- baseURL
- });
- const model = runtime.getSetting("OPENAI_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "gpt-4o-mini";
- logger.log("generating text");
+ const openai = createOpenAIClient(runtime);
+ const modelName = getSmallModel(runtime);
+ logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
  logger.log(prompt);
- const { text: openaiResponse } = await generateText({
- model: openai.languageModel(model),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens: max_response_length,
- frequencyPenalty: frequency_penalty,
- presencePenalty: presence_penalty,
- stopSequences
- });
- return openaiResponse;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "completion",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.max_tokens": max_response_length,
+ "llm.request.frequency_penalty": frequency_penalty,
+ "llm.request.presence_penalty": presence_penalty,
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.generateText",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens: max_response_length,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ stopSequences
+ });
+ span.setAttribute(
+ "llm.response.processed.length",
+ openaiResponse.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+ }
+ return openaiResponse;
+ }
+ );
  },
  [ModelType.TEXT_LARGE]: async (runtime, {
  prompt,
@@ -156,194 +1060,397 @@ var openaiPlugin = {
  frequencyPenalty = 0.7,
  presencePenalty = 0.7
  }) => {
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
- const openai = createOpenAI({
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
- baseURL
- });
- const model = runtime.getSetting("OPENAI_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "gpt-4o";
- const { text: openaiResponse } = await generateText({
- model: openai.languageModel(model),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens,
- frequencyPenalty,
- presencePenalty,
- stopSequences
- });
- return openaiResponse;
+ const openai = createOpenAIClient(runtime);
+ const modelName = getLargeModel(runtime);
+ logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
+ logger.log(prompt);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "completion",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.max_tokens": maxTokens,
+ "llm.request.frequency_penalty": frequencyPenalty,
+ "llm.request.presence_penalty": presencePenalty,
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.generateText",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences
+ });
+ span.setAttribute(
+ "llm.response.processed.length",
+ openaiResponse.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+ }
+ return openaiResponse;
+ }
+ );
  },
  [ModelType.IMAGE]: async (runtime, params) => {
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
- const response = await fetch(`${baseURL}/images/generations`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- prompt: params.prompt,
- n: params.n || 1,
- size: params.size || "1024x1024"
- })
- });
- if (!response.ok) {
- throw new Error(`Failed to generate image: ${response.statusText}`);
- }
- const data = await response.json();
- const typedData = data;
- return typedData.data;
+ const n = params.n || 1;
+ const size = params.size || "1024x1024";
+ const prompt = params.prompt;
+ const modelName = "dall-e-3";
+ logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "image_generation",
+ "llm.request.image.size": size,
+ "llm.request.image.count": n
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.imageGeneration",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${baseURL}/images/generations`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ prompt,
+ n,
+ size
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(
+ `Failed to generate image: ${response.statusText}`
+ );
+ }
+ const data = await response.json();
+ const typedData = data;
+ span.addEvent("llm.response.processed", {
+ "response.urls": JSON.stringify(typedData.data)
+ });
+ return typedData.data;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
+ }
+ }
+ );
  },
  [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
  let imageUrl;
- let prompt;
+ let promptText;
+ const modelName = getImageDescriptionModel(runtime);
+ logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
+ const maxTokens = Number.parseInt(
+ getSetting(runtime, "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS", "8192") || "8192",
+ 10
+ );
  if (typeof params === "string") {
  imageUrl = params;
- prompt = void 0;
+ promptText = "Please analyze this image and provide a title and detailed description.";
  } else {
  imageUrl = params.imageUrl;
- prompt = params.prompt;
+ promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
  }
- try {
- const baseURL = process.env.OPENAI_BASE_URL ?? "https://api.openai.com/v1";
- const apiKey = process.env.OPENAI_API_KEY;
- if (!apiKey) {
- logger.error("OpenAI API key not set");
- return {
- title: "Failed to analyze image",
- description: "API key not configured"
- };
- }
- const response = await fetch(`${baseURL}/chat/completions`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- },
- body: JSON.stringify({
- model: "gpt-4-vision-preview",
- messages: [
- {
- role: "user",
- content: [
- {
- type: "text",
- text: prompt || "Please analyze this image and provide a title and detailed description."
- },
- {
- type: "image_url",
- image_url: { url: imageUrl }
- }
- ]
- }
- ],
- max_tokens: 300
- })
- });
- if (!response.ok) {
- throw new Error(`OpenAI API error: ${response.status}`);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "chat",
+ "llm.request.model": modelName,
+ "llm.request.max_tokens": maxTokens,
+ "llm.request.image.url": imageUrl
+ };
+ const messages = [
+ {
+ role: "user",
+ content: [
+ { type: "text", text: promptText },
+ { type: "image_url", image_url: { url: imageUrl } }
+ ]
  }
- const result = await response.json();
- const content = result.choices?.[0]?.message?.content;
- if (!content) {
- return {
- title: "Failed to analyze image",
- description: "No response from API"
- };
+ ];
+ return startLlmSpan(
+ runtime,
+ "LLM.imageDescription",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.content": JSON.stringify(messages, safeReplacer())
+ });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ logger.error("OpenAI API key not set");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ return {
+ title: "Failed to analyze image",
+ description: "API key not configured"
+ };
+ }
+ try {
+ const requestBody = {
+ model: modelName,
+ messages,
+ max_tokens: maxTokens
+ };
+ const response = await fetch(`${baseURL}/chat/completions`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify(requestBody)
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
+ });
+ throw new Error(`OpenAI API error: ${response.status}`);
+ }
+ const result = await response.json();
+ const typedResult = result;
+ const content = typedResult.choices?.[0]?.message?.content;
+ if (typedResult.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
+ "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
+ "llm.usage.total_tokens": typedResult.usage.total_tokens
+ });
+ emitModelUsageEvent(
+ runtime,
+ ModelType.IMAGE_DESCRIPTION,
+ typeof params === "string" ? params : params.prompt || "",
+ {
+ promptTokens: typedResult.usage.prompt_tokens,
+ completionTokens: typedResult.usage.completion_tokens,
+ totalTokens: typedResult.usage.total_tokens
+ }
+ );
+ }
+ if (typedResult.choices?.[0]?.finish_reason) {
+ span.setAttribute(
+ "llm.response.finish_reason",
+ typedResult.choices[0].finish_reason
+ );
+ }
+ if (!content) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "No content in API response"
+ });
+ return {
+ title: "Failed to analyze image",
+ description: "No response from API"
+ };
+ }
+ const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
+ const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+ const processedResult = { title, description };
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(
+ processedResult,
+ safeReplacer()
+ )
+ });
+ return processedResult;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error analyzing image: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ return {
+ title: "Failed to analyze image",
+ description: `Error: ${message}`
+ };
+ }
  }
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
- const title = titleMatch?.[1] || "Image Analysis";
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
- return { title, description };
- } catch (error) {
- logger.error("Error analyzing image:", error);
- return {
- title: "Failed to analyze image",
- description: `Error: ${error instanceof Error ? error.message : String(error)}`
- };
- }
+ );
  },
  [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
268
1325
  logger.log("audioBuffer", audioBuffer);
269
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
270
- const formData = new FormData();
271
- formData.append("file", new Blob([audioBuffer], { type: "audio/mp3" }));
272
- formData.append("model", "whisper-1");
273
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
274
- method: "POST",
275
- headers: {
276
- Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`
277
- // Note: Do not set a Content-Type header—letting fetch set it for FormData is best
278
- },
279
- body: formData
280
- });
281
- logger.log("response", response);
282
- if (!response.ok) {
283
- throw new Error(`Failed to transcribe audio: ${response.statusText}`);
284
- }
285
- const data = await response.json();
286
- return data.text;
287
- },
288
- [ModelType.OBJECT_SMALL]: async (runtime, params) => {
289
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
290
- const openai = createOpenAI({
291
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
292
- baseURL
293
- });
294
- const model = runtime.getSetting("OPENAI_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "gpt-4o-mini";
- try {
- if (params.schema) {
- logger.info("Using OBJECT_SMALL without schema validation");
- const { object: object2 } = await generateObject({
- model: openai.languageModel(model),
- output: "no-schema",
- prompt: params.prompt,
- temperature: params.temperature
+ const modelName = "whisper-1";
+ logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "transcription",
+ "llm.request.model": modelName,
+ "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
+ };
+ return startLlmSpan(
+ runtime,
+ "LLM.transcription",
+ attributes,
+ async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.info": "Audio buffer for transcription"
  });
- return object2;
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error(
+ "OpenAI API key not configured - Cannot make request"
+ );
+ }
+ if (!audioBuffer || audioBuffer.length === 0) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "Audio buffer is empty or invalid"
+ });
+ throw new Error(
+ "Audio buffer is empty or invalid for transcription"
+ );
+ }
+ const formData = new FormData();
+ formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+ formData.append("model", "whisper-1");
+ try {
+ const response = await fetch(`${baseURL}/audio/transcriptions`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: formData
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ logger.log("response", response);
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(
+ `Failed to transcribe audio: ${response.statusText}`
+ );
+ }
+ const data = await response.json();
+ const processedText = data.text;
+ span.setAttribute(
+ "llm.response.processed.length",
+ processedText.length
+ );
+ span.addEvent("llm.response.processed", {
+ "response.text": processedText
+ });
+ return processedText;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
+ }
  }
- const { object } = await generateObject({
- model: openai.languageModel(model),
- output: "no-schema",
- prompt: params.prompt,
- temperature: params.temperature
- });
- return object;
- } catch (error) {
- logger.error("Error generating object:", error);
- throw error;
- }
+ );
  },
- [ModelType.OBJECT_LARGE]: async (runtime, params) => {
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
- const openai = createOpenAI({
- apiKey: runtime.getSetting("OPENAI_API_KEY"),
- baseURL
- });
- const model = runtime.getSetting("OPENAI_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "gpt-4o";
- try {
- if (params.schema) {
- logger.info("Using OBJECT_LARGE without schema validation");
- const { object: object2 } = await generateObject({
- model: openai.languageModel(model),
- output: "no-schema",
- prompt: params.prompt,
- temperature: params.temperature
+ [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
+ const ttsModelName = getSetting(
+ runtime,
+ "OPENAI_TTS_MODEL",
+ "gpt-4o-mini-tts"
+ );
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "tts",
+ "llm.request.model": ttsModelName,
+ "input.text.length": text.length
+ };
+ return startLlmSpan(runtime, "LLM.tts", attributes, async (span) => {
+ logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${ttsModelName}`);
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ try {
+ const speechStream = await fetchTextToSpeech(runtime, text);
+ span.addEvent("llm.response.success", {
+ info: "Speech stream generated"
  });
- return object2;
+ return speechStream;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
  }
- const { object } = await generateObject({
- model: openai.languageModel(model),
- output: "no-schema",
- prompt: params.prompt,
- temperature: params.temperature
- });
- return object;
- } catch (error) {
- logger.error("Error generating object:", error);
- throw error;
- }
+ });
+ },
+ [ModelType.OBJECT_SMALL]: async (runtime, params) => {
+ return generateObjectByModelType(
+ runtime,
+ params,
+ ModelType.OBJECT_SMALL,
+ getSmallModel
+ );
+ },
+ [ModelType.OBJECT_LARGE]: async (runtime, params) => {
+ return generateObjectByModelType(
+ runtime,
+ params,
+ ModelType.OBJECT_LARGE,
+ getLargeModel
+ );
  }
  },
  tests: [
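
In beta.71 the OBJECT_SMALL and OBJECT_LARGE handlers collapse into a shared generateObjectByModelType helper whose definition falls outside this hunk. A minimal sketch of what such a helper could look like, reconstructed from the removed handlers and the startLlmSpan usage above; the span name, attribute values, and exact flow are assumptions, not the package's actual source:

    // Plausible reconstruction only -- the real definition is outside this hunk.
    async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
      const openai = createOpenAI({
        apiKey: getApiKey(runtime),
        baseURL: getBaseURL(runtime)
      });
      const modelName = getModelFn(runtime);
      const attributes = {
        "llm.vendor": "OpenAI",
        "llm.request.type": "object_generation", // attribute value is an assumption
        "llm.request.model": modelName
      };
      return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
        if (params.schema) {
          // Mirrors the removed handlers: a provided schema is logged but not enforced.
          logger.info(`Using ${modelType} without schema validation`);
        }
        const { object } = await generateObject({
          model: openai.languageModel(modelName),
          output: "no-schema",
          prompt: params.prompt,
          temperature: params.temperature
        });
        span.addEvent("llm.response.processed", {
          "response.object": JSON.stringify(object)
        });
        return object;
      });
    }

Whatever the real body looks like, the consolidation removes the duplicated try/catch and generateObject call that the removed lines above show once per model size.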
@@ -353,16 +1460,21 @@ var openaiPlugin = {
  {
  name: "openai_test_url_and_api_key_validation",
  fn: async (runtime) => {
- const baseURL = runtime.getSetting("OPENAI_BASE_URL") ?? "https://api.openai.com/v1";
+ const baseURL = getBaseURL(runtime);
  const response = await fetch(`${baseURL}/models`, {
  headers: {
- Authorization: `Bearer ${runtime.getSetting("OPENAI_API_KEY")}`
+ Authorization: `Bearer ${getApiKey(runtime)}`
  }
  });
  const data = await response.json();
- logger.log("Models Available:", data?.data.length);
+ logger.log(
+ "Models Available:",
+ data?.data?.length ?? "N/A"
+ );
  if (!response.ok) {
- throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
+ throw new Error(
+ `Failed to validate OpenAI API key: ${response.statusText}`
+ );
  }
  }
  },
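
The validation test now reads its endpoint and credentials through getBaseURL and getApiKey rather than calling runtime.getSetting inline. Judging from the inline defaults in the removed lines, these are presumably thin wrappers along the following lines; the shared getSetting signature matches the TEXT_TO_SPEECH handler above, but the bodies are assumptions:

    // Sketch based on the removed inline defaults; not the package's actual source.
    function getSetting(runtime, key, defaultValue) {
      return runtime.getSetting(key) ?? defaultValue;
    }

    function getBaseURL(runtime) {
      return getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
    }

    function getApiKey(runtime) {
      return getSetting(runtime, "OPENAI_API_KEY");
    }

Centralizing the defaults this way keeps every call site consistent instead of repeating the fallback URL at each one.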
@@ -370,12 +1482,16 @@ var openaiPlugin = {
  name: "openai_test_text_embedding",
  fn: async (runtime) => {
  try {
- const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
- text: "Hello, world!"
- });
+ const embedding = await runtime.useModel(
+ ModelType.TEXT_EMBEDDING,
+ {
+ text: "Hello, world!"
+ }
+ );
  logger.log("embedding", embedding);
  } catch (error) {
- logger.error("Error in test_text_embedding:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_embedding: ${message}`);
  throw error;
  }
  }
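
This hunk and the ones that follow all swap logger.error("...", error) for the same two-line normalization, so the logged string is stable whether a real Error or some other thrown value arrives. If the pattern keeps spreading, a tiny helper would capture it; this is hypothetical and not part of the package:

    // Hypothetical convenience helper; the diff inlines this expression instead.
    function toErrorMessage(error) {
      return error instanceof Error ? error.message : String(error);
    }

    // Usage: logger.error(`Error in test_text_embedding: ${toErrorMessage(error)}`);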
@@ -392,7 +1508,8 @@ var openaiPlugin = {
  }
  logger.log("generated with test_text_large:", text);
  } catch (error) {
- logger.error("Error in test_text_large:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_large: ${message}`);
  throw error;
  }
  }
@@ -409,7 +1526,8 @@ var openaiPlugin = {
  }
  logger.log("generated with test_text_small:", text);
  } catch (error) {
- logger.error("Error in test_text_small:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_small: ${message}`);
  throw error;
  }
  }
@@ -426,7 +1544,8 @@ var openaiPlugin = {
  });
  logger.log("generated with test_image_generation:", image);
  } catch (error) {
- logger.error("Error in test_image_generation:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_image_generation: ${message}`);
  throw error;
  }
  }
@@ -444,13 +1563,20 @@ var openaiPlugin = {
  if (result && typeof result === "object" && "title" in result && "description" in result) {
  logger.log("Image description:", result);
  } else {
- logger.error("Invalid image description result format:", result);
+ logger.error(
+ "Invalid image description result format:",
+ result
+ );
  }
  } catch (e) {
- logger.error("Error in image description test:", e);
+ const message = e instanceof Error ? e.message : String(e);
+ logger.error(`Error in image description test: ${message}`);
  }
  } catch (e) {
- logger.error("Error in openai_test_image_description:", e);
+ const message = e instanceof Error ? e.message : String(e);
+ logger.error(
+ `Error in openai_test_image_description: ${message}`
+ );
  }
  }
  },
@@ -469,7 +1595,8 @@ var openaiPlugin = {
  );
  logger.log("generated with test_transcription:", transcription);
  } catch (error) {
- logger.error("Error in test_transcription:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_transcription: ${message}`);
  throw error;
  }
  }
@@ -478,9 +1605,14 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_encode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer encode!";
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+ const tokens = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_ENCODE,
+ { prompt }
+ );
  if (!Array.isArray(tokens) || tokens.length === 0) {
- throw new Error("Failed to tokenize text: expected non-empty array of tokens");
+ throw new Error(
+ "Failed to tokenize text: expected non-empty array of tokens"
+ );
  }
  logger.log("Tokenized output:", tokens);
  }
@@ -489,8 +1621,14 @@ var openaiPlugin = {
  name: "openai_test_text_tokenizer_decode",
  fn: async (runtime) => {
  const prompt = "Hello tokenizer decode!";
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
- const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
+ const tokens = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_ENCODE,
+ { prompt }
+ );
+ const decodedText = await runtime.useModel(
+ ModelType.TEXT_TOKENIZER_DECODE,
+ { tokens }
+ );
  if (decodedText !== prompt) {
  throw new Error(
  `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`
@@ -498,6 +1636,23 @@ var openaiPlugin = {
  }
  logger.log("Decoded text:", decodedText);
  }
+ },
+ {
+ name: "openai_test_text_to_speech",
+ fn: async (runtime) => {
+ try {
+ const text = "Hello, this is a test for text-to-speech.";
+ const response = await fetchTextToSpeech(runtime, text);
+ if (!response) {
+ throw new Error("Failed to generate speech");
+ }
+ logger.log("Generated speech successfully");
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in openai_test_text_to_speech: ${message}`);
+ throw error;
+ }
+ }
  }
  ]
  }
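
The new openai_test_text_to_speech test and the TEXT_TO_SPEECH handler both rely on fetchTextToSpeech, which is defined outside this diff section. A minimal sketch against OpenAI's /audio/speech endpoint, assuming the helper mirrors the fetch style of the transcription handler above; the OPENAI_TTS_VOICE setting name and the returned response.body stream are assumptions:

    // Sketch only -- the packaged implementation is not shown in this hunk.
    async function fetchTextToSpeech(runtime, text) {
      const baseURL = getBaseURL(runtime);
      const apiKey = getApiKey(runtime);
      if (!apiKey) {
        throw new Error("OpenAI API key not configured - Cannot make request");
      }
      const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
      const voice = getSetting(runtime, "OPENAI_TTS_VOICE", "alloy"); // setting name assumed
      const response = await fetch(`${baseURL}/audio/speech`, {
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json"
        },
        body: JSON.stringify({ model, voice, input: text })
      });
      if (!response.ok) {
        throw new Error(`Failed to generate speech: ${response.statusText}`);
      }
      return response.body; // a ReadableStream, i.e. the "speech stream" the handler returns
    }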