@elizaos/plugin-openai 1.0.0-beta.34 → 1.0.0-beta.37

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,14 +1,560 @@
  // src/index.ts
  import { createOpenAI } from "@ai-sdk/openai";
  import {
- ModelType,
+ EventType,
  logger,
- VECTOR_DIMS
+ ModelType,
+ VECTOR_DIMS,
+ safeReplacer,
+ ServiceType
  } from "@elizaos/core";
- import { generateObject, generateText, JSONParseError } from "ai";
+ import {
+ generateObject,
+ generateText,
+ JSONParseError
+ } from "ai";
  import { encodingForModel } from "js-tiktoken";
- import FormData from "form-data";
- import fetch from "node-fetch";
+ import { fetch, FormData } from "undici";
+
+ // ../../node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
+ var _globalThis = typeof globalThis === "object" ? globalThis : global;
+
+ // ../../node_modules/@opentelemetry/api/build/esm/version.js
+ var VERSION = "1.9.0";
+
+ // ../../node_modules/@opentelemetry/api/build/esm/internal/semver.js
+ var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
+ function _makeCompatibilityCheck(ownVersion) {
+ var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
+ var rejectedVersions = /* @__PURE__ */ new Set();
+ var myVersionMatch = ownVersion.match(re);
+ if (!myVersionMatch) {
+ return function() {
+ return false;
+ };
+ }
+ var ownVersionParsed = {
+ major: +myVersionMatch[1],
+ minor: +myVersionMatch[2],
+ patch: +myVersionMatch[3],
+ prerelease: myVersionMatch[4]
+ };
+ if (ownVersionParsed.prerelease != null) {
+ return function isExactmatch(globalVersion) {
+ return globalVersion === ownVersion;
+ };
+ }
+ function _reject(v) {
+ rejectedVersions.add(v);
+ return false;
+ }
+ function _accept(v) {
+ acceptedVersions.add(v);
+ return true;
+ }
+ return function isCompatible2(globalVersion) {
+ if (acceptedVersions.has(globalVersion)) {
+ return true;
+ }
+ if (rejectedVersions.has(globalVersion)) {
+ return false;
+ }
+ var globalVersionMatch = globalVersion.match(re);
+ if (!globalVersionMatch) {
+ return _reject(globalVersion);
+ }
+ var globalVersionParsed = {
+ major: +globalVersionMatch[1],
+ minor: +globalVersionMatch[2],
+ patch: +globalVersionMatch[3],
+ prerelease: globalVersionMatch[4]
+ };
+ if (globalVersionParsed.prerelease != null) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major !== globalVersionParsed.major) {
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.major === 0) {
+ if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ }
+ if (ownVersionParsed.minor <= globalVersionParsed.minor) {
+ return _accept(globalVersion);
+ }
+ return _reject(globalVersion);
+ };
+ }
+ var isCompatible = _makeCompatibilityCheck(VERSION);
+
+ // ../../node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
+ var major = VERSION.split(".")[0];
+ var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
+ var _global = _globalThis;
+ function registerGlobal(type, instance, diag, allowOverride) {
+ var _a;
+ if (allowOverride === void 0) {
+ allowOverride = false;
+ }
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
+ version: VERSION
+ };
+ if (!allowOverride && api[type]) {
+ var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ if (api.version !== VERSION) {
+ var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
+ diag.error(err.stack || err.message);
+ return false;
+ }
+ api[type] = instance;
+ diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
+ return true;
+ }
+ function getGlobal(type) {
+ var _a, _b;
+ var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
+ if (!globalVersion || !isCompatible(globalVersion)) {
+ return;
+ }
+ return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
+ }
+ function unregisterGlobal(type, diag) {
+ diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
+ var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
+ if (api) {
+ delete api[type];
+ }
+ }
+
+ // ../../node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
+ var __read = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var DiagComponentLogger = (
+ /** @class */
+ function() {
+ function DiagComponentLogger2(props) {
+ this._namespace = props.namespace || "DiagComponentLogger";
+ }
+ DiagComponentLogger2.prototype.debug = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("debug", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.error = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("error", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.info = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("info", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.warn = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("warn", this._namespace, args);
+ };
+ DiagComponentLogger2.prototype.verbose = function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ return logProxy("verbose", this._namespace, args);
+ };
+ return DiagComponentLogger2;
+ }()
+ );
+ function logProxy(funcName, namespace, args) {
+ var logger2 = getGlobal("diag");
+ if (!logger2) {
+ return;
+ }
+ args.unshift(namespace);
+ return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
+ }
+
+ // ../../node_modules/@opentelemetry/api/build/esm/diag/types.js
+ var DiagLogLevel;
+ (function(DiagLogLevel2) {
+ DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
+ DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
+ DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
+ DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
+ DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
+ DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
+ DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
+ })(DiagLogLevel || (DiagLogLevel = {}));
+
+ // ../../node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
+ function createLogLevelDiagLogger(maxLevel, logger2) {
+ if (maxLevel < DiagLogLevel.NONE) {
+ maxLevel = DiagLogLevel.NONE;
+ } else if (maxLevel > DiagLogLevel.ALL) {
+ maxLevel = DiagLogLevel.ALL;
+ }
+ logger2 = logger2 || {};
+ function _filterFunc(funcName, theLevel) {
+ var theFunc = logger2[funcName];
+ if (typeof theFunc === "function" && maxLevel >= theLevel) {
+ return theFunc.bind(logger2);
+ }
+ return function() {
+ };
+ }
+ return {
+ error: _filterFunc("error", DiagLogLevel.ERROR),
+ warn: _filterFunc("warn", DiagLogLevel.WARN),
+ info: _filterFunc("info", DiagLogLevel.INFO),
+ debug: _filterFunc("debug", DiagLogLevel.DEBUG),
+ verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
+ };
+ }
+
+ // ../../node_modules/@opentelemetry/api/build/esm/api/diag.js
+ var __read2 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray2 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME = "diag";
+ var DiagAPI = (
+ /** @class */
+ function() {
+ function DiagAPI2() {
+ function _logProxy(funcName) {
+ return function() {
+ var args = [];
+ for (var _i = 0; _i < arguments.length; _i++) {
+ args[_i] = arguments[_i];
+ }
+ var logger2 = getGlobal("diag");
+ if (!logger2)
+ return;
+ return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
+ };
+ }
+ var self = this;
+ var setLogger = function(logger2, optionsOrLogLevel) {
+ var _a, _b, _c;
+ if (optionsOrLogLevel === void 0) {
+ optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
+ }
+ if (logger2 === self) {
+ var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
+ self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
+ return false;
+ }
+ if (typeof optionsOrLogLevel === "number") {
+ optionsOrLogLevel = {
+ logLevel: optionsOrLogLevel
+ };
+ }
+ var oldLogger = getGlobal("diag");
+ var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
+ if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
+ var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
+ oldLogger.warn("Current logger will be overwritten from " + stack);
+ newLogger.warn("Current logger will overwrite one already registered from " + stack);
+ }
+ return registerGlobal("diag", newLogger, self, true);
+ };
+ self.setLogger = setLogger;
+ self.disable = function() {
+ unregisterGlobal(API_NAME, self);
+ };
+ self.createComponentLogger = function(options) {
+ return new DiagComponentLogger(options);
+ };
+ self.verbose = _logProxy("verbose");
+ self.debug = _logProxy("debug");
+ self.info = _logProxy("info");
+ self.warn = _logProxy("warn");
+ self.error = _logProxy("error");
+ }
+ DiagAPI2.instance = function() {
+ if (!this._instance) {
+ this._instance = new DiagAPI2();
+ }
+ return this._instance;
+ };
+ return DiagAPI2;
+ }()
+ );
+
+ // ../../node_modules/@opentelemetry/api/build/esm/context/context.js
+ var BaseContext = (
+ /** @class */
+ /* @__PURE__ */ function() {
+ function BaseContext2(parentContext) {
+ var self = this;
+ self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
+ self.getValue = function(key) {
+ return self._currentContext.get(key);
+ };
+ self.setValue = function(key, value) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.set(key, value);
+ return context2;
+ };
+ self.deleteValue = function(key) {
+ var context2 = new BaseContext2(self._currentContext);
+ context2._currentContext.delete(key);
+ return context2;
+ };
+ }
+ return BaseContext2;
+ }()
+ );
+ var ROOT_CONTEXT = new BaseContext();
+
+ // ../../node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
+ var __read3 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray3 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var NoopContextManager = (
+ /** @class */
+ function() {
+ function NoopContextManager2() {
+ }
+ NoopContextManager2.prototype.active = function() {
+ return ROOT_CONTEXT;
+ };
+ NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
+ };
+ NoopContextManager2.prototype.bind = function(_context, target) {
+ return target;
+ };
+ NoopContextManager2.prototype.enable = function() {
+ return this;
+ };
+ NoopContextManager2.prototype.disable = function() {
+ return this;
+ };
+ return NoopContextManager2;
+ }()
+ );
+
+ // ../../node_modules/@opentelemetry/api/build/esm/api/context.js
+ var __read4 = function(o, n) {
+ var m = typeof Symbol === "function" && o[Symbol.iterator];
+ if (!m) return o;
+ var i = m.call(o), r, ar = [], e;
+ try {
+ while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+ } catch (error) {
+ e = { error };
+ } finally {
+ try {
+ if (r && !r.done && (m = i["return"])) m.call(i);
+ } finally {
+ if (e) throw e.error;
+ }
+ }
+ return ar;
+ };
+ var __spreadArray4 = function(to, from, pack) {
+ if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+ if (ar || !(i in from)) {
+ if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+ ar[i] = from[i];
+ }
+ }
+ return to.concat(ar || Array.prototype.slice.call(from));
+ };
+ var API_NAME2 = "context";
+ var NOOP_CONTEXT_MANAGER = new NoopContextManager();
+ var ContextAPI = (
+ /** @class */
+ function() {
+ function ContextAPI2() {
+ }
+ ContextAPI2.getInstance = function() {
+ if (!this._instance) {
+ this._instance = new ContextAPI2();
+ }
+ return this._instance;
+ };
+ ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
+ return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
+ };
+ ContextAPI2.prototype.active = function() {
+ return this._getContextManager().active();
+ };
+ ContextAPI2.prototype.with = function(context2, fn, thisArg) {
+ var _a;
+ var args = [];
+ for (var _i = 3; _i < arguments.length; _i++) {
+ args[_i - 3] = arguments[_i];
+ }
+ return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
+ };
+ ContextAPI2.prototype.bind = function(context2, target) {
+ return this._getContextManager().bind(context2, target);
+ };
+ ContextAPI2.prototype._getContextManager = function() {
+ return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
+ };
+ ContextAPI2.prototype.disable = function() {
+ this._getContextManager().disable();
+ unregisterGlobal(API_NAME2, DiagAPI.instance());
+ };
+ return ContextAPI2;
+ }()
+ );
+
+ // ../../node_modules/@opentelemetry/api/build/esm/trace/status.js
+ var SpanStatusCode;
+ (function(SpanStatusCode2) {
+ SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
+ SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
+ SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
+ })(SpanStatusCode || (SpanStatusCode = {}));
+
+ // ../../node_modules/@opentelemetry/api/build/esm/context-api.js
+ var context = ContextAPI.getInstance();
+
+ // src/index.ts
+ function getTracer(runtime) {
+ const availableServices = Array.from(runtime.getAllServices().keys());
+ logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
+ logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
+ const instrumentationService = runtime.getService(
+ ServiceType.INSTRUMENTATION
+ );
+ if (!instrumentationService) {
+ logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
+ return null;
+ }
+ if (!instrumentationService.isEnabled()) {
+ logger.debug("[getTracer] Instrumentation service found but is disabled.");
+ return null;
+ }
+ logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
+ return instrumentationService.getTracer("eliza.llm.openai");
+ }
+ async function startLlmSpan(runtime, spanName, attributes, fn) {
+ const tracer = getTracer(runtime);
+ if (!tracer) {
+ const dummySpan = {
+ setAttribute: () => {
+ },
+ setAttributes: () => {
+ },
+ addEvent: () => {
+ },
+ recordException: () => {
+ },
+ setStatus: () => {
+ },
+ end: () => {
+ },
+ spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
+ };
+ return fn(dummySpan);
+ }
+ const activeContext = context.active();
+ return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
+ try {
+ const result = await fn(span);
+ span.setStatus({ code: SpanStatusCode.OK });
+ span.end();
+ return result;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ span.recordException(error);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ span.end();
+ throw error;
+ }
+ });
+ }
  function getSetting(runtime, key, defaultValue) {
  return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
  }
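Note on the new tracing helpers above: startLlmSpan wraps each model call in an OpenTelemetry span, substitutes a no-op span object when the instrumentation service is missing or disabled, and ends the span on both the success and the failure path. A minimal standalone sketch of the same wrap-and-record pattern, assuming only @opentelemetry/api and using a hypothetical runWithSpan name (not an export of this plugin):

    import { trace, SpanStatusCode } from "@opentelemetry/api";

    // Hypothetical illustration of the pattern startLlmSpan follows above.
    async function runWithSpan(spanName, attributes, fn) {
      const tracer = trace.getTracer("example-tracer");
      return tracer.startActiveSpan(spanName, { attributes }, async (span) => {
        try {
          const result = await fn(span); // the callback may add events and attributes
          span.setStatus({ code: SpanStatusCode.OK });
          return result;
        } catch (error) {
          span.recordException(error);
          span.setStatus({ code: SpanStatusCode.ERROR, message: String(error) });
          throw error;
        } finally {
          span.end(); // the span is closed on success and on failure alike
        }
      });
    }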
@@ -43,23 +589,109 @@ async function detokenizeText(model, tokens) {
  }
  async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
  const openai = createOpenAIClient(runtime);
- const model = getModelFn(runtime);
- try {
- if (params.schema) {
- logger.info(`Using ${modelType} without schema validation`);
+ const modelName = getModelFn(runtime);
+ const temperature = params.temperature ?? 0;
+ const schemaPresent = !!params.schema;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "object_generation",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.schema_present": schemaPresent
+ };
+ return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+ if (schemaPresent) {
+ span.addEvent("llm.request.schema", {
+ schema: JSON.stringify(params.schema, safeReplacer())
+ });
+ logger.info(
+ `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+ );
  }
- const { object } = await generateObject({
- model: openai.languageModel(model),
- output: "no-schema",
- prompt: params.prompt,
- temperature: params.temperature,
- experimental_repairText: getJsonRepairFunction()
- });
- return object;
- } catch (error) {
- logger.error(`Error generating object with ${modelType}:`, error);
- throw error;
- }
+ try {
+ const { object, usage } = await generateObject({
+ model: openai.languageModel(modelName),
+ output: "no-schema",
+ prompt: params.prompt,
+ temperature,
+ experimental_repairText: getJsonRepairFunction()
+ });
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(object, safeReplacer())
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+ }
+ return object;
+ } catch (error) {
+ if (error instanceof JSONParseError) {
+ logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+ span.recordException(error);
+ span.addEvent("llm.error.json_parse", {
+ "error.message": error.message,
+ "error.text": error.text
+ });
+ span.addEvent("llm.repair.attempt");
+ const repairFunction = getJsonRepairFunction();
+ const repairedJsonString = await repairFunction({
+ text: error.text,
+ error
+ });
+ if (repairedJsonString) {
+ try {
+ const repairedObject = JSON.parse(repairedJsonString);
+ span.addEvent("llm.repair.success", {
+ repaired_object: JSON.stringify(repairedObject, safeReplacer())
+ });
+ logger.info("[generateObject] Successfully repaired JSON.");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "JSON parsing failed but was repaired"
+ });
+ return repairedObject;
+ } catch (repairParseError) {
+ const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+ logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+ const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+ span.recordException(exception);
+ span.addEvent("llm.repair.parse_error", {
+ "error.message": message
+ });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${message}`
+ });
+ throw repairParseError;
+ }
+ } else {
+ const errMsg = error instanceof Error ? error.message : String(error);
+ logger.error("[generateObject] JSON repair failed.");
+ span.addEvent("llm.repair.failed");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `JSON repair failed: ${errMsg}`
+ });
+ throw error;
+ }
+ } else {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`[generateObject] Unknown error: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message
+ });
+ throw error;
+ }
+ }
+ });
  }
  function getJsonRepairFunction() {
  return async ({ text, error }) => {
@@ -69,12 +701,26 @@ function getJsonRepairFunction() {
  JSON.parse(cleanedText);
  return cleanedText;
  }
+ return null;
  } catch (jsonError) {
- logger.warn("Failed to repair JSON text:", jsonError);
+ const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+ logger.warn(`Failed to repair JSON text: ${message}`);
  return null;
  }
  };
  }
+ function emitModelUsageEvent(runtime, type, prompt, usage) {
+ runtime.emitEvent(EventType.MODEL_USED, {
+ provider: "openai",
+ type,
+ prompt,
+ tokens: {
+ prompt: usage.promptTokens,
+ completion: usage.completionTokens,
+ total: usage.totalTokens
+ }
+ });
+ }
  async function fetchTextToSpeech(runtime, text) {
  const apiKey = getApiKey(runtime);
  const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
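Two helpers land in this hunk: getJsonRepairFunction gains an explicit null return on the path where no repaired text is produced, and emitModelUsageEvent forwards token usage to the runtime as a MODEL_USED event. The repair idea itself is to strip markdown code fences and re-parse; a self-contained sketch with a hypothetical repairJson name, since the plugin's actual cleaning regex sits in unchanged context not shown in this hunk:

    // Hypothetical strip-and-reparse repair; the real cleaning regex may differ.
    async function repairJson({ text }) {
      try {
        const cleaned = text.replace(/```json\n|\n```|```/g, "");
        JSON.parse(cleaned); // throws if the cleaned text is still invalid
        return cleaned;      // hand back a parseable JSON string
      } catch {
        return null;         // signal that repair failed
      }
    }

    // Example: a fenced model response becomes plain JSON.
    repairJson({ text: '```json\n{"ok":true}\n```' }).then((repaired) => {
      if (repaired) console.log(JSON.parse(repaired)); // { ok: true }
    });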
@@ -101,7 +747,8 @@ async function fetchTextToSpeech(runtime, text) {
  }
  return res.body;
  } catch (err) {
- throw new Error(`Failed to fetch speech from OpenAI TTS: ${err.message || err}`);
+ const message = err instanceof Error ? err.message : String(err);
+ throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
  }
  }
  var openaiPlugin = {
@@ -134,29 +781,35 @@ var openaiPlugin = {
  logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
  logger.warn("OpenAI functionality will be limited until a valid API key is provided");
  } else {
+ logger.log("OpenAI API key validated successfully");
  }
  } catch (fetchError) {
- logger.warn(`Error validating OpenAI API key: ${fetchError}`);
+ const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+ logger.warn(`Error validating OpenAI API key: ${message}`);
  logger.warn("OpenAI functionality will be limited until a valid API key is provided");
  }
  } catch (error) {
+ const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
  logger.warn(
- `OpenAI plugin configuration issue: ${error.errors.map((e) => e.message).join(", ")} - You need to configure the OPENAI_API_KEY in your environment variables`
+ `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
  );
  }
  },
  models: {
  [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
- const embeddingDimension = parseInt(
- getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536")
+ const embeddingModelName = getSetting(
+ runtime,
+ "OPENAI_EMBEDDING_MODEL",
+ "text-embedding-3-small"
+ );
+ const embeddingDimension = Number.parseInt(
+ getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536",
+ 10
  );
  if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
- logger.error(
- `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`
- );
- throw new Error(
- `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`
- );
+ const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+ logger.error(errorMsg);
+ throw new Error(errorMsg);
  }
  if (params === null) {
  logger.debug("Creating test embedding for initialization");
@@ -181,41 +834,84 @@ var openaiPlugin = {
  emptyVector[0] = 0.3;
  return emptyVector;
  }
- try {
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "embedding",
+ "llm.request.model": embeddingModelName,
+ "llm.request.embedding.dimensions": embeddingDimension,
+ "input.text.length": text.length
+ };
+ return startLlmSpan(runtime, "LLM.embedding", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
  const baseURL = getBaseURL(runtime);
- const response = await fetch(`${baseURL}/embeddings`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${getApiKey(runtime)}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- model: getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small"),
- input: text
- })
- });
- if (!response.ok) {
- logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.4;
- return errorVector;
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
  }
- const data = await response.json();
- if (!data?.data?.[0]?.embedding) {
- logger.error("API returned invalid structure");
+ try {
+ const response = await fetch(`${baseURL}/embeddings`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ model: embeddingModelName,
+ input: text
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.4;
+ return errorVector;
+ }
+ const data = await response.json();
+ if (!data?.data?.[0]?.embedding) {
+ logger.error("API returned invalid structure");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "API returned invalid structure"
+ });
+ const errorVector = Array(embeddingDimension).fill(0);
+ errorVector[0] = 0.5;
+ return errorVector;
+ }
+ const embedding = data.data[0].embedding;
+ span.setAttribute("llm.response.embedding.vector_length", embedding.length);
+ if (data.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+ "llm.usage.total_tokens": data.usage.total_tokens
+ });
+ }
+ logger.log(`Got valid embedding with length ${embedding.length}`);
+ return embedding;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error generating embedding: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
  const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.5;
+ errorVector[0] = 0.6;
  return errorVector;
  }
- const embedding = data.data[0].embedding;
- logger.log(`Got valid embedding with length ${embedding.length}`);
- return embedding;
- } catch (error) {
- logger.error("Error generating embedding:", error);
- const errorVector = Array(embeddingDimension).fill(0);
- errorVector[0] = 0.6;
- return errorVector;
- }
+ });
  },
  [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
  return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
@@ -229,20 +925,45 @@ var openaiPlugin = {
  const presence_penalty = 0.7;
  const max_response_length = 8192;
  const openai = createOpenAIClient(runtime);
- const model = getSmallModel(runtime);
+ const modelName = getSmallModel(runtime);
  logger.log("generating text");
  logger.log(prompt);
- const { text: openaiResponse } = await generateText({
- model: openai.languageModel(model),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens: max_response_length,
- frequencyPenalty: frequency_penalty,
- presencePenalty: presence_penalty,
- stopSequences
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "completion",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.max_tokens": max_response_length,
+ "llm.request.frequency_penalty": frequency_penalty,
+ "llm.request.presence_penalty": presence_penalty,
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
+ };
+ return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens: max_response_length,
+ frequencyPenalty: frequency_penalty,
+ presencePenalty: presence_penalty,
+ stopSequences
+ });
+ span.setAttribute("llm.response.processed.length", openaiResponse.length);
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+ }
+ return openaiResponse;
  });
- return openaiResponse;
  },
  [ModelType.TEXT_LARGE]: async (runtime, {
  prompt,
@@ -253,134 +974,316 @@ var openaiPlugin = {
  presencePenalty = 0.7
  }) => {
  const openai = createOpenAIClient(runtime);
- const model = getLargeModel(runtime);
- const { text: openaiResponse } = await generateText({
- model: openai.languageModel(model),
- prompt,
- system: runtime.character.system ?? void 0,
- temperature,
- maxTokens,
- frequencyPenalty,
- presencePenalty,
- stopSequences
+ const modelName = getLargeModel(runtime);
+ logger.log("generating text");
+ logger.log(prompt);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "completion",
+ "llm.request.model": modelName,
+ "llm.request.temperature": temperature,
+ "llm.request.max_tokens": maxTokens,
+ "llm.request.frequency_penalty": frequencyPenalty,
+ "llm.request.presence_penalty": presencePenalty,
+ "llm.request.stop_sequences": JSON.stringify(stopSequences)
+ };
+ return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const { text: openaiResponse, usage } = await generateText({
+ model: openai.languageModel(modelName),
+ prompt,
+ system: runtime.character.system ?? void 0,
+ temperature,
+ maxTokens,
+ frequencyPenalty,
+ presencePenalty,
+ stopSequences
+ });
+ span.setAttribute("llm.response.processed.length", openaiResponse.length);
+ span.addEvent("llm.response.processed", {
+ "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+ });
+ if (usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": usage.promptTokens,
+ "llm.usage.completion_tokens": usage.completionTokens,
+ "llm.usage.total_tokens": usage.totalTokens
+ });
+ emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+ }
+ return openaiResponse;
  });
- return openaiResponse;
  },
  [ModelType.IMAGE]: async (runtime, params) => {
- const baseURL = getBaseURL(runtime);
- const response = await fetch(`${baseURL}/images/generations`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${getApiKey(runtime)}`,
- "Content-Type": "application/json"
- },
- body: JSON.stringify({
- prompt: params.prompt,
- n: params.n || 1,
- size: params.size || "1024x1024"
- })
+ const n = params.n || 1;
+ const size = params.size || "1024x1024";
+ const prompt = params.prompt;
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "image_generation",
+ "llm.request.image.size": size,
+ "llm.request.image.count": n
+ };
+ return startLlmSpan(runtime, "LLM.imageGeneration", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": prompt });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured");
+ }
+ try {
+ const response = await fetch(`${baseURL}/images/generations`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`,
+ "Content-Type": "application/json"
+ },
+ body: JSON.stringify({
+ prompt,
+ n,
+ size
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(`Failed to generate image: ${response.statusText}`);
+ }
+ const data = await response.json();
+ const typedData = data;
+ span.addEvent("llm.response.processed", {
+ "response.urls": JSON.stringify(typedData.data)
+ });
+ return typedData.data;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
+ }
  });
- if (!response.ok) {
- throw new Error(`Failed to generate image: ${response.statusText}`);
- }
- const data = await response.json();
- const typedData = data;
- return typedData.data;
  },
  [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
  let imageUrl;
- let prompt;
+ let promptText;
+ const modelName = "gpt-4o-mini";
+ const maxTokens = 300;
  if (typeof params === "string") {
  imageUrl = params;
- prompt = void 0;
+ promptText = "Please analyze this image and provide a title and detailed description.";
  } else {
  imageUrl = params.imageUrl;
- prompt = params.prompt;
+ promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
  }
- try {
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "chat",
+ "llm.request.model": modelName,
+ "llm.request.max_tokens": maxTokens,
+ "llm.request.image.url": imageUrl
+ };
+ const messages = [
+ {
+ role: "user",
+ content: [
+ { type: "text", text: promptText },
+ { type: "image_url", image_url: { url: imageUrl } }
+ ]
+ }
+ ];
+ return startLlmSpan(runtime, "LLM.imageDescription", attributes, async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.content": JSON.stringify(messages, safeReplacer())
+ });
  const baseURL = getBaseURL(runtime);
  const apiKey = getApiKey(runtime);
  if (!apiKey) {
  logger.error("OpenAI API key not set");
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
  return {
  title: "Failed to analyze image",
  description: "API key not configured"
  };
  }
- const response = await fetch(`${baseURL}/chat/completions`, {
- method: "POST",
- headers: {
- "Content-Type": "application/json",
- Authorization: `Bearer ${apiKey}`
- },
- body: JSON.stringify({
- model: "gpt-4o-mini",
- messages: [
- {
- role: "user",
- content: [
- {
- type: "text",
- text: prompt || "Please analyze this image and provide a title and detailed description."
- },
- {
- type: "image_url",
- image_url: { url: imageUrl }
- }
- ]
- }
- ],
- max_tokens: 300
- })
- });
- if (!response.ok) {
- throw new Error(`OpenAI API error: ${response.status}`);
- }
- const result = await response.json();
- const content = result.choices?.[0]?.message?.content;
- if (!content) {
+ try {
+ const response = await fetch(`${baseURL}/chat/completions`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model: modelName,
+ messages,
+ max_tokens: maxTokens
+ })
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
+ });
+ throw new Error(`OpenAI API error: ${response.status}`);
+ }
+ const result = await response.json();
+ const typedResult = result;
+ const content = typedResult.choices?.[0]?.message?.content;
+ if (typedResult.usage) {
+ span.setAttributes({
+ "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
+ "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
+ "llm.usage.total_tokens": typedResult.usage.total_tokens
+ });
+ }
+ if (typedResult.choices?.[0]?.finish_reason) {
+ span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
+ }
+ if (!content) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "No content in API response"
+ });
+ return {
+ title: "Failed to analyze image",
+ description: "No response from API"
+ };
+ }
+ const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+ const title = titleMatch?.[1]?.trim() || "Image Analysis";
+ const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+ const processedResult = { title, description };
+ span.addEvent("llm.response.processed", {
+ "response.object": JSON.stringify(processedResult, safeReplacer())
+ });
+ return processedResult;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error analyzing image: ${message}`);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
  return {
  title: "Failed to analyze image",
- description: "No response from API"
+ description: `Error: ${message}`
  };
  }
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
- const title = titleMatch?.[1] || "Image Analysis";
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
- return { title, description };
- } catch (error) {
- logger.error("Error analyzing image:", error);
- return {
- title: "Failed to analyze image",
- description: `Error: ${error instanceof Error ? error.message : String(error)}`
- };
- }
+ });
  },
  [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
  logger.log("audioBuffer", audioBuffer);
- const baseURL = getBaseURL(runtime);
- const formData = new FormData();
- formData.append("file", audioBuffer, {
- filename: "recording.mp3",
- contentType: "audio/mp3"
- });
- formData.append("model", "whisper-1");
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
- method: "POST",
- headers: {
- Authorization: `Bearer ${getApiKey(runtime)}`
- },
- body: formData
+ const modelName = "whisper-1";
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "transcription",
+ "llm.request.model": modelName,
+ "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
+ };
+ return startLlmSpan(runtime, "LLM.transcription", attributes, async (span) => {
+ span.addEvent("llm.prompt", {
+ "prompt.info": "Audio buffer for transcription"
+ });
+ const baseURL = getBaseURL(runtime);
+ const apiKey = getApiKey(runtime);
+ if (!apiKey) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "OpenAI API key not configured"
+ });
+ throw new Error("OpenAI API key not configured - Cannot make request");
+ }
+ if (!audioBuffer || audioBuffer.length === 0) {
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: "Audio buffer is empty or invalid"
+ });
+ throw new Error("Audio buffer is empty or invalid for transcription");
+ }
+ const formData = new FormData();
+ formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+ formData.append("model", "whisper-1");
+ try {
+ const response = await fetch(`${baseURL}/audio/transcriptions`, {
+ method: "POST",
+ headers: {
+ Authorization: `Bearer ${apiKey}`
+ },
+ body: formData
+ });
+ const responseClone = response.clone();
+ const rawResponseBody = await responseClone.text();
+ span.addEvent("llm.response.raw", {
+ "response.body": rawResponseBody
+ });
+ logger.log("response", response);
+ if (!response.ok) {
+ span.setAttributes({ "error.api.status": response.status });
+ span.setStatus({
+ code: SpanStatusCode.ERROR,
+ message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
+ });
+ throw new Error(`Failed to transcribe audio: ${response.statusText}`);
+ }
+ const data = await response.json();
+ const processedText = data.text;
+ span.setAttribute("llm.response.processed.length", processedText.length);
+ span.addEvent("llm.response.processed", {
+ "response.text": processedText
+ });
+ return processedText;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
+ }
  });
- logger.log("response", response);
- if (!response.ok) {
- throw new Error(`Failed to transcribe audio: ${response.statusText}`);
- }
- const data = await response.json();
- return data.text;
  },
  [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
- return await fetchTextToSpeech(runtime, text);
+ const attributes = {
+ "llm.vendor": "OpenAI",
+ "llm.request.type": "tts",
+ "llm.request.model": getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts"),
+ "input.text.length": text.length
+ };
+ return startLlmSpan(runtime, "LLM.tts", attributes, async (span) => {
+ span.addEvent("llm.prompt", { "prompt.content": text });
+ try {
+ const speechStream = await fetchTextToSpeech(runtime, text);
+ span.addEvent("llm.response.success", {
+ info: "Speech stream generated"
+ });
+ return speechStream;
+ } catch (error) {
+ const message = error instanceof Error ? error.message : String(error);
+ const exception = error instanceof Error ? error : new Error(message);
+ span.recordException(exception);
+ span.setStatus({ code: SpanStatusCode.ERROR, message });
+ throw error;
+ }
+ });
  },
  [ModelType.OBJECT_SMALL]: async (runtime, params) => {
  return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
@@ -403,7 +1306,7 @@ var openaiPlugin = {
  }
  });
  const data = await response.json();
- logger.log("Models Available:", data?.data.length);
+ logger.log("Models Available:", data?.data?.length ?? "N/A");
  if (!response.ok) {
  throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
  }
@@ -418,7 +1321,8 @@ var openaiPlugin = {
  });
  logger.log("embedding", embedding);
  } catch (error) {
- logger.error("Error in test_text_embedding:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_embedding: ${message}`);
  throw error;
  }
  }
@@ -435,7 +1339,8 @@ var openaiPlugin = {
  }
  logger.log("generated with test_text_large:", text);
  } catch (error) {
- logger.error("Error in test_text_large:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_large: ${message}`);
  throw error;
  }
  }
@@ -452,7 +1357,8 @@ var openaiPlugin = {
  }
  logger.log("generated with test_text_small:", text);
  } catch (error) {
- logger.error("Error in test_text_small:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_text_small: ${message}`);
  throw error;
  }
  }
@@ -469,7 +1375,8 @@ var openaiPlugin = {
  });
  logger.log("generated with test_image_generation:", image);
  } catch (error) {
- logger.error("Error in test_image_generation:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_image_generation: ${message}`);
  throw error;
  }
  }
@@ -490,10 +1397,12 @@ var openaiPlugin = {
  logger.error("Invalid image description result format:", result);
  }
  } catch (e) {
- logger.error("Error in image description test:", e);
+ const message = e instanceof Error ? e.message : String(e);
+ logger.error(`Error in image description test: ${message}`);
  }
  } catch (e) {
- logger.error("Error in openai_test_image_description:", e);
+ const message = e instanceof Error ? e.message : String(e);
+ logger.error(`Error in openai_test_image_description: ${message}`);
  }
  }
  },
@@ -512,7 +1421,8 @@ var openaiPlugin = {
  );
  logger.log("generated with test_transcription:", transcription);
  } catch (error) {
- logger.error("Error in test_transcription:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in test_transcription: ${message}`);
  throw error;
  }
  }
@@ -553,7 +1463,8 @@ var openaiPlugin = {
  }
  logger.log("Generated speech successfully");
  } catch (error) {
- logger.error("Error in openai_test_text_to_speech:", error);
+ const message = error instanceof Error ? error.message : String(error);
+ logger.error(`Error in openai_test_text_to_speech: ${message}`);
  throw error;
  }
  }
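The dependency change behind several of these hunks is the swap from node-fetch plus form-data to undici's WHATWG-style fetch and FormData, which is why the transcription path above now appends a Blob with a filename instead of a Buffer with an options object. A hedged end-to-end sketch of that upload style, assuming Node 18+ (global Blob) and an illustrative endpoint; the plugin itself derives the base URL and key from runtime settings:

    import { fetch, FormData } from "undici";
    import { readFile } from "node:fs/promises";

    async function transcribe(apiKey, filePath) {
      const audioBuffer = await readFile(filePath);
      const formData = new FormData();
      // undici's FormData takes a Blob plus a filename, not form-data's
      // { filename, contentType } options object.
      formData.append("file", new Blob([audioBuffer]), "recording.mp3");
      formData.append("model", "whisper-1");
      const response = await fetch("https://api.openai.com/v1/audio/transcriptions", {
        method: "POST",
        headers: { Authorization: `Bearer ${apiKey}` },
        body: formData
      });
      if (!response.ok) {
        throw new Error(`Failed to transcribe audio: ${response.statusText}`);
      }
      const data = await response.json();
      return data.text; // Whisper responses carry the transcript in "text"
    }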