@elizaos/plugin-openai 1.0.0-beta.41 → 1.0.0-beta.42

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,1494 +1,2 @@
1
- // src/index.ts
2
- import { createOpenAI } from "@ai-sdk/openai";
3
- import { getProviderBaseURL } from "@elizaos/core";
4
- import {
5
- EventType,
6
- logger,
7
- ModelType,
8
- VECTOR_DIMS,
9
- safeReplacer,
10
- ServiceType
11
- } from "@elizaos/core";
12
- import {
13
- generateObject,
14
- generateText,
15
- JSONParseError
16
- } from "ai";
17
- import { encodingForModel } from "js-tiktoken";
18
- import { fetch, FormData } from "undici";
19
-
20
- // ../../node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
21
- var _globalThis = typeof globalThis === "object" ? globalThis : global;
22
-
23
- // ../../node_modules/@opentelemetry/api/build/esm/version.js
24
- var VERSION = "1.9.0";
25
-
26
- // ../../node_modules/@opentelemetry/api/build/esm/internal/semver.js
27
- var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
28
- function _makeCompatibilityCheck(ownVersion) {
29
- var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
30
- var rejectedVersions = /* @__PURE__ */ new Set();
31
- var myVersionMatch = ownVersion.match(re);
32
- if (!myVersionMatch) {
33
- return function() {
34
- return false;
35
- };
36
- }
37
- var ownVersionParsed = {
38
- major: +myVersionMatch[1],
39
- minor: +myVersionMatch[2],
40
- patch: +myVersionMatch[3],
41
- prerelease: myVersionMatch[4]
42
- };
43
- if (ownVersionParsed.prerelease != null) {
44
- return function isExactmatch(globalVersion) {
45
- return globalVersion === ownVersion;
46
- };
47
- }
48
- function _reject(v) {
49
- rejectedVersions.add(v);
50
- return false;
51
- }
52
- function _accept(v) {
53
- acceptedVersions.add(v);
54
- return true;
55
- }
56
- return function isCompatible2(globalVersion) {
57
- if (acceptedVersions.has(globalVersion)) {
58
- return true;
59
- }
60
- if (rejectedVersions.has(globalVersion)) {
61
- return false;
62
- }
63
- var globalVersionMatch = globalVersion.match(re);
64
- if (!globalVersionMatch) {
65
- return _reject(globalVersion);
66
- }
67
- var globalVersionParsed = {
68
- major: +globalVersionMatch[1],
69
- minor: +globalVersionMatch[2],
70
- patch: +globalVersionMatch[3],
71
- prerelease: globalVersionMatch[4]
72
- };
73
- if (globalVersionParsed.prerelease != null) {
74
- return _reject(globalVersion);
75
- }
76
- if (ownVersionParsed.major !== globalVersionParsed.major) {
77
- return _reject(globalVersion);
78
- }
79
- if (ownVersionParsed.major === 0) {
80
- if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
81
- return _accept(globalVersion);
82
- }
83
- return _reject(globalVersion);
84
- }
85
- if (ownVersionParsed.minor <= globalVersionParsed.minor) {
86
- return _accept(globalVersion);
87
- }
88
- return _reject(globalVersion);
89
- };
90
- }
91
- var isCompatible = _makeCompatibilityCheck(VERSION);
92
-
93
- // ../../node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
94
- var major = VERSION.split(".")[0];
95
- var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
96
- var _global = _globalThis;
97
- function registerGlobal(type, instance, diag, allowOverride) {
98
- var _a;
99
- if (allowOverride === void 0) {
100
- allowOverride = false;
101
- }
102
- var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
103
- version: VERSION
104
- };
105
- if (!allowOverride && api[type]) {
106
- var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
107
- diag.error(err.stack || err.message);
108
- return false;
109
- }
110
- if (api.version !== VERSION) {
111
- var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
112
- diag.error(err.stack || err.message);
113
- return false;
114
- }
115
- api[type] = instance;
116
- diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
117
- return true;
118
- }
119
- function getGlobal(type) {
120
- var _a, _b;
121
- var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
122
- if (!globalVersion || !isCompatible(globalVersion)) {
123
- return;
124
- }
125
- return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
126
- }
127
- function unregisterGlobal(type, diag) {
128
- diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
129
- var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
130
- if (api) {
131
- delete api[type];
132
- }
133
- }
134
-
135
- // ../../node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
136
- var __read = function(o, n) {
137
- var m = typeof Symbol === "function" && o[Symbol.iterator];
138
- if (!m) return o;
139
- var i = m.call(o), r, ar = [], e;
140
- try {
141
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
142
- } catch (error) {
143
- e = { error };
144
- } finally {
145
- try {
146
- if (r && !r.done && (m = i["return"])) m.call(i);
147
- } finally {
148
- if (e) throw e.error;
149
- }
150
- }
151
- return ar;
152
- };
153
- var __spreadArray = function(to, from, pack) {
154
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
155
- if (ar || !(i in from)) {
156
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
157
- ar[i] = from[i];
158
- }
159
- }
160
- return to.concat(ar || Array.prototype.slice.call(from));
161
- };
162
- var DiagComponentLogger = (
163
- /** @class */
164
- function() {
165
- function DiagComponentLogger2(props) {
166
- this._namespace = props.namespace || "DiagComponentLogger";
167
- }
168
- DiagComponentLogger2.prototype.debug = function() {
169
- var args = [];
170
- for (var _i = 0; _i < arguments.length; _i++) {
171
- args[_i] = arguments[_i];
172
- }
173
- return logProxy("debug", this._namespace, args);
174
- };
175
- DiagComponentLogger2.prototype.error = function() {
176
- var args = [];
177
- for (var _i = 0; _i < arguments.length; _i++) {
178
- args[_i] = arguments[_i];
179
- }
180
- return logProxy("error", this._namespace, args);
181
- };
182
- DiagComponentLogger2.prototype.info = function() {
183
- var args = [];
184
- for (var _i = 0; _i < arguments.length; _i++) {
185
- args[_i] = arguments[_i];
186
- }
187
- return logProxy("info", this._namespace, args);
188
- };
189
- DiagComponentLogger2.prototype.warn = function() {
190
- var args = [];
191
- for (var _i = 0; _i < arguments.length; _i++) {
192
- args[_i] = arguments[_i];
193
- }
194
- return logProxy("warn", this._namespace, args);
195
- };
196
- DiagComponentLogger2.prototype.verbose = function() {
197
- var args = [];
198
- for (var _i = 0; _i < arguments.length; _i++) {
199
- args[_i] = arguments[_i];
200
- }
201
- return logProxy("verbose", this._namespace, args);
202
- };
203
- return DiagComponentLogger2;
204
- }()
205
- );
206
- function logProxy(funcName, namespace, args) {
207
- var logger2 = getGlobal("diag");
208
- if (!logger2) {
209
- return;
210
- }
211
- args.unshift(namespace);
212
- return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
213
- }
214
-
215
- // ../../node_modules/@opentelemetry/api/build/esm/diag/types.js
216
- var DiagLogLevel;
217
- (function(DiagLogLevel2) {
218
- DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
219
- DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
220
- DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
221
- DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
222
- DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
223
- DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
224
- DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
225
- })(DiagLogLevel || (DiagLogLevel = {}));
226
-
227
- // ../../node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
228
- function createLogLevelDiagLogger(maxLevel, logger2) {
229
- if (maxLevel < DiagLogLevel.NONE) {
230
- maxLevel = DiagLogLevel.NONE;
231
- } else if (maxLevel > DiagLogLevel.ALL) {
232
- maxLevel = DiagLogLevel.ALL;
233
- }
234
- logger2 = logger2 || {};
235
- function _filterFunc(funcName, theLevel) {
236
- var theFunc = logger2[funcName];
237
- if (typeof theFunc === "function" && maxLevel >= theLevel) {
238
- return theFunc.bind(logger2);
239
- }
240
- return function() {
241
- };
242
- }
243
- return {
244
- error: _filterFunc("error", DiagLogLevel.ERROR),
245
- warn: _filterFunc("warn", DiagLogLevel.WARN),
246
- info: _filterFunc("info", DiagLogLevel.INFO),
247
- debug: _filterFunc("debug", DiagLogLevel.DEBUG),
248
- verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
249
- };
250
- }
251
-
252
- // ../../node_modules/@opentelemetry/api/build/esm/api/diag.js
253
- var __read2 = function(o, n) {
254
- var m = typeof Symbol === "function" && o[Symbol.iterator];
255
- if (!m) return o;
256
- var i = m.call(o), r, ar = [], e;
257
- try {
258
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
259
- } catch (error) {
260
- e = { error };
261
- } finally {
262
- try {
263
- if (r && !r.done && (m = i["return"])) m.call(i);
264
- } finally {
265
- if (e) throw e.error;
266
- }
267
- }
268
- return ar;
269
- };
270
- var __spreadArray2 = function(to, from, pack) {
271
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
272
- if (ar || !(i in from)) {
273
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
274
- ar[i] = from[i];
275
- }
276
- }
277
- return to.concat(ar || Array.prototype.slice.call(from));
278
- };
279
- var API_NAME = "diag";
280
- var DiagAPI = (
281
- /** @class */
282
- function() {
283
- function DiagAPI2() {
284
- function _logProxy(funcName) {
285
- return function() {
286
- var args = [];
287
- for (var _i = 0; _i < arguments.length; _i++) {
288
- args[_i] = arguments[_i];
289
- }
290
- var logger2 = getGlobal("diag");
291
- if (!logger2)
292
- return;
293
- return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
294
- };
295
- }
296
- var self = this;
297
- var setLogger = function(logger2, optionsOrLogLevel) {
298
- var _a, _b, _c;
299
- if (optionsOrLogLevel === void 0) {
300
- optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
301
- }
302
- if (logger2 === self) {
303
- var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
304
- self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
305
- return false;
306
- }
307
- if (typeof optionsOrLogLevel === "number") {
308
- optionsOrLogLevel = {
309
- logLevel: optionsOrLogLevel
310
- };
311
- }
312
- var oldLogger = getGlobal("diag");
313
- var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
314
- if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
315
- var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
316
- oldLogger.warn("Current logger will be overwritten from " + stack);
317
- newLogger.warn("Current logger will overwrite one already registered from " + stack);
318
- }
319
- return registerGlobal("diag", newLogger, self, true);
320
- };
321
- self.setLogger = setLogger;
322
- self.disable = function() {
323
- unregisterGlobal(API_NAME, self);
324
- };
325
- self.createComponentLogger = function(options) {
326
- return new DiagComponentLogger(options);
327
- };
328
- self.verbose = _logProxy("verbose");
329
- self.debug = _logProxy("debug");
330
- self.info = _logProxy("info");
331
- self.warn = _logProxy("warn");
332
- self.error = _logProxy("error");
333
- }
334
- DiagAPI2.instance = function() {
335
- if (!this._instance) {
336
- this._instance = new DiagAPI2();
337
- }
338
- return this._instance;
339
- };
340
- return DiagAPI2;
341
- }()
342
- );
343
-
344
- // ../../node_modules/@opentelemetry/api/build/esm/context/context.js
345
- var BaseContext = (
346
- /** @class */
347
- /* @__PURE__ */ function() {
348
- function BaseContext2(parentContext) {
349
- var self = this;
350
- self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
351
- self.getValue = function(key) {
352
- return self._currentContext.get(key);
353
- };
354
- self.setValue = function(key, value) {
355
- var context2 = new BaseContext2(self._currentContext);
356
- context2._currentContext.set(key, value);
357
- return context2;
358
- };
359
- self.deleteValue = function(key) {
360
- var context2 = new BaseContext2(self._currentContext);
361
- context2._currentContext.delete(key);
362
- return context2;
363
- };
364
- }
365
- return BaseContext2;
366
- }()
367
- );
368
- var ROOT_CONTEXT = new BaseContext();
369
-
370
- // ../../node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
371
- var __read3 = function(o, n) {
372
- var m = typeof Symbol === "function" && o[Symbol.iterator];
373
- if (!m) return o;
374
- var i = m.call(o), r, ar = [], e;
375
- try {
376
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
377
- } catch (error) {
378
- e = { error };
379
- } finally {
380
- try {
381
- if (r && !r.done && (m = i["return"])) m.call(i);
382
- } finally {
383
- if (e) throw e.error;
384
- }
385
- }
386
- return ar;
387
- };
388
- var __spreadArray3 = function(to, from, pack) {
389
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
390
- if (ar || !(i in from)) {
391
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
392
- ar[i] = from[i];
393
- }
394
- }
395
- return to.concat(ar || Array.prototype.slice.call(from));
396
- };
397
- var NoopContextManager = (
398
- /** @class */
399
- function() {
400
- function NoopContextManager2() {
401
- }
402
- NoopContextManager2.prototype.active = function() {
403
- return ROOT_CONTEXT;
404
- };
405
- NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
406
- var args = [];
407
- for (var _i = 3; _i < arguments.length; _i++) {
408
- args[_i - 3] = arguments[_i];
409
- }
410
- return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
411
- };
412
- NoopContextManager2.prototype.bind = function(_context, target) {
413
- return target;
414
- };
415
- NoopContextManager2.prototype.enable = function() {
416
- return this;
417
- };
418
- NoopContextManager2.prototype.disable = function() {
419
- return this;
420
- };
421
- return NoopContextManager2;
422
- }()
423
- );
424
-
425
- // ../../node_modules/@opentelemetry/api/build/esm/api/context.js
426
- var __read4 = function(o, n) {
427
- var m = typeof Symbol === "function" && o[Symbol.iterator];
428
- if (!m) return o;
429
- var i = m.call(o), r, ar = [], e;
430
- try {
431
- while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
432
- } catch (error) {
433
- e = { error };
434
- } finally {
435
- try {
436
- if (r && !r.done && (m = i["return"])) m.call(i);
437
- } finally {
438
- if (e) throw e.error;
439
- }
440
- }
441
- return ar;
442
- };
443
- var __spreadArray4 = function(to, from, pack) {
444
- if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
445
- if (ar || !(i in from)) {
446
- if (!ar) ar = Array.prototype.slice.call(from, 0, i);
447
- ar[i] = from[i];
448
- }
449
- }
450
- return to.concat(ar || Array.prototype.slice.call(from));
451
- };
452
- var API_NAME2 = "context";
453
- var NOOP_CONTEXT_MANAGER = new NoopContextManager();
454
- var ContextAPI = (
455
- /** @class */
456
- function() {
457
- function ContextAPI2() {
458
- }
459
- ContextAPI2.getInstance = function() {
460
- if (!this._instance) {
461
- this._instance = new ContextAPI2();
462
- }
463
- return this._instance;
464
- };
465
- ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
466
- return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
467
- };
468
- ContextAPI2.prototype.active = function() {
469
- return this._getContextManager().active();
470
- };
471
- ContextAPI2.prototype.with = function(context2, fn, thisArg) {
472
- var _a;
473
- var args = [];
474
- for (var _i = 3; _i < arguments.length; _i++) {
475
- args[_i - 3] = arguments[_i];
476
- }
477
- return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
478
- };
479
- ContextAPI2.prototype.bind = function(context2, target) {
480
- return this._getContextManager().bind(context2, target);
481
- };
482
- ContextAPI2.prototype._getContextManager = function() {
483
- return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
484
- };
485
- ContextAPI2.prototype.disable = function() {
486
- this._getContextManager().disable();
487
- unregisterGlobal(API_NAME2, DiagAPI.instance());
488
- };
489
- return ContextAPI2;
490
- }()
491
- );
492
-
493
- // ../../node_modules/@opentelemetry/api/build/esm/trace/status.js
494
- var SpanStatusCode;
495
- (function(SpanStatusCode2) {
496
- SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
497
- SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
498
- SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
499
- })(SpanStatusCode || (SpanStatusCode = {}));
500
-
501
- // ../../node_modules/@opentelemetry/api/build/esm/context-api.js
502
- var context = ContextAPI.getInstance();
503
-
504
- // src/index.ts
505
- function getTracer(runtime) {
506
- const availableServices = Array.from(runtime.getAllServices().keys());
507
- logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
508
- logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
509
- const instrumentationService = runtime.getService(
510
- ServiceType.INSTRUMENTATION
511
- );
512
- if (!instrumentationService) {
513
- logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
514
- return null;
515
- }
516
- if (!instrumentationService.isEnabled()) {
517
- logger.debug("[getTracer] Instrumentation service found but is disabled.");
518
- return null;
519
- }
520
- logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
521
- return instrumentationService.getTracer("eliza.llm.openai");
522
- }
523
- async function startLlmSpan(runtime, spanName, attributes, fn) {
524
- const tracer = getTracer(runtime);
525
- if (!tracer) {
526
- const dummySpan = {
527
- setAttribute: () => {
528
- },
529
- setAttributes: () => {
530
- },
531
- addEvent: () => {
532
- },
533
- recordException: () => {
534
- },
535
- setStatus: () => {
536
- },
537
- end: () => {
538
- },
539
- spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
540
- };
541
- return fn(dummySpan);
542
- }
543
- const activeContext = context.active();
544
- return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
545
- try {
546
- const result = await fn(span);
547
- span.setStatus({ code: SpanStatusCode.OK });
548
- span.end();
549
- return result;
550
- } catch (error) {
551
- const message = error instanceof Error ? error.message : String(error);
552
- span.recordException(error);
553
- span.setStatus({ code: SpanStatusCode.ERROR, message });
554
- span.end();
555
- throw error;
556
- }
557
- });
558
- }
559
- function getSetting(runtime, key, defaultValue) {
560
- return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
561
- }
562
- function getBaseURL(runtime) {
563
- const defaultBaseURL = getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
564
- logger.debug(`[OpenAI] Default base URL: ${defaultBaseURL}`);
565
- return getProviderBaseURL(runtime, "openai", defaultBaseURL);
566
- }
567
- function getApiKey(runtime) {
568
- return getSetting(runtime, "OPENAI_API_KEY");
569
- }
570
- function getSmallModel(runtime) {
571
- return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
572
- }
573
- function getLargeModel(runtime) {
574
- return getSetting(runtime, "OPENAI_LARGE_MODEL") ?? getSetting(runtime, "LARGE_MODEL", "gpt-4o");
575
- }
576
- function createOpenAIClient(runtime) {
577
- return createOpenAI({
578
- apiKey: getApiKey(runtime),
579
- baseURL: getBaseURL(runtime)
580
- });
581
- }
582
- async function tokenizeText(model, prompt) {
583
- const modelName = model === ModelType.TEXT_SMALL ? process.env.OPENAI_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-4o-mini" : process.env.LARGE_MODEL ?? "gpt-4o";
584
- const encoding = encodingForModel(modelName);
585
- const tokens = encoding.encode(prompt);
586
- return tokens;
587
- }
588
- async function detokenizeText(model, tokens) {
589
- const modelName = model === ModelType.TEXT_SMALL ? process.env.OPENAI_SMALL_MODEL ?? process.env.SMALL_MODEL ?? "gpt-4o-mini" : process.env.OPENAI_LARGE_MODEL ?? process.env.LARGE_MODEL ?? "gpt-4o";
590
- const encoding = encodingForModel(modelName);
591
- return encoding.decode(tokens);
592
- }
593
- async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
594
- const openai = createOpenAIClient(runtime);
595
- const modelName = getModelFn(runtime);
596
- logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
597
- const temperature = params.temperature ?? 0;
598
- const schemaPresent = !!params.schema;
599
- const attributes = {
600
- "llm.vendor": "OpenAI",
601
- "llm.request.type": "object_generation",
602
- "llm.request.model": modelName,
603
- "llm.request.temperature": temperature,
604
- "llm.request.schema_present": schemaPresent
605
- };
606
- return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
607
- span.addEvent("llm.prompt", { "prompt.content": params.prompt });
608
- if (schemaPresent) {
609
- span.addEvent("llm.request.schema", {
610
- schema: JSON.stringify(params.schema, safeReplacer())
611
- });
612
- logger.info(
613
- `Using ${modelType} without schema validation (schema provided but output=no-schema)`
614
- );
615
- }
616
- try {
617
- const { object, usage } = await generateObject({
618
- model: openai.languageModel(modelName),
619
- output: "no-schema",
620
- prompt: params.prompt,
621
- temperature,
622
- experimental_repairText: getJsonRepairFunction()
623
- });
624
- span.addEvent("llm.response.processed", {
625
- "response.object": JSON.stringify(object, safeReplacer())
626
- });
627
- if (usage) {
628
- span.setAttributes({
629
- "llm.usage.prompt_tokens": usage.promptTokens,
630
- "llm.usage.completion_tokens": usage.completionTokens,
631
- "llm.usage.total_tokens": usage.totalTokens
632
- });
633
- emitModelUsageEvent(runtime, modelType, params.prompt, usage);
634
- }
635
- return object;
636
- } catch (error) {
637
- if (error instanceof JSONParseError) {
638
- logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
639
- span.recordException(error);
640
- span.addEvent("llm.error.json_parse", {
641
- "error.message": error.message,
642
- "error.text": error.text
643
- });
644
- span.addEvent("llm.repair.attempt");
645
- const repairFunction = getJsonRepairFunction();
646
- const repairedJsonString = await repairFunction({
647
- text: error.text,
648
- error
649
- });
650
- if (repairedJsonString) {
651
- try {
652
- const repairedObject = JSON.parse(repairedJsonString);
653
- span.addEvent("llm.repair.success", {
654
- repaired_object: JSON.stringify(repairedObject, safeReplacer())
655
- });
656
- logger.info("[generateObject] Successfully repaired JSON.");
657
- span.setStatus({
658
- code: SpanStatusCode.ERROR,
659
- message: "JSON parsing failed but was repaired"
660
- });
661
- return repairedObject;
662
- } catch (repairParseError) {
663
- const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
664
- logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
665
- const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
666
- span.recordException(exception);
667
- span.addEvent("llm.repair.parse_error", {
668
- "error.message": message
669
- });
670
- span.setStatus({
671
- code: SpanStatusCode.ERROR,
672
- message: `JSON repair failed: ${message}`
673
- });
674
- throw repairParseError;
675
- }
676
- } else {
677
- const errMsg = error instanceof Error ? error.message : String(error);
678
- logger.error("[generateObject] JSON repair failed.");
679
- span.addEvent("llm.repair.failed");
680
- span.setStatus({
681
- code: SpanStatusCode.ERROR,
682
- message: `JSON repair failed: ${errMsg}`
683
- });
684
- throw error;
685
- }
686
- } else {
687
- const message = error instanceof Error ? error.message : String(error);
688
- logger.error(`[generateObject] Unknown error: ${message}`);
689
- const exception = error instanceof Error ? error : new Error(message);
690
- span.recordException(exception);
691
- span.setStatus({
692
- code: SpanStatusCode.ERROR,
693
- message
694
- });
695
- throw error;
696
- }
697
- }
698
- });
699
- }
700
- function getJsonRepairFunction() {
701
- return async ({ text, error }) => {
702
- try {
703
- if (error instanceof JSONParseError) {
704
- const cleanedText = text.replace(/```json\n|\n```|```/g, "");
705
- JSON.parse(cleanedText);
706
- return cleanedText;
707
- }
708
- return null;
709
- } catch (jsonError) {
710
- const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
711
- logger.warn(`Failed to repair JSON text: ${message}`);
712
- return null;
713
- }
714
- };
715
- }
716
- function emitModelUsageEvent(runtime, type, prompt, usage) {
717
- runtime.emitEvent(EventType.MODEL_USED, {
718
- provider: "openai",
719
- type,
720
- prompt,
721
- tokens: {
722
- prompt: usage.promptTokens,
723
- completion: usage.completionTokens,
724
- total: usage.totalTokens
725
- }
726
- });
727
- }
728
- async function fetchTextToSpeech(runtime, text) {
729
- const apiKey = getApiKey(runtime);
730
- const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
731
- const voice = getSetting(runtime, "OPENAI_TTS_VOICE", "nova");
732
- const instructions = getSetting(runtime, "OPENAI_TTS_INSTRUCTIONS", "");
733
- const baseURL = getBaseURL(runtime);
734
- try {
735
- const res = await fetch(`${baseURL}/audio/speech`, {
736
- method: "POST",
737
- headers: {
738
- Authorization: `Bearer ${apiKey}`,
739
- "Content-Type": "application/json"
740
- },
741
- body: JSON.stringify({
742
- model,
743
- voice,
744
- input: text,
745
- ...instructions && { instructions }
746
- })
747
- });
748
- if (!res.ok) {
749
- const err = await res.text();
750
- throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
751
- }
752
- return res.body;
753
- } catch (err) {
754
- const message = err instanceof Error ? err.message : String(err);
755
- throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
756
- }
757
- }
758
- var openaiPlugin = {
759
- name: "openai",
760
- description: "OpenAI plugin",
761
- config: {
762
- OPENAI_API_KEY: process.env.OPENAI_API_KEY,
763
- OPENAI_BASE_URL: process.env.OPENAI_BASE_URL,
764
- OPENAI_SMALL_MODEL: process.env.OPENAI_SMALL_MODEL,
765
- OPENAI_LARGE_MODEL: process.env.OPENAI_LARGE_MODEL,
766
- SMALL_MODEL: process.env.SMALL_MODEL,
767
- LARGE_MODEL: process.env.LARGE_MODEL,
768
- OPENAI_EMBEDDING_MODEL: process.env.OPENAI_EMBEDDING_MODEL,
769
- OPENAI_EMBEDDING_DIMENSIONS: process.env.OPENAI_EMBEDDING_DIMENSIONS
770
- },
771
- async init(_config, runtime) {
772
- try {
773
- if (!getApiKey(runtime)) {
774
- logger.warn(
775
- "OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited"
776
- );
777
- return;
778
- }
779
- try {
780
- const baseURL = getBaseURL(runtime);
781
- const response = await fetch(`${baseURL}/models`, {
782
- headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
783
- });
784
- if (!response.ok) {
785
- logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
786
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
787
- } else {
788
- logger.log("OpenAI API key validated successfully");
789
- }
790
- } catch (fetchError) {
791
- const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
792
- logger.warn(`Error validating OpenAI API key: ${message}`);
793
- logger.warn("OpenAI functionality will be limited until a valid API key is provided");
794
- }
795
- } catch (error) {
796
- const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
797
- logger.warn(
798
- `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
799
- );
800
- }
801
- },
802
- models: {
803
- [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
804
- const embeddingModelName = getSetting(
805
- runtime,
806
- "OPENAI_EMBEDDING_MODEL",
807
- "text-embedding-3-small"
808
- );
809
- const embeddingDimension = Number.parseInt(
810
- getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536",
811
- 10
812
- );
813
- logger.debug(
814
- `[OpenAI] Using embedding model: ${embeddingModelName} with dimension: ${embeddingDimension}`
815
- );
816
- if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
817
- const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
818
- logger.error(errorMsg);
819
- throw new Error(errorMsg);
820
- }
821
- if (params === null) {
822
- logger.debug("Creating test embedding for initialization");
823
- const testVector = Array(embeddingDimension).fill(0);
824
- testVector[0] = 0.1;
825
- return testVector;
826
- }
827
- let text;
828
- if (typeof params === "string") {
829
- text = params;
830
- } else if (typeof params === "object" && params.text) {
831
- text = params.text;
832
- } else {
833
- logger.warn("Invalid input format for embedding");
834
- const fallbackVector = Array(embeddingDimension).fill(0);
835
- fallbackVector[0] = 0.2;
836
- return fallbackVector;
837
- }
838
- if (!text.trim()) {
839
- logger.warn("Empty text for embedding");
840
- const emptyVector = Array(embeddingDimension).fill(0);
841
- emptyVector[0] = 0.3;
842
- return emptyVector;
843
- }
844
- const attributes = {
845
- "llm.vendor": "OpenAI",
846
- "llm.request.type": "embedding",
847
- "llm.request.model": embeddingModelName,
848
- "llm.request.embedding.dimensions": embeddingDimension,
849
- "input.text.length": text.length
850
- };
851
- return startLlmSpan(runtime, "LLM.embedding", attributes, async (span) => {
852
- span.addEvent("llm.prompt", { "prompt.content": text });
853
- const baseURL = getBaseURL(runtime);
854
- const apiKey = getApiKey(runtime);
855
- if (!apiKey) {
856
- span.setStatus({
857
- code: SpanStatusCode.ERROR,
858
- message: "OpenAI API key not configured"
859
- });
860
- throw new Error("OpenAI API key not configured");
861
- }
862
- try {
863
- const response = await fetch(`${baseURL}/embeddings`, {
864
- method: "POST",
865
- headers: {
866
- Authorization: `Bearer ${apiKey}`,
867
- "Content-Type": "application/json"
868
- },
869
- body: JSON.stringify({
870
- model: embeddingModelName,
871
- input: text
872
- })
873
- });
874
- const responseClone = response.clone();
875
- const rawResponseBody = await responseClone.text();
876
- span.addEvent("llm.response.raw", {
877
- "response.body": rawResponseBody
878
- });
879
- if (!response.ok) {
880
- logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
881
- span.setAttributes({ "error.api.status": response.status });
882
- span.setStatus({
883
- code: SpanStatusCode.ERROR,
884
- message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
885
- });
886
- const errorVector = Array(embeddingDimension).fill(0);
887
- errorVector[0] = 0.4;
888
- return errorVector;
889
- }
890
- const data = await response.json();
891
- if (!data?.data?.[0]?.embedding) {
892
- logger.error("API returned invalid structure");
893
- span.setStatus({
894
- code: SpanStatusCode.ERROR,
895
- message: "API returned invalid structure"
896
- });
897
- const errorVector = Array(embeddingDimension).fill(0);
898
- errorVector[0] = 0.5;
899
- return errorVector;
900
- }
901
- const embedding = data.data[0].embedding;
902
- span.setAttribute("llm.response.embedding.vector_length", embedding.length);
903
- if (data.usage) {
904
- span.setAttributes({
905
- "llm.usage.prompt_tokens": data.usage.prompt_tokens,
906
- "llm.usage.total_tokens": data.usage.total_tokens
907
- });
908
- }
909
- logger.log(`Got valid embedding with length ${embedding.length}`);
910
- return embedding;
911
- } catch (error) {
912
- const message = error instanceof Error ? error.message : String(error);
913
- logger.error(`Error generating embedding: ${message}`);
914
- const exception = error instanceof Error ? error : new Error(message);
915
- span.recordException(exception);
916
- span.setStatus({ code: SpanStatusCode.ERROR, message });
917
- const errorVector = Array(embeddingDimension).fill(0);
918
- errorVector[0] = 0.6;
919
- return errorVector;
920
- }
921
- });
922
- },
923
- [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
924
- return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
925
- },
926
- [ModelType.TEXT_TOKENIZER_DECODE]: async (_runtime, { tokens, modelType = ModelType.TEXT_LARGE }) => {
927
- return await detokenizeText(modelType ?? ModelType.TEXT_LARGE, tokens);
928
- },
929
- [ModelType.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
930
- const temperature = 0.7;
931
- const frequency_penalty = 0.7;
932
- const presence_penalty = 0.7;
933
- const max_response_length = 8192;
934
- const openai = createOpenAIClient(runtime);
935
- const modelName = getSmallModel(runtime);
936
- logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
937
- logger.log(prompt);
938
- const attributes = {
939
- "llm.vendor": "OpenAI",
940
- "llm.request.type": "completion",
941
- "llm.request.model": modelName,
942
- "llm.request.temperature": temperature,
943
- "llm.request.max_tokens": max_response_length,
944
- "llm.request.frequency_penalty": frequency_penalty,
945
- "llm.request.presence_penalty": presence_penalty,
946
- "llm.request.stop_sequences": JSON.stringify(stopSequences)
947
- };
948
- return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
949
- span.addEvent("llm.prompt", { "prompt.content": prompt });
950
- const { text: openaiResponse, usage } = await generateText({
951
- model: openai.languageModel(modelName),
952
- prompt,
953
- system: runtime.character.system ?? void 0,
954
- temperature,
955
- maxTokens: max_response_length,
956
- frequencyPenalty: frequency_penalty,
957
- presencePenalty: presence_penalty,
958
- stopSequences
959
- });
960
- span.setAttribute("llm.response.processed.length", openaiResponse.length);
961
- span.addEvent("llm.response.processed", {
962
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
963
- });
964
- if (usage) {
965
- span.setAttributes({
966
- "llm.usage.prompt_tokens": usage.promptTokens,
967
- "llm.usage.completion_tokens": usage.completionTokens,
968
- "llm.usage.total_tokens": usage.totalTokens
969
- });
970
- emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
971
- }
972
- return openaiResponse;
973
- });
974
- },
975
- [ModelType.TEXT_LARGE]: async (runtime, {
976
- prompt,
977
- stopSequences = [],
978
- maxTokens = 8192,
979
- temperature = 0.7,
980
- frequencyPenalty = 0.7,
981
- presencePenalty = 0.7
982
- }) => {
983
- const openai = createOpenAIClient(runtime);
984
- const modelName = getLargeModel(runtime);
985
- logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
986
- logger.log(prompt);
987
- const attributes = {
988
- "llm.vendor": "OpenAI",
989
- "llm.request.type": "completion",
990
- "llm.request.model": modelName,
991
- "llm.request.temperature": temperature,
992
- "llm.request.max_tokens": maxTokens,
993
- "llm.request.frequency_penalty": frequencyPenalty,
994
- "llm.request.presence_penalty": presencePenalty,
995
- "llm.request.stop_sequences": JSON.stringify(stopSequences)
996
- };
997
- return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
998
- span.addEvent("llm.prompt", { "prompt.content": prompt });
999
- const { text: openaiResponse, usage } = await generateText({
1000
- model: openai.languageModel(modelName),
1001
- prompt,
1002
- system: runtime.character.system ?? void 0,
1003
- temperature,
1004
- maxTokens,
1005
- frequencyPenalty,
1006
- presencePenalty,
1007
- stopSequences
1008
- });
1009
- span.setAttribute("llm.response.processed.length", openaiResponse.length);
1010
- span.addEvent("llm.response.processed", {
1011
- "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
1012
- });
1013
- if (usage) {
1014
- span.setAttributes({
1015
- "llm.usage.prompt_tokens": usage.promptTokens,
1016
- "llm.usage.completion_tokens": usage.completionTokens,
1017
- "llm.usage.total_tokens": usage.totalTokens
1018
- });
1019
- emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
1020
- }
1021
- return openaiResponse;
1022
- });
1023
- },
1024
- [ModelType.IMAGE]: async (runtime, params) => {
1025
- const n = params.n || 1;
1026
- const size = params.size || "1024x1024";
1027
- const prompt = params.prompt;
1028
- const modelName = "dall-e-3";
1029
- logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
1030
- const attributes = {
1031
- "llm.vendor": "OpenAI",
1032
- "llm.request.type": "image_generation",
1033
- "llm.request.image.size": size,
1034
- "llm.request.image.count": n
1035
- };
1036
- return startLlmSpan(runtime, "LLM.imageGeneration", attributes, async (span) => {
1037
- span.addEvent("llm.prompt", { "prompt.content": prompt });
1038
- const baseURL = getBaseURL(runtime);
1039
- const apiKey = getApiKey(runtime);
1040
- if (!apiKey) {
1041
- span.setStatus({
1042
- code: SpanStatusCode.ERROR,
1043
- message: "OpenAI API key not configured"
1044
- });
1045
- throw new Error("OpenAI API key not configured");
1046
- }
1047
- try {
1048
- const response = await fetch(`${baseURL}/images/generations`, {
1049
- method: "POST",
1050
- headers: {
1051
- Authorization: `Bearer ${apiKey}`,
1052
- "Content-Type": "application/json"
1053
- },
1054
- body: JSON.stringify({
1055
- prompt,
1056
- n,
1057
- size
1058
- })
1059
- });
1060
- const responseClone = response.clone();
1061
- const rawResponseBody = await responseClone.text();
1062
- span.addEvent("llm.response.raw", {
1063
- "response.body": rawResponseBody
1064
- });
1065
- if (!response.ok) {
1066
- span.setAttributes({ "error.api.status": response.status });
1067
- span.setStatus({
1068
- code: SpanStatusCode.ERROR,
1069
- message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
1070
- });
1071
- throw new Error(`Failed to generate image: ${response.statusText}`);
1072
- }
1073
- const data = await response.json();
1074
- const typedData = data;
1075
- span.addEvent("llm.response.processed", {
1076
- "response.urls": JSON.stringify(typedData.data)
1077
- });
1078
- return typedData.data;
1079
- } catch (error) {
1080
- const message = error instanceof Error ? error.message : String(error);
1081
- const exception = error instanceof Error ? error : new Error(message);
1082
- span.recordException(exception);
1083
- span.setStatus({ code: SpanStatusCode.ERROR, message });
1084
- throw error;
1085
- }
1086
- });
1087
- },
1088
- [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
1089
- let imageUrl;
1090
- let promptText;
1091
- const modelName = "gpt-4o-mini";
1092
- logger.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${modelName}`);
1093
- const maxTokens = 300;
1094
- if (typeof params === "string") {
1095
- imageUrl = params;
1096
- promptText = "Please analyze this image and provide a title and detailed description.";
1097
- } else {
1098
- imageUrl = params.imageUrl;
1099
- promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
1100
- }
1101
- const attributes = {
1102
- "llm.vendor": "OpenAI",
1103
- "llm.request.type": "chat",
1104
- "llm.request.model": modelName,
1105
- "llm.request.max_tokens": maxTokens,
1106
- "llm.request.image.url": imageUrl
1107
- };
1108
- const messages = [
1109
- {
1110
- role: "user",
1111
- content: [
1112
- { type: "text", text: promptText },
1113
- { type: "image_url", image_url: { url: imageUrl } }
1114
- ]
1115
- }
1116
- ];
1117
- return startLlmSpan(runtime, "LLM.imageDescription", attributes, async (span) => {
1118
- span.addEvent("llm.prompt", {
1119
- "prompt.content": JSON.stringify(messages, safeReplacer())
1120
- });
1121
- const baseURL = getBaseURL(runtime);
1122
- const apiKey = getApiKey(runtime);
1123
- if (!apiKey) {
1124
- logger.error("OpenAI API key not set");
1125
- span.setStatus({
1126
- code: SpanStatusCode.ERROR,
1127
- message: "OpenAI API key not configured"
1128
- });
1129
- return {
1130
- title: "Failed to analyze image",
1131
- description: "API key not configured"
1132
- };
1133
- }
1134
- try {
1135
- const response = await fetch(`${baseURL}/chat/completions`, {
1136
- method: "POST",
1137
- headers: {
1138
- "Content-Type": "application/json",
1139
- Authorization: `Bearer ${apiKey}`
1140
- },
1141
- body: JSON.stringify({
1142
- model: modelName,
1143
- messages,
1144
- max_tokens: maxTokens
1145
- })
1146
- });
1147
- const responseClone = response.clone();
1148
- const rawResponseBody = await responseClone.text();
1149
- span.addEvent("llm.response.raw", {
1150
- "response.body": rawResponseBody
1151
- });
1152
- if (!response.ok) {
1153
- span.setAttributes({ "error.api.status": response.status });
1154
- span.setStatus({
1155
- code: SpanStatusCode.ERROR,
1156
- message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
1157
- });
1158
- throw new Error(`OpenAI API error: ${response.status}`);
1159
- }
1160
- const result = await response.json();
1161
- const typedResult = result;
1162
- const content = typedResult.choices?.[0]?.message?.content;
1163
- if (typedResult.usage) {
1164
- span.setAttributes({
1165
- "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
1166
- "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
1167
- "llm.usage.total_tokens": typedResult.usage.total_tokens
1168
- });
1169
- }
1170
- if (typedResult.choices?.[0]?.finish_reason) {
1171
- span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
1172
- }
1173
- if (!content) {
1174
- span.setStatus({
1175
- code: SpanStatusCode.ERROR,
1176
- message: "No content in API response"
1177
- });
1178
- return {
1179
- title: "Failed to analyze image",
1180
- description: "No response from API"
1181
- };
1182
- }
1183
- const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
1184
- const title = titleMatch?.[1]?.trim() || "Image Analysis";
1185
- const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
1186
- const processedResult = { title, description };
1187
- span.addEvent("llm.response.processed", {
1188
- "response.object": JSON.stringify(processedResult, safeReplacer())
1189
- });
1190
- return processedResult;
1191
- } catch (error) {
1192
- const message = error instanceof Error ? error.message : String(error);
1193
- logger.error(`Error analyzing image: ${message}`);
1194
- const exception = error instanceof Error ? error : new Error(message);
1195
- span.recordException(exception);
1196
- span.setStatus({ code: SpanStatusCode.ERROR, message });
1197
- return {
1198
- title: "Failed to analyze image",
1199
- description: `Error: ${message}`
1200
- };
1201
- }
1202
- });
1203
- },
1204
- [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
1205
- logger.log("audioBuffer", audioBuffer);
1206
- const modelName = "whisper-1";
1207
- logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
1208
- const attributes = {
1209
- "llm.vendor": "OpenAI",
1210
- "llm.request.type": "transcription",
1211
- "llm.request.model": modelName,
1212
- "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
1213
- };
1214
- return startLlmSpan(runtime, "LLM.transcription", attributes, async (span) => {
1215
- span.addEvent("llm.prompt", {
1216
- "prompt.info": "Audio buffer for transcription"
1217
- });
1218
- const baseURL = getBaseURL(runtime);
1219
- const apiKey = getApiKey(runtime);
1220
- if (!apiKey) {
1221
- span.setStatus({
1222
- code: SpanStatusCode.ERROR,
1223
- message: "OpenAI API key not configured"
1224
- });
1225
- throw new Error("OpenAI API key not configured - Cannot make request");
1226
- }
1227
- if (!audioBuffer || audioBuffer.length === 0) {
1228
- span.setStatus({
1229
- code: SpanStatusCode.ERROR,
1230
- message: "Audio buffer is empty or invalid"
1231
- });
1232
- throw new Error("Audio buffer is empty or invalid for transcription");
1233
- }
1234
- const formData = new FormData();
1235
- formData.append("file", new Blob([audioBuffer]), "recording.mp3");
1236
- formData.append("model", "whisper-1");
1237
- try {
1238
- const response = await fetch(`${baseURL}/audio/transcriptions`, {
1239
- method: "POST",
1240
- headers: {
1241
- Authorization: `Bearer ${apiKey}`
1242
- },
1243
- body: formData
1244
- });
1245
- const responseClone = response.clone();
1246
- const rawResponseBody = await responseClone.text();
1247
- span.addEvent("llm.response.raw", {
1248
- "response.body": rawResponseBody
1249
- });
1250
- logger.log("response", response);
1251
- if (!response.ok) {
1252
- span.setAttributes({ "error.api.status": response.status });
1253
- span.setStatus({
1254
- code: SpanStatusCode.ERROR,
1255
- message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
1256
- });
1257
- throw new Error(`Failed to transcribe audio: ${response.statusText}`);
1258
- }
1259
- const data = await response.json();
1260
- const processedText = data.text;
1261
- span.setAttribute("llm.response.processed.length", processedText.length);
1262
- span.addEvent("llm.response.processed", {
1263
- "response.text": processedText
1264
- });
1265
- return processedText;
1266
- } catch (error) {
1267
- const message = error instanceof Error ? error.message : String(error);
1268
- const exception = error instanceof Error ? error : new Error(message);
1269
- span.recordException(exception);
1270
- span.setStatus({ code: SpanStatusCode.ERROR, message });
1271
- throw error;
1272
- }
1273
- });
1274
- },
1275
- [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
1276
- const ttsModelName = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
1277
- const attributes = {
1278
- "llm.vendor": "OpenAI",
1279
- "llm.request.type": "tts",
1280
- "llm.request.model": ttsModelName,
1281
- "input.text.length": text.length
1282
- };
1283
- return startLlmSpan(runtime, "LLM.tts", attributes, async (span) => {
1284
- logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${ttsModelName}`);
1285
- span.addEvent("llm.prompt", { "prompt.content": text });
1286
- try {
1287
- const speechStream = await fetchTextToSpeech(runtime, text);
1288
- span.addEvent("llm.response.success", {
1289
- info: "Speech stream generated"
1290
- });
1291
- return speechStream;
1292
- } catch (error) {
1293
- const message = error instanceof Error ? error.message : String(error);
1294
- const exception = error instanceof Error ? error : new Error(message);
1295
- span.recordException(exception);
1296
- span.setStatus({ code: SpanStatusCode.ERROR, message });
1297
- throw error;
1298
- }
1299
- });
1300
- },
1301
- [ModelType.OBJECT_SMALL]: async (runtime, params) => {
1302
- return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
1303
- },
1304
- [ModelType.OBJECT_LARGE]: async (runtime, params) => {
1305
- return generateObjectByModelType(runtime, params, ModelType.OBJECT_LARGE, getLargeModel);
1306
- }
1307
- },
1308
- tests: [
1309
- {
1310
- name: "openai_plugin_tests",
1311
- tests: [
1312
- {
1313
- name: "openai_test_url_and_api_key_validation",
1314
- fn: async (runtime) => {
1315
- const baseURL = getBaseURL(runtime);
1316
- const response = await fetch(`${baseURL}/models`, {
1317
- headers: {
1318
- Authorization: `Bearer ${getApiKey(runtime)}`
1319
- }
1320
- });
1321
- const data = await response.json();
1322
- logger.log("Models Available:", data?.data?.length ?? "N/A");
1323
- if (!response.ok) {
1324
- throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
1325
- }
1326
- }
1327
- },
1328
- {
1329
- name: "openai_test_text_embedding",
1330
- fn: async (runtime) => {
1331
- try {
1332
- const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
1333
- text: "Hello, world!"
1334
- });
1335
- logger.log("embedding", embedding);
1336
- } catch (error) {
1337
- const message = error instanceof Error ? error.message : String(error);
1338
- logger.error(`Error in test_text_embedding: ${message}`);
1339
- throw error;
1340
- }
1341
- }
1342
- },
1343
- {
1344
- name: "openai_test_text_large",
1345
- fn: async (runtime) => {
1346
- try {
1347
- const text = await runtime.useModel(ModelType.TEXT_LARGE, {
1348
- prompt: "What is the nature of reality in 10 words?"
1349
- });
1350
- if (text.length === 0) {
1351
- throw new Error("Failed to generate text");
1352
- }
1353
- logger.log("generated with test_text_large:", text);
1354
- } catch (error) {
1355
- const message = error instanceof Error ? error.message : String(error);
1356
- logger.error(`Error in test_text_large: ${message}`);
1357
- throw error;
1358
- }
1359
- }
1360
- },
1361
- {
1362
- name: "openai_test_text_small",
1363
- fn: async (runtime) => {
1364
- try {
1365
- const text = await runtime.useModel(ModelType.TEXT_SMALL, {
1366
- prompt: "What is the nature of reality in 10 words?"
1367
- });
1368
- if (text.length === 0) {
1369
- throw new Error("Failed to generate text");
1370
- }
1371
- logger.log("generated with test_text_small:", text);
1372
- } catch (error) {
1373
- const message = error instanceof Error ? error.message : String(error);
1374
- logger.error(`Error in test_text_small: ${message}`);
1375
- throw error;
1376
- }
1377
- }
1378
- },
1379
- {
1380
- name: "openai_test_image_generation",
1381
- fn: async (runtime) => {
1382
- logger.log("openai_test_image_generation");
1383
- try {
1384
- const image = await runtime.useModel(ModelType.IMAGE, {
1385
- prompt: "A beautiful sunset over a calm ocean",
1386
- n: 1,
1387
- size: "1024x1024"
1388
- });
1389
- logger.log("generated with test_image_generation:", image);
1390
- } catch (error) {
1391
- const message = error instanceof Error ? error.message : String(error);
1392
- logger.error(`Error in test_image_generation: ${message}`);
1393
- throw error;
1394
- }
1395
- }
1396
- },
1397
- {
1398
- name: "image-description",
1399
- fn: async (runtime) => {
1400
- try {
1401
- logger.log("openai_test_image_description");
1402
- try {
1403
- const result = await runtime.useModel(
1404
- ModelType.IMAGE_DESCRIPTION,
1405
- "https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg"
1406
- );
1407
- if (result && typeof result === "object" && "title" in result && "description" in result) {
1408
- logger.log("Image description:", result);
1409
- } else {
1410
- logger.error("Invalid image description result format:", result);
1411
- }
1412
- } catch (e) {
1413
- const message = e instanceof Error ? e.message : String(e);
1414
- logger.error(`Error in image description test: ${message}`);
1415
- }
1416
- } catch (e) {
1417
- const message = e instanceof Error ? e.message : String(e);
1418
- logger.error(`Error in openai_test_image_description: ${message}`);
1419
- }
1420
- }
1421
- },
1422
- {
1423
- name: "openai_test_transcription",
1424
- fn: async (runtime) => {
1425
- logger.log("openai_test_transcription");
1426
- try {
1427
- const response = await fetch(
1428
- "https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg"
1429
- );
1430
- const arrayBuffer = await response.arrayBuffer();
1431
- const transcription = await runtime.useModel(
1432
- ModelType.TRANSCRIPTION,
1433
- Buffer.from(new Uint8Array(arrayBuffer))
1434
- );
1435
- logger.log("generated with test_transcription:", transcription);
1436
- } catch (error) {
1437
- const message = error instanceof Error ? error.message : String(error);
1438
- logger.error(`Error in test_transcription: ${message}`);
1439
- throw error;
1440
- }
1441
- }
1442
- },
1443
- {
1444
- name: "openai_test_text_tokenizer_encode",
1445
- fn: async (runtime) => {
1446
- const prompt = "Hello tokenizer encode!";
1447
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
1448
- if (!Array.isArray(tokens) || tokens.length === 0) {
1449
- throw new Error("Failed to tokenize text: expected non-empty array of tokens");
1450
- }
1451
- logger.log("Tokenized output:", tokens);
1452
- }
1453
- },
1454
- {
1455
- name: "openai_test_text_tokenizer_decode",
1456
- fn: async (runtime) => {
1457
- const prompt = "Hello tokenizer decode!";
1458
- const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
1459
- const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, { tokens });
1460
- if (decodedText !== prompt) {
1461
- throw new Error(
1462
- `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`
1463
- );
1464
- }
1465
- logger.log("Decoded text:", decodedText);
1466
- }
1467
- },
1468
- {
1469
- name: "openai_test_text_to_speech",
1470
- fn: async (runtime) => {
1471
- try {
1472
- const text = "Hello, this is a test for text-to-speech.";
1473
- const response = await fetchTextToSpeech(runtime, text);
1474
- if (!response) {
1475
- throw new Error("Failed to generate speech");
1476
- }
1477
- logger.log("Generated speech successfully");
1478
- } catch (error) {
1479
- const message = error instanceof Error ? error.message : String(error);
1480
- logger.error(`Error in openai_test_text_to_speech: ${message}`);
1481
- throw error;
1482
- }
1483
- }
1484
- }
1485
- ]
1486
- }
1487
- ]
1488
- };
1489
- var index_default = openaiPlugin;
1490
- export {
1491
- index_default as default,
1492
- openaiPlugin
1493
- };
1
+ import{createOpenAI as Ae}from"@ai-sdk/openai";import{getProviderBaseURL as he}from"@elizaos/core";import{EventType as Ie,logger as c,ModelType as d,VECTOR_DIMS as Y,safeReplacer as k,ServiceType as G}from"@elizaos/core";import{generateObject as Oe,generateText as H,JSONParseError as re}from"ai";import{encodingForModel as ne}from"js-tiktoken";import{fetch as v,FormData as ve}from"undici";var j=typeof globalThis=="object"?globalThis:global;var I="1.9.0";var q=/^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;function ae(t){var e=new Set([t]),r=new Set,n=t.match(q);if(!n)return function(){return!1};var o={major:+n[1],minor:+n[2],patch:+n[3],prerelease:n[4]};if(o.prerelease!=null)return function(p){return p===t};function s(l){return r.add(l),!1}function a(l){return e.add(l),!0}return function(p){if(e.has(p))return!0;if(r.has(p))return!1;var u=p.match(q);if(!u)return s(p);var i={major:+u[1],minor:+u[2],patch:+u[3],prerelease:u[4]};return i.prerelease!=null||o.major!==i.major?s(p):o.major===0?o.minor===i.minor&&o.patch<=i.patch?a(p):s(p):o.minor<=i.minor?a(p):s(p)}}var z=ae(I);var ie=I.split(".")[0],w=Symbol.for("opentelemetry.js.api."+ie),x=j;function M(t,e,r,n){var o;n===void 0&&(n=!1);var s=x[w]=(o=x[w])!==null&&o!==void 0?o:{version:I};if(!n&&s[t]){var a=new Error("@opentelemetry/api: Attempted duplicate registration of API: "+t);return r.error(a.stack||a.message),!1}if(s.version!==I){var a=new Error("@opentelemetry/api: Registration of version v"+s.version+" for "+t+" does not match previously registered API v"+I);return r.error(a.stack||a.message),!1}return s[t]=e,r.debug("@opentelemetry/api: Registered a global for "+t+" v"+I+"."),!0}function b(t){var e,r,n=(e=x[w])===null||e===void 0?void 0:e.version;if(!(!n||!z(n)))return(r=x[w])===null||r===void 0?void 0:r[t]}function P(t,e){e.debug("@opentelemetry/api: Unregistering a global for "+t+" v"+I+".");var r=x[w];r&&delete r[t]}var ce=function(t,e){var r=typeof Symbol=="function"&&t[Symbol.iterator];if(!r)return t;var 
n=r.call(t),o,s=[],a;try{for(;(e===void 0||e-- >0)&&!(o=n.next()).done;)s.push(o.value)}catch(l){a={error:l}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return s},le=function(t,e,r){if(r||arguments.length===2)for(var n=0,o=e.length,s;n<o;n++)(s||!(n in e))&&(s||(s=Array.prototype.slice.call(e,0,n)),s[n]=e[n]);return t.concat(s||Array.prototype.slice.call(e))},F=function(){function t(e){this._namespace=e.namespace||"DiagComponentLogger"}return t.prototype.debug=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return S("debug",this._namespace,e)},t.prototype.error=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return S("error",this._namespace,e)},t.prototype.info=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return S("info",this._namespace,e)},t.prototype.warn=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return S("warn",this._namespace,e)},t.prototype.verbose=function(){for(var e=[],r=0;r<arguments.length;r++)e[r]=arguments[r];return S("verbose",this._namespace,e)},t}();function S(t,e,r){var n=b("diag");if(n)return r.unshift(e),n[t].apply(n,le([],ce(r),!1))}var y;(function(t){t[t.NONE=0]="NONE",t[t.ERROR=30]="ERROR",t[t.WARN=50]="WARN",t[t.INFO=60]="INFO",t[t.DEBUG=70]="DEBUG",t[t.VERBOSE=80]="VERBOSE",t[t.ALL=9999]="ALL"})(y||(y={}));function J(t,e){t<y.NONE?t=y.NONE:t>y.ALL&&(t=y.ALL),e=e||{};function r(n,o){var s=e[n];return typeof s=="function"&&t>=o?s.bind(e):function(){}}return{error:r("error",y.ERROR),warn:r("warn",y.WARN),info:r("info",y.INFO),debug:r("debug",y.DEBUG),verbose:r("verbose",y.VERBOSE)}}var pe=function(t,e){var r=typeof Symbol=="function"&&t[Symbol.iterator];if(!r)return t;var n=r.call(t),o,s=[],a;try{for(;(e===void 0||e-- >0)&&!(o=n.next()).done;)s.push(o.value)}catch(l){a={error:l}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return s},ue=function(t,e,r){if(r||arguments.length===2)for(var 
n=0,o=e.length,s;n<o;n++)(s||!(n in e))&&(s||(s=Array.prototype.slice.call(e,0,n)),s[n]=e[n]);return t.concat(s||Array.prototype.slice.call(e))},me="diag",D=function(){function t(){function e(o){return function(){for(var s=[],a=0;a<arguments.length;a++)s[a]=arguments[a];var l=b("diag");if(l)return l[o].apply(l,ue([],pe(s),!1))}}var r=this,n=function(o,s){var a,l,p;if(s===void 0&&(s={logLevel:y.INFO}),o===r){var u=new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");return r.error((a=u.stack)!==null&&a!==void 0?a:u.message),!1}typeof s=="number"&&(s={logLevel:s});var i=b("diag"),m=J((l=s.logLevel)!==null&&l!==void 0?l:y.INFO,o);if(i&&!s.suppressOverrideMessage){var g=(p=new Error().stack)!==null&&p!==void 0?p:"<failed to generate stacktrace>";i.warn("Current logger will be overwritten from "+g),m.warn("Current logger will overwrite one already registered from "+g)}return M("diag",m,r,!0)};r.setLogger=n,r.disable=function(){P(me,r)},r.createComponentLogger=function(o){return new F(o)},r.verbose=e("verbose"),r.debug=e("debug"),r.info=e("info"),r.warn=e("warn"),r.error=e("error")}return t.instance=function(){return this._instance||(this._instance=new t),this._instance},t}();var ge=function(){function t(e){var r=this;r._currentContext=e?new Map(e):new Map,r.getValue=function(n){return r._currentContext.get(n)},r.setValue=function(n,o){var s=new t(r._currentContext);return s._currentContext.set(n,o),s},r.deleteValue=function(n){var o=new t(r._currentContext);return o._currentContext.delete(n),o}}return t}(),X=new ge;var de=function(t,e){var r=typeof Symbol=="function"&&t[Symbol.iterator];if(!r)return t;var n=r.call(t),o,s=[],a;try{for(;(e===void 0||e-- >0)&&!(o=n.next()).done;)s.push(o.value)}catch(l){a={error:l}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return s},fe=function(t,e,r){if(r||arguments.length===2)for(var 
n=0,o=e.length,s;n<o;n++)(s||!(n in e))&&(s||(s=Array.prototype.slice.call(e,0,n)),s[n]=e[n]);return t.concat(s||Array.prototype.slice.call(e))},V=function(){function t(){}return t.prototype.active=function(){return X},t.prototype.with=function(e,r,n){for(var o=[],s=3;s<arguments.length;s++)o[s-3]=arguments[s];return r.call.apply(r,fe([n],de(o),!1))},t.prototype.bind=function(e,r){return r},t.prototype.enable=function(){return this},t.prototype.disable=function(){return this},t}();var Ee=function(t,e){var r=typeof Symbol=="function"&&t[Symbol.iterator];if(!r)return t;var n=r.call(t),o,s=[],a;try{for(;(e===void 0||e-- >0)&&!(o=n.next()).done;)s.push(o.value)}catch(l){a={error:l}}finally{try{o&&!o.done&&(r=n.return)&&r.call(n)}finally{if(a)throw a.error}}return s},_e=function(t,e,r){if(r||arguments.length===2)for(var n=0,o=e.length,s;n<o;n++)(s||!(n in e))&&(s||(s=Array.prototype.slice.call(e,0,n)),s[n]=e[n]);return t.concat(s||Array.prototype.slice.call(e))},C="context",ye=new V,K=function(){function t(){}return t.getInstance=function(){return this._instance||(this._instance=new t),this._instance},t.prototype.setGlobalContextManager=function(e){return M(C,e,D.instance())},t.prototype.active=function(){return this._getContextManager().active()},t.prototype.with=function(e,r,n){for(var o,s=[],a=3;a<arguments.length;a++)s[a-3]=arguments[a];return(o=this._getContextManager()).with.apply(o,_e([e,r,n],Ee(s),!1))},t.prototype.bind=function(e,r){return this._getContextManager().bind(e,r)},t.prototype._getContextManager=function(){return b(C)||ye},t.prototype.disable=function(){this._getContextManager().disable(),P(C,D.instance())},t}();var f;(function(t){t[t.UNSET=0]="UNSET",t[t.OK=1]="OK",t[t.ERROR=2]="ERROR"})(f||(f={}));var $=K.getInstance();function Te(t){let e=Array.from(t.getAllServices().keys());c.debug(`[getTracer] Available services: ${JSON.stringify(e)}`),c.debug(`[getTracer] Attempting to get service with key: ${G.INSTRUMENTATION}`);let 
r=t.getService(G.INSTRUMENTATION);return r?r.isEnabled()?(c.debug("[getTracer] Successfully retrieved enabled instrumentation service."),r.getTracer("eliza.llm.openai")):(c.debug("[getTracer] Instrumentation service found but is disabled."),null):(c.warn(`[getTracer] Service ${G.INSTRUMENTATION} not found in runtime.`),null)}async function T(t,e,r,n){let o=Te(t);if(!o)return n({setAttribute:()=>{},setAttributes:()=>{},addEvent:()=>{},recordException:()=>{},setStatus:()=>{},end:()=>{},spanContext:()=>({traceId:"",spanId:"",traceFlags:0})});let s=$.active();return o.startActiveSpan(e,{attributes:r},s,async a=>{try{let l=await n(a);return a.setStatus({code:f.OK}),a.end(),l}catch(l){let p=l instanceof Error?l.message:String(l);throw a.recordException(l),a.setStatus({code:f.ERROR,message:p}),a.end(),l}})}function A(t,e,r){return t.getSetting(e)??process.env[e]??r}function R(t){let e=A(t,"OPENAI_BASE_URL","https://api.openai.com/v1");return c.debug(`[OpenAI] Default base URL: ${e}`),he(t,"openai",e)}function Re(t){let e=A(t,"OPENAI_EMBEDDING_URL");return e?(c.debug(`[OpenAI] Using specific embedding base URL: ${e}`),e):(c.debug("[OpenAI] Falling back to general base URL for embeddings."),R(t))}function O(t){return A(t,"OPENAI_API_KEY")}function W(t){return A(t,"OPENAI_SMALL_MODEL")??A(t,"SMALL_MODEL","gpt-4o-mini")}function Z(t){return A(t,"OPENAI_LARGE_MODEL")??A(t,"LARGE_MODEL","gpt-4o")}function B(t){return Ae({apiKey:O(t),baseURL:R(t)})}async function be(t,e){let r=t===d.TEXT_SMALL?process.env.OPENAI_SMALL_MODEL??process.env.SMALL_MODEL??"gpt-4o-mini":process.env.LARGE_MODEL??"gpt-4o";return ne(r).encode(e)}async function we(t,e){let r=t===d.TEXT_SMALL?process.env.OPENAI_SMALL_MODEL??process.env.SMALL_MODEL??"gpt-4o-mini":process.env.OPENAI_LARGE_MODEL??process.env.LARGE_MODEL??"gpt-4o";return ne(r).decode(e)}async function Q(t,e,r,n){let o=B(t),s=n(t);c.log(`[OpenAI] Using ${r} model: ${s}`);let a=e.temperature??0,l=!!e.schema;return 
T(t,"LLM.generateObject",{"llm.vendor":"OpenAI","llm.request.type":"object_generation","llm.request.model":s,"llm.request.temperature":a,"llm.request.schema_present":l},async u=>{u.addEvent("llm.prompt",{"prompt.content":e.prompt}),l&&(u.addEvent("llm.request.schema",{schema:JSON.stringify(e.schema,k())}),c.info(`Using ${r} without schema validation (schema provided but output=no-schema)`));
// Happy path: generateObject in "no-schema" mode; token usage is attached to the span and re-emitted via N().
try{let{object:i,usage:m}=await Oe({model:o.languageModel(s),output:"no-schema",prompt:e.prompt,temperature:a,experimental_repairText:ee()});return u.addEvent("llm.response.processed",{"response.object":JSON.stringify(i,k())}),m&&(u.setAttributes({"llm.usage.prompt_tokens":m.promptTokens,"llm.usage.completion_tokens":m.completionTokens,"llm.usage.total_tokens":m.totalTokens}),N(t,r,e.prompt,m)),i}
// On JSONParseError (re), retry once through the ee() repair pass before giving up; any other error is re-thrown unchanged.
catch(i){if(i instanceof re){c.error(`[generateObject] Failed to parse JSON: ${i.message}`),u.recordException(i),u.addEvent("llm.error.json_parse",{"error.message":i.message,"error.text":i.text}),u.addEvent("llm.repair.attempt");let g=await ee()({text:i.text,error:i});if(g)try{let E=JSON.parse(g);return u.addEvent("llm.repair.success",{repaired_object:JSON.stringify(E,k())}),c.info("[generateObject] Successfully repaired JSON."),u.setStatus({code:f.ERROR,message:"JSON parsing failed but was repaired"}),E}catch(E){let _=E instanceof Error?E.message:String(E);c.error(`[generateObject] Failed to parse repaired JSON: ${_}`);let h=E instanceof Error?E:new Error(_);throw u.recordException(h),u.addEvent("llm.repair.parse_error",{"error.message":_}),u.setStatus({code:f.ERROR,message:`JSON repair failed: ${_}`}),E}else{let E=i instanceof Error?i.message:String(i);throw c.error("[generateObject] JSON repair failed."),u.addEvent("llm.repair.failed"),u.setStatus({code:f.ERROR,message:`JSON repair failed: ${E}`}),i}}else{let m=i instanceof Error?i.message:String(i);c.error(`[generateObject] Unknown error: ${m}`);let g=i instanceof Error?i:new Error(m);throw
u.recordException(g),u.setStatus({code:f.ERROR,message:m}),i}}})}
// ee(): repair callback for generateObject — strips markdown code fences from the raw text,
// verifies the remainder parses as JSON, and returns it; returns null when unrepairable.
function ee(){return async({text:t,error:e})=>{try{if(e instanceof re){let r=t.replace(/```json\n|\n```|```/g,"");return JSON.parse(r),r}return null}catch(r){let n=r instanceof Error?r.message:String(r);return c.warn(`Failed to repair JSON text: ${n}`),null}}}
// N: emit the runtime MODEL_USED event with provider, prompt and token usage for observability.
function N(t,e,r,n){t.emitEvent(Ie.MODEL_USED,{provider:"openai",type:e,prompt:r,tokens:{prompt:n.promptTokens,completion:n.completionTokens,total:n.totalTokens}})}
// te: POST /audio/speech (TTS) and return the streaming response body; model/voice/instructions come from settings.
async function te(t,e){let r=O(t),n=A(t,"OPENAI_TTS_MODEL","gpt-4o-mini-tts"),o=A(t,"OPENAI_TTS_VOICE","nova"),s=A(t,"OPENAI_TTS_INSTRUCTIONS",""),a=R(t);try{let l=await v(`${a}/audio/speech`,{method:"POST",headers:{Authorization:`Bearer ${r}`,"Content-Type":"application/json"},body:JSON.stringify({model:n,voice:o,input:e,...s&&{instructions:s}})});if(!l.ok){let p=await l.text();throw new Error(`OpenAI TTS error ${l.status}: ${p}`)}return l.body}catch(l){let p=l instanceof Error?l.message:String(l);throw new Error(`Failed to fetch speech from OpenAI TTS: ${p}`)}}
// xe: the plugin object — env config snapshot, init() (API-key validation via GET /models; warns, never throws),
// per-ModelType handlers and the self-test suite. Continues across the remaining bundle lines.
var xe={name:"openai",description:"OpenAI plugin",config:{OPENAI_API_KEY:process.env.OPENAI_API_KEY,OPENAI_BASE_URL:process.env.OPENAI_BASE_URL,OPENAI_SMALL_MODEL:process.env.OPENAI_SMALL_MODEL,OPENAI_LARGE_MODEL:process.env.OPENAI_LARGE_MODEL,SMALL_MODEL:process.env.SMALL_MODEL,LARGE_MODEL:process.env.LARGE_MODEL,OPENAI_EMBEDDING_MODEL:process.env.OPENAI_EMBEDDING_MODEL,OPENAI_EMBEDDING_URL:process.env.OPENAI_EMBEDDING_URL,OPENAI_EMBEDDING_DIMENSIONS:process.env.OPENAI_EMBEDDING_DIMENSIONS},async init(t,e){try{if(!O(e)){c.warn("OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited");return}try{let r=R(e),n=await v(`${r}/models`,{headers:{Authorization:`Bearer ${O(e)}`}});n.ok?c.log("OpenAI API key validated successfully"):(c.warn(`OpenAI API key validation failed: ${n.statusText}`),c.warn("OpenAI functionality will be limited until a valid API key is provided"))}catch(r){let n=r instanceof
Error?r.message:String(r);c.warn(`Error validating OpenAI API key: ${n}`),c.warn("OpenAI functionality will be limited until a valid API key is provided")}}catch(r){let n=r?.errors?.map(o=>o.message).join(", ")||(r instanceof Error?r.message:String(r));c.warn(`OpenAI plugin configuration issue: ${n} - You need to configure the OPENAI_API_KEY in your environment variables`)}},
// models: one handler per ModelType. TEXT_EMBEDDING validates the dimension against the allowed set (Y)
// and, instead of throwing on test/invalid/error inputs, returns a zero vector with a marker first element
// (.1 init probe, .2 bad input, .3 empty text, .4 API error, .5 bad response shape, .6 exception).
models:{[d.TEXT_EMBEDDING]:async(t,e)=>{let r=A(t,"OPENAI_EMBEDDING_MODEL","text-embedding-3-small"),n=Number.parseInt(A(t,"OPENAI_EMBEDDING_DIMENSIONS","1536")||"1536",10);if(c.debug(`[OpenAI] Using embedding model: ${r} with dimension: ${n}`),!Object.values(Y).includes(n)){let a=`Invalid embedding dimension: ${n}. Must be one of: ${Object.values(Y).join(", ")}`;throw c.error(a),new Error(a)}if(e===null){c.debug("Creating test embedding for initialization");let a=Array(n).fill(0);return a[0]=.1,a}let o;if(typeof e=="string")o=e;else if(typeof e=="object"&&e.text)o=e.text;else{c.warn("Invalid input format for embedding");let a=Array(n).fill(0);return a[0]=.2,a}if(!o.trim()){c.warn("Empty text for embedding");let a=Array(n).fill(0);return a[0]=.3,a}let s={"llm.vendor":"OpenAI","llm.request.type":"embedding","llm.request.model":r,"llm.request.embedding.dimensions":n,"input.text.length":o.length};return T(t,"LLM.embedding",s,async a=>{a.addEvent("llm.prompt",{"prompt.content":o});let l=Re(t),p=O(t);if(!p)throw a.setStatus({code:f.ERROR,message:"OpenAI API key not configured"}),new Error("OpenAI API key not configured");try{let u=await v(`${l}/embeddings`,{method:"POST",headers:{Authorization:`Bearer ${p}`,"Content-Type":"application/json"},body:JSON.stringify({model:r,input:o})}),m=await u.clone().text();if(a.addEvent("llm.response.raw",{"response.body":m}),!u.ok){c.error(`OpenAI API error: ${u.status} - ${u.statusText}`),a.setAttributes({"error.api.status":u.status}),a.setStatus({code:f.ERROR,message:`OpenAI API error: ${u.status} - ${u.statusText}. 
Response: ${m}`});let _=Array(n).fill(0);return _[0]=.4,_}let g=await u.json();if(!g?.data?.[0]?.embedding){c.error("API returned invalid structure"),a.setStatus({code:f.ERROR,message:"API returned invalid structure"});let _=Array(n).fill(0);return _[0]=.5,_}let E=g.data[0].embedding;if(a.setAttribute("llm.response.embedding.vector_length",E.length),g.usage){a.setAttributes({"llm.usage.prompt_tokens":g.usage.prompt_tokens,"llm.usage.total_tokens":g.usage.total_tokens});let _={promptTokens:g.usage.prompt_tokens,completionTokens:0,totalTokens:g.usage.total_tokens};N(t,d.TEXT_EMBEDDING,o,_)}return c.log(`Got valid embedding with length ${E.length}`),E}catch(u){let i=u instanceof Error?u.message:String(u);c.error(`Error generating embedding: ${i}`);let m=u instanceof Error?u:new Error(i);a.recordException(m),a.setStatus({code:f.ERROR,message:i});let g=Array(n).fill(0);return g[0]=.6,g}})},
// Tokenizer handlers: thin wrappers over be()/we(); default to TEXT_LARGE when no modelType is supplied.
[d.TEXT_TOKENIZER_ENCODE]:async(t,{prompt:e,modelType:r=d.TEXT_LARGE})=>await be(r??d.TEXT_LARGE,e),
[d.TEXT_TOKENIZER_DECODE]:async(t,{tokens:e,modelType:r=d.TEXT_LARGE})=>await we(r??d.TEXT_LARGE,e),
// TEXT_SMALL: fixed sampling parameters (temperature/penalties .7, maxTokens 8192); continues on the next bundle line.
[d.TEXT_SMALL]:async(t,{prompt:e,stopSequences:r=[]})=>{let l=B(t),p=W(t);c.log(`[OpenAI] Using TEXT_SMALL model: ${p}`),c.log(e);let u={"llm.vendor":"OpenAI","llm.request.type":"completion","llm.request.model":p,"llm.request.temperature":.7,"llm.request.max_tokens":8192,"llm.request.frequency_penalty":.7,"llm.request.presence_penalty":.7,"llm.request.stop_sequences":JSON.stringify(r)};return T(t,"LLM.generateText",u,async i=>{i.addEvent("llm.prompt",{"prompt.content":e});let{text:m,usage:g}=await H({model:l.languageModel(p),prompt:e,system:t.character.system??void 0,temperature:.7,maxTokens:8192,frequencyPenalty:.7,presencePenalty:.7,stopSequences:r});return 
i.setAttribute("llm.response.processed.length",m.length),i.addEvent("llm.response.processed",{"response.content":m.substring(0,200)+(m.length>200?"...":"")}),g&&(i.setAttributes({"llm.usage.prompt_tokens":g.promptTokens,"llm.usage.completion_tokens":g.completionTokens,"llm.usage.total_tokens":g.totalTokens}),N(t,d.TEXT_SMALL,e,g)),m})},
// TEXT_LARGE: same flow as TEXT_SMALL but all sampling parameters are caller-overridable with defaults.
[d.TEXT_LARGE]:async(t,{prompt:e,stopSequences:r=[],maxTokens:n=8192,temperature:o=.7,frequencyPenalty:s=.7,presencePenalty:a=.7})=>{let l=B(t),p=Z(t);c.log(`[OpenAI] Using TEXT_LARGE model: ${p}`),c.log(e);let u={"llm.vendor":"OpenAI","llm.request.type":"completion","llm.request.model":p,"llm.request.temperature":o,"llm.request.max_tokens":n,"llm.request.frequency_penalty":s,"llm.request.presence_penalty":a,"llm.request.stop_sequences":JSON.stringify(r)};return T(t,"LLM.generateText",u,async i=>{i.addEvent("llm.prompt",{"prompt.content":e});let{text:m,usage:g}=await H({model:l.languageModel(p),prompt:e,system:t.character.system??void 0,temperature:o,maxTokens:n,frequencyPenalty:s,presencePenalty:a,stopSequences:r});return i.setAttribute("llm.response.processed.length",m.length),i.addEvent("llm.response.processed",{"response.content":m.substring(0,200)+(m.length>200?"...":"")}),g&&(i.setAttributes({"llm.usage.prompt_tokens":g.promptTokens,"llm.usage.completion_tokens":g.completionTokens,"llm.usage.total_tokens":g.totalTokens}),N(t,d.TEXT_LARGE,e,g)),m})},
// IMAGE: raw fetch to /images/generations. NOTE(review): the log claims dall-e-3 but no model is sent in
// the request body, so the API default applies — confirm against the intended model before trusting the log line.
[d.IMAGE]:async(t,e)=>{let r=e.n||1,n=e.size||"1024x1024",o=e.prompt;return c.log("[OpenAI] Using IMAGE model: dall-e-3"),T(t,"LLM.imageGeneration",{"llm.vendor":"OpenAI","llm.request.type":"image_generation","llm.request.image.size":n,"llm.request.image.count":r},async l=>{l.addEvent("llm.prompt",{"prompt.content":o});let p=R(t),u=O(t);if(!u)throw l.setStatus({code:f.ERROR,message:"OpenAI API key not configured"}),new Error("OpenAI API key not configured");try{let i=await v(`${p}/images/generations`,{method:"POST",headers:{Authorization:`Bearer 
${u}`,"Content-Type":"application/json"},body:JSON.stringify({prompt:o,n:r,size:n})}),g=await i.clone().text();if(l.addEvent("llm.response.raw",{"response.body":g}),!i.ok)throw l.setAttributes({"error.api.status":i.status}),l.setStatus({code:f.ERROR,message:`Failed to generate image: ${i.statusText}. Response: ${g}`}),new Error(`Failed to generate image: ${i.statusText}`);let _=await i.json();return l.addEvent("llm.response.processed",{"response.urls":JSON.stringify(_.data)}),_.data}catch(i){let m=i instanceof Error?i.message:String(i),g=i instanceof Error?i:new Error(m);throw l.recordException(g),l.setStatus({code:f.ERROR,message:m}),i}})},
// IMAGE_DESCRIPTION: accepts a plain URL string or {imageUrl, prompt}; calls /chat/completions (gpt-4o-mini,
// max 300 tokens) and on failure returns a {title, description} error object rather than throwing.
[d.IMAGE_DESCRIPTION]:async(t,e)=>{let r,n,o="gpt-4o-mini";c.log(`[OpenAI] Using IMAGE_DESCRIPTION model: ${o}`);let s=300;typeof e=="string"?(r=e,n="Please analyze this image and provide a title and detailed description."):(r=e.imageUrl,n=e.prompt||"Please analyze this image and provide a title and detailed description.");let a={"llm.vendor":"OpenAI","llm.request.type":"chat","llm.request.model":o,"llm.request.max_tokens":s,"llm.request.image.url":r},l=[{role:"user",content:[{type:"text",text:n},{type:"image_url",image_url:{url:r}}]}];return T(t,"LLM.imageDescription",a,async p=>{p.addEvent("llm.prompt",{"prompt.content":JSON.stringify(l,k())});let u=R(t),i=O(t);if(!i)return c.error("OpenAI API key not set"),p.setStatus({code:f.ERROR,message:"OpenAI API key not configured"}),{title:"Failed to analyze image",description:"API key not configured"};try{let m=await v(`${u}/chat/completions`,{method:"POST",headers:{"Content-Type":"application/json",Authorization:`Bearer ${i}`},body:JSON.stringify({model:o,messages:l,max_tokens:s})}),E=await m.clone().text();if(p.addEvent("llm.response.raw",{"response.body":E}),!m.ok)throw p.setAttributes({"error.api.status":m.status}),p.setStatus({code:f.ERROR,message:`OpenAI API error: ${m.status}. 
Response: ${E}`}),new Error(`OpenAI API error: ${m.status}`);let h=await m.json(),L=h.choices?.[0]?.message?.content;if(h.usage&&(p.setAttributes({"llm.usage.prompt_tokens":h.usage.prompt_tokens,"llm.usage.completion_tokens":h.usage.completion_tokens,"llm.usage.total_tokens":h.usage.total_tokens}),N(t,d.IMAGE_DESCRIPTION,typeof e=="string"?e:e.prompt||"",{promptTokens:h.usage.prompt_tokens,completionTokens:h.usage.completion_tokens,totalTokens:h.usage.total_tokens})),h.choices?.[0]?.finish_reason&&p.setAttribute("llm.response.finish_reason",h.choices[0].finish_reason),!L)return p.setStatus({code:f.ERROR,message:"No content in API response"}),{title:"Failed to analyze image",description:"No response from API"};let oe=L.match(/title[:\s]+(.+?)(?:\n|$)/i)?.[1]?.trim()||"Image Analysis",se=L.replace(/title[:\s]+(.+?)(?:\n|$)/i,"").trim(),U={title:oe,description:se};return p.addEvent("llm.response.processed",{"response.object":JSON.stringify(U,k())}),U}catch(m){let g=m instanceof Error?m.message:String(m);c.error(`Error analyzing image: ${g}`);let E=m instanceof Error?m:new Error(g);return p.recordException(E),p.setStatus({code:f.ERROR,message:g}),{title:"Failed to analyze image",description:`Error: ${g}`}}})},
// TRANSCRIPTION: posts the audio buffer as multipart form-data to /audio/transcriptions (whisper-1);
// throws when the key is missing or the buffer is empty. Continues on the next bundle line.
[d.TRANSCRIPTION]:async(t,e)=>{c.log("audioBuffer",e);let r="whisper-1";c.log(`[OpenAI] Using TRANSCRIPTION model: ${r}`);let n={"llm.vendor":"OpenAI","llm.request.type":"transcription","llm.request.model":r,"llm.request.audio.input_size_bytes":e?.length||0};return T(t,"LLM.transcription",n,async o=>{o.addEvent("llm.prompt",{"prompt.info":"Audio buffer for transcription"});let s=R(t),a=O(t);if(!a)throw o.setStatus({code:f.ERROR,message:"OpenAI API key not configured"}),new Error("OpenAI API key not configured - Cannot make request");if(!e||e.length===0)throw o.setStatus({code:f.ERROR,message:"Audio buffer is empty or invalid"}),new Error("Audio buffer is empty or invalid for transcription");let l=new ve;l.append("file",new 
Blob([e]),"recording.mp3"),l.append("model","whisper-1");try{let p=await v(`${s}/audio/transcriptions`,{method:"POST",headers:{Authorization:`Bearer ${a}`},body:l}),i=await p.clone().text();if(o.addEvent("llm.response.raw",{"response.body":i}),c.log("response",p),!p.ok)throw o.setAttributes({"error.api.status":p.status}),o.setStatus({code:f.ERROR,message:`Failed to transcribe audio: ${p.statusText}. Response: ${i}`}),new Error(`Failed to transcribe audio: ${p.statusText}`);let g=(await p.json()).text;return o.setAttribute("llm.response.processed.length",g.length),o.addEvent("llm.response.processed",{"response.text":g}),g}catch(p){let u=p instanceof Error?p.message:String(p),i=p instanceof Error?p:new Error(u);throw o.recordException(i),o.setStatus({code:f.ERROR,message:u}),p}})},
// TEXT_TO_SPEECH: delegates to te() inside a span; returns the audio stream.
[d.TEXT_TO_SPEECH]:async(t,e)=>{let r=A(t,"OPENAI_TTS_MODEL","gpt-4o-mini-tts"),n={"llm.vendor":"OpenAI","llm.request.type":"tts","llm.request.model":r,"input.text.length":e.length};return T(t,"LLM.tts",n,async o=>{c.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${r}`),o.addEvent("llm.prompt",{"prompt.content":e});try{let s=await te(t,e);return o.addEvent("llm.response.success",{info:"Speech stream generated"}),s}catch(s){let a=s instanceof Error?s.message:String(s),l=s instanceof Error?s:new Error(a);throw o.recordException(l),o.setStatus({code:f.ERROR,message:a}),s}})},
// OBJECT_SMALL / OBJECT_LARGE: both route through Q with the matching model-name resolver.
[d.OBJECT_SMALL]:async(t,e)=>Q(t,e,d.OBJECT_SMALL,W),[d.OBJECT_LARGE]:async(t,e)=>Q(t,e,d.OBJECT_LARGE,Z)},
// tests: live self-tests hitting the real API (require a valid key/network); runs every handler above.
tests:[{name:"openai_plugin_tests",tests:[{name:"openai_test_url_and_api_key_validation",fn:async t=>{let e=R(t),r=await v(`${e}/models`,{headers:{Authorization:`Bearer ${O(t)}`}}),n=await r.json();if(c.log("Models Available:",n?.data?.length??"N/A"),!r.ok)throw new Error(`Failed to validate OpenAI API key: ${r.statusText}`)}},{name:"openai_test_text_embedding",fn:async t=>{try{let e=await t.useModel(d.TEXT_EMBEDDING,{text:"Hello, world!"});c.log("embedding",e)}catch(e){let r=e instanceof Error?e.message:String(e);throw 
c.error(`Error in test_text_embedding: ${r}`),e}}},
// Text generation round-trips: both assert a non-empty completion.
{name:"openai_test_text_large",fn:async t=>{try{let e=await t.useModel(d.TEXT_LARGE,{prompt:"What is the nature of reality in 10 words?"});if(e.length===0)throw new Error("Failed to generate text");c.log("generated with test_text_large:",e)}catch(e){let r=e instanceof Error?e.message:String(e);throw c.error(`Error in test_text_large: ${r}`),e}}},{name:"openai_test_text_small",fn:async t=>{try{let e=await t.useModel(d.TEXT_SMALL,{prompt:"What is the nature of reality in 10 words?"});if(e.length===0)throw new Error("Failed to generate text");c.log("generated with test_text_small:",e)}catch(e){let r=e instanceof Error?e.message:String(e);throw c.error(`Error in test_text_small: ${r}`),e}}},{name:"openai_test_image_generation",fn:async t=>{c.log("openai_test_image_generation");try{let e=await t.useModel(d.IMAGE,{prompt:"A beautiful sunset over a calm ocean",n:1,size:"1024x1024"});c.log("generated with test_image_generation:",e)}catch(e){let r=e instanceof Error?e.message:String(e);throw c.error(`Error in test_image_generation: ${r}`),e}}},
// image-description test only logs failures — it never rethrows, unlike its siblings.
{name:"image-description",fn:async t=>{try{c.log("openai_test_image_description");try{let e=await t.useModel(d.IMAGE_DESCRIPTION,"https://upload.wikimedia.org/wikipedia/commons/thumb/1/1c/Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg/537px-Vitalik_Buterin_TechCrunch_London_2015_%28cropped%29.jpg");e&&typeof e=="object"&&"title"in e&&"description"in e?c.log("Image description:",e):c.error("Invalid image description result format:",e)}catch(e){let r=e instanceof Error?e.message:String(e);c.error(`Error in image description test: ${r}`)}}catch(e){let r=e instanceof Error?e.message:String(e);c.error(`Error in openai_test_image_description: ${r}`)}}},
// transcription test downloads a sample audio file and continues on the next bundle line.
{name:"openai_test_transcription",fn:async t=>{c.log("openai_test_transcription");try{let r=await(await v("https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg")).arrayBuffer(),n=await 
t.useModel(d.TRANSCRIPTION,Buffer.from(new Uint8Array(r)));c.log("generated with test_transcription:",n)}catch(e){let r=e instanceof Error?e.message:String(e);throw c.error(`Error in test_transcription: ${r}`),e}}},
// Tokenizer tests: encode must yield a non-empty array; decode(encode(x)) must round-trip exactly.
{name:"openai_test_text_tokenizer_encode",fn:async t=>{let r=await t.useModel(d.TEXT_TOKENIZER_ENCODE,{prompt:"Hello tokenizer encode!"});if(!Array.isArray(r)||r.length===0)throw new Error("Failed to tokenize text: expected non-empty array of tokens");c.log("Tokenized output:",r)}},{name:"openai_test_text_tokenizer_decode",fn:async t=>{let e="Hello tokenizer decode!",r=await t.useModel(d.TEXT_TOKENIZER_ENCODE,{prompt:e}),n=await t.useModel(d.TEXT_TOKENIZER_DECODE,{tokens:r});if(n!==e)throw new Error(`Decoded text does not match original. Expected "${e}", got "${n}"`);c.log("Decoded text:",n)}},
{name:"openai_test_text_to_speech",fn:async t=>{try{if(!await te(t,"Hello, this is a test for text-to-speech."))throw new Error("Failed to generate speech");c.log("Generated speech successfully")}catch(e){let r=e instanceof Error?e.message:String(e);throw c.error(`Error in openai_test_text_to_speech: ${r}`),e}}}]}]},ft=xe;
// ft aliases xe so the plugin is available both as the default export and the named openaiPlugin export.
export{ft as default,xe as openaiPlugin};
1494
2
  //# sourceMappingURL=index.js.map