@elizaos/plugin-openai 1.0.0-beta.34 → 1.0.0-beta.38
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +1094 -181
- package/dist/index.js.map +1 -1
- package/package.json +6 -5
package/dist/index.js
CHANGED
@@ -1,19 +1,567 @@
 // src/index.ts
 import { createOpenAI } from "@ai-sdk/openai";
+import { getProviderBaseURL } from "@elizaos/core";
 import {
-
+  EventType,
   logger,
-
+  ModelType,
+  VECTOR_DIMS,
+  safeReplacer,
+  ServiceType
 } from "@elizaos/core";
-import {
+import {
+  generateObject,
+  generateText,
+  JSONParseError
+} from "ai";
 import { encodingForModel } from "js-tiktoken";
-import FormData from "
-
+import { fetch, FormData } from "undici";
+
+// ../../node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
+var _globalThis = typeof globalThis === "object" ? globalThis : global;
+
+// ../../node_modules/@opentelemetry/api/build/esm/version.js
+var VERSION = "1.9.0";
+
+// ../../node_modules/@opentelemetry/api/build/esm/internal/semver.js
+var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
+function _makeCompatibilityCheck(ownVersion) {
+  var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
+  var rejectedVersions = /* @__PURE__ */ new Set();
+  var myVersionMatch = ownVersion.match(re);
+  if (!myVersionMatch) {
+    return function() {
+      return false;
+    };
+  }
+  var ownVersionParsed = {
+    major: +myVersionMatch[1],
+    minor: +myVersionMatch[2],
+    patch: +myVersionMatch[3],
+    prerelease: myVersionMatch[4]
+  };
+  if (ownVersionParsed.prerelease != null) {
+    return function isExactmatch(globalVersion) {
+      return globalVersion === ownVersion;
+    };
+  }
+  function _reject(v) {
+    rejectedVersions.add(v);
+    return false;
+  }
+  function _accept(v) {
+    acceptedVersions.add(v);
+    return true;
+  }
+  return function isCompatible2(globalVersion) {
+    if (acceptedVersions.has(globalVersion)) {
+      return true;
+    }
+    if (rejectedVersions.has(globalVersion)) {
+      return false;
+    }
+    var globalVersionMatch = globalVersion.match(re);
+    if (!globalVersionMatch) {
+      return _reject(globalVersion);
+    }
+    var globalVersionParsed = {
+      major: +globalVersionMatch[1],
+      minor: +globalVersionMatch[2],
+      patch: +globalVersionMatch[3],
+      prerelease: globalVersionMatch[4]
+    };
+    if (globalVersionParsed.prerelease != null) {
+      return _reject(globalVersion);
+    }
+    if (ownVersionParsed.major !== globalVersionParsed.major) {
+      return _reject(globalVersion);
+    }
+    if (ownVersionParsed.major === 0) {
+      if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
+        return _accept(globalVersion);
+      }
+      return _reject(globalVersion);
+    }
+    if (ownVersionParsed.minor <= globalVersionParsed.minor) {
+      return _accept(globalVersion);
+    }
+    return _reject(globalVersion);
+  };
+}
+var isCompatible = _makeCompatibilityCheck(VERSION);
+
+// ../../node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
+var major = VERSION.split(".")[0];
+var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
+var _global = _globalThis;
+function registerGlobal(type, instance, diag, allowOverride) {
+  var _a;
+  if (allowOverride === void 0) {
+    allowOverride = false;
+  }
+  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
+    version: VERSION
+  };
+  if (!allowOverride && api[type]) {
+    var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
+    diag.error(err.stack || err.message);
+    return false;
+  }
+  if (api.version !== VERSION) {
+    var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
+    diag.error(err.stack || err.message);
+    return false;
+  }
+  api[type] = instance;
+  diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
+  return true;
+}
+function getGlobal(type) {
+  var _a, _b;
+  var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
+  if (!globalVersion || !isCompatible(globalVersion)) {
+    return;
+  }
+  return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
+}
+function unregisterGlobal(type, diag) {
+  diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
+  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
+  if (api) {
+    delete api[type];
+  }
+}
+
+// ../../node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
+var __read = function(o, n) {
+  var m = typeof Symbol === "function" && o[Symbol.iterator];
+  if (!m) return o;
+  var i = m.call(o), r, ar = [], e;
+  try {
+    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+  } catch (error) {
+    e = { error };
+  } finally {
+    try {
+      if (r && !r.done && (m = i["return"])) m.call(i);
+    } finally {
+      if (e) throw e.error;
+    }
+  }
+  return ar;
+};
+var __spreadArray = function(to, from, pack) {
+  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+    if (ar || !(i in from)) {
+      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+      ar[i] = from[i];
+    }
+  }
+  return to.concat(ar || Array.prototype.slice.call(from));
+};
+var DiagComponentLogger = (
+  /** @class */
+  function() {
+    function DiagComponentLogger2(props) {
+      this._namespace = props.namespace || "DiagComponentLogger";
+    }
+    DiagComponentLogger2.prototype.debug = function() {
+      var args = [];
+      for (var _i = 0; _i < arguments.length; _i++) {
+        args[_i] = arguments[_i];
+      }
+      return logProxy("debug", this._namespace, args);
+    };
+    DiagComponentLogger2.prototype.error = function() {
+      var args = [];
+      for (var _i = 0; _i < arguments.length; _i++) {
+        args[_i] = arguments[_i];
+      }
+      return logProxy("error", this._namespace, args);
+    };
+    DiagComponentLogger2.prototype.info = function() {
+      var args = [];
+      for (var _i = 0; _i < arguments.length; _i++) {
+        args[_i] = arguments[_i];
+      }
+      return logProxy("info", this._namespace, args);
+    };
+    DiagComponentLogger2.prototype.warn = function() {
+      var args = [];
+      for (var _i = 0; _i < arguments.length; _i++) {
+        args[_i] = arguments[_i];
+      }
+      return logProxy("warn", this._namespace, args);
+    };
+    DiagComponentLogger2.prototype.verbose = function() {
+      var args = [];
+      for (var _i = 0; _i < arguments.length; _i++) {
+        args[_i] = arguments[_i];
+      }
+      return logProxy("verbose", this._namespace, args);
+    };
+    return DiagComponentLogger2;
+  }()
+);
+function logProxy(funcName, namespace, args) {
+  var logger2 = getGlobal("diag");
+  if (!logger2) {
+    return;
+  }
+  args.unshift(namespace);
+  return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
+}
+
+// ../../node_modules/@opentelemetry/api/build/esm/diag/types.js
+var DiagLogLevel;
+(function(DiagLogLevel2) {
+  DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
+  DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
+  DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
+  DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
+  DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
+  DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
+  DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
+})(DiagLogLevel || (DiagLogLevel = {}));
+
+// ../../node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
+function createLogLevelDiagLogger(maxLevel, logger2) {
+  if (maxLevel < DiagLogLevel.NONE) {
+    maxLevel = DiagLogLevel.NONE;
+  } else if (maxLevel > DiagLogLevel.ALL) {
+    maxLevel = DiagLogLevel.ALL;
+  }
+  logger2 = logger2 || {};
+  function _filterFunc(funcName, theLevel) {
+    var theFunc = logger2[funcName];
+    if (typeof theFunc === "function" && maxLevel >= theLevel) {
+      return theFunc.bind(logger2);
+    }
+    return function() {
+    };
+  }
+  return {
+    error: _filterFunc("error", DiagLogLevel.ERROR),
+    warn: _filterFunc("warn", DiagLogLevel.WARN),
+    info: _filterFunc("info", DiagLogLevel.INFO),
+    debug: _filterFunc("debug", DiagLogLevel.DEBUG),
+    verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
+  };
+}
+
+// ../../node_modules/@opentelemetry/api/build/esm/api/diag.js
+var __read2 = function(o, n) {
+  var m = typeof Symbol === "function" && o[Symbol.iterator];
+  if (!m) return o;
+  var i = m.call(o), r, ar = [], e;
+  try {
+    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+  } catch (error) {
+    e = { error };
+  } finally {
+    try {
+      if (r && !r.done && (m = i["return"])) m.call(i);
+    } finally {
+      if (e) throw e.error;
+    }
+  }
+  return ar;
+};
+var __spreadArray2 = function(to, from, pack) {
+  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+    if (ar || !(i in from)) {
+      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+      ar[i] = from[i];
+    }
+  }
+  return to.concat(ar || Array.prototype.slice.call(from));
+};
+var API_NAME = "diag";
+var DiagAPI = (
+  /** @class */
+  function() {
+    function DiagAPI2() {
+      function _logProxy(funcName) {
+        return function() {
+          var args = [];
+          for (var _i = 0; _i < arguments.length; _i++) {
+            args[_i] = arguments[_i];
+          }
+          var logger2 = getGlobal("diag");
+          if (!logger2)
+            return;
+          return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
+        };
+      }
+      var self = this;
+      var setLogger = function(logger2, optionsOrLogLevel) {
+        var _a, _b, _c;
+        if (optionsOrLogLevel === void 0) {
+          optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
+        }
+        if (logger2 === self) {
+          var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
+          self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
+          return false;
+        }
+        if (typeof optionsOrLogLevel === "number") {
+          optionsOrLogLevel = {
+            logLevel: optionsOrLogLevel
+          };
+        }
+        var oldLogger = getGlobal("diag");
+        var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
+        if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
+          var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
+          oldLogger.warn("Current logger will be overwritten from " + stack);
+          newLogger.warn("Current logger will overwrite one already registered from " + stack);
+        }
+        return registerGlobal("diag", newLogger, self, true);
+      };
+      self.setLogger = setLogger;
+      self.disable = function() {
+        unregisterGlobal(API_NAME, self);
+      };
+      self.createComponentLogger = function(options) {
+        return new DiagComponentLogger(options);
+      };
+      self.verbose = _logProxy("verbose");
+      self.debug = _logProxy("debug");
+      self.info = _logProxy("info");
+      self.warn = _logProxy("warn");
+      self.error = _logProxy("error");
+    }
+    DiagAPI2.instance = function() {
+      if (!this._instance) {
+        this._instance = new DiagAPI2();
+      }
+      return this._instance;
+    };
+    return DiagAPI2;
+  }()
+);
+
+// ../../node_modules/@opentelemetry/api/build/esm/context/context.js
+var BaseContext = (
+  /** @class */
+  /* @__PURE__ */ function() {
+    function BaseContext2(parentContext) {
+      var self = this;
+      self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
+      self.getValue = function(key) {
+        return self._currentContext.get(key);
+      };
+      self.setValue = function(key, value) {
+        var context2 = new BaseContext2(self._currentContext);
+        context2._currentContext.set(key, value);
+        return context2;
+      };
+      self.deleteValue = function(key) {
+        var context2 = new BaseContext2(self._currentContext);
+        context2._currentContext.delete(key);
+        return context2;
+      };
+    }
+    return BaseContext2;
+  }()
+);
+var ROOT_CONTEXT = new BaseContext();
+
+// ../../node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
+var __read3 = function(o, n) {
+  var m = typeof Symbol === "function" && o[Symbol.iterator];
+  if (!m) return o;
+  var i = m.call(o), r, ar = [], e;
+  try {
+    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+  } catch (error) {
+    e = { error };
+  } finally {
+    try {
+      if (r && !r.done && (m = i["return"])) m.call(i);
+    } finally {
+      if (e) throw e.error;
+    }
+  }
+  return ar;
+};
+var __spreadArray3 = function(to, from, pack) {
+  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+    if (ar || !(i in from)) {
+      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+      ar[i] = from[i];
+    }
+  }
+  return to.concat(ar || Array.prototype.slice.call(from));
+};
+var NoopContextManager = (
+  /** @class */
+  function() {
+    function NoopContextManager2() {
+    }
+    NoopContextManager2.prototype.active = function() {
+      return ROOT_CONTEXT;
+    };
+    NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
+      var args = [];
+      for (var _i = 3; _i < arguments.length; _i++) {
+        args[_i - 3] = arguments[_i];
+      }
+      return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
+    };
+    NoopContextManager2.prototype.bind = function(_context, target) {
+      return target;
+    };
+    NoopContextManager2.prototype.enable = function() {
+      return this;
+    };
+    NoopContextManager2.prototype.disable = function() {
+      return this;
+    };
+    return NoopContextManager2;
+  }()
+);
+
+// ../../node_modules/@opentelemetry/api/build/esm/api/context.js
+var __read4 = function(o, n) {
+  var m = typeof Symbol === "function" && o[Symbol.iterator];
+  if (!m) return o;
+  var i = m.call(o), r, ar = [], e;
+  try {
+    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
+  } catch (error) {
+    e = { error };
+  } finally {
+    try {
+      if (r && !r.done && (m = i["return"])) m.call(i);
+    } finally {
+      if (e) throw e.error;
+    }
+  }
+  return ar;
+};
+var __spreadArray4 = function(to, from, pack) {
+  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
+    if (ar || !(i in from)) {
+      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
+      ar[i] = from[i];
+    }
+  }
+  return to.concat(ar || Array.prototype.slice.call(from));
+};
+var API_NAME2 = "context";
+var NOOP_CONTEXT_MANAGER = new NoopContextManager();
+var ContextAPI = (
+  /** @class */
+  function() {
+    function ContextAPI2() {
+    }
+    ContextAPI2.getInstance = function() {
+      if (!this._instance) {
+        this._instance = new ContextAPI2();
+      }
+      return this._instance;
+    };
+    ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
+      return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
+    };
+    ContextAPI2.prototype.active = function() {
+      return this._getContextManager().active();
+    };
+    ContextAPI2.prototype.with = function(context2, fn, thisArg) {
+      var _a;
+      var args = [];
+      for (var _i = 3; _i < arguments.length; _i++) {
+        args[_i - 3] = arguments[_i];
+      }
+      return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
+    };
+    ContextAPI2.prototype.bind = function(context2, target) {
+      return this._getContextManager().bind(context2, target);
+    };
+    ContextAPI2.prototype._getContextManager = function() {
+      return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
+    };
+    ContextAPI2.prototype.disable = function() {
+      this._getContextManager().disable();
+      unregisterGlobal(API_NAME2, DiagAPI.instance());
+    };
+    return ContextAPI2;
+  }()
+);
+
+// ../../node_modules/@opentelemetry/api/build/esm/trace/status.js
+var SpanStatusCode;
+(function(SpanStatusCode2) {
+  SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
+  SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
+  SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
+})(SpanStatusCode || (SpanStatusCode = {}));
+
+// ../../node_modules/@opentelemetry/api/build/esm/context-api.js
+var context = ContextAPI.getInstance();
+
+// src/index.ts
+function getTracer(runtime) {
+  const availableServices = Array.from(runtime.getAllServices().keys());
+  logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
+  logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
+  const instrumentationService = runtime.getService(
+    ServiceType.INSTRUMENTATION
+  );
+  if (!instrumentationService) {
+    logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
+    return null;
+  }
+  if (!instrumentationService.isEnabled()) {
+    logger.debug("[getTracer] Instrumentation service found but is disabled.");
+    return null;
+  }
+  logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
+  return instrumentationService.getTracer("eliza.llm.openai");
+}
+async function startLlmSpan(runtime, spanName, attributes, fn) {
+  const tracer = getTracer(runtime);
+  if (!tracer) {
+    const dummySpan = {
+      setAttribute: () => {
+      },
+      setAttributes: () => {
+      },
+      addEvent: () => {
+      },
+      recordException: () => {
+      },
+      setStatus: () => {
+      },
+      end: () => {
+      },
+      spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
+    };
+    return fn(dummySpan);
+  }
+  const activeContext = context.active();
+  return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
+    try {
+      const result = await fn(span);
+      span.setStatus({ code: SpanStatusCode.OK });
+      span.end();
+      return result;
+    } catch (error) {
+      const message = error instanceof Error ? error.message : String(error);
+      span.recordException(error);
+      span.setStatus({ code: SpanStatusCode.ERROR, message });
+      span.end();
+      throw error;
+    }
+  });
+}
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
 function getBaseURL(runtime) {
-
+  const defaultBaseURL = getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
+  return getProviderBaseURL(runtime, "openai", defaultBaseURL);
 }
 function getApiKey(runtime) {
   return getSetting(runtime, "OPENAI_API_KEY");
@@ -43,23 +591,109 @@ async function detokenizeText(model, tokens) {
 }
 async function generateObjectByModelType(runtime, params, modelType, getModelFn) {
   const openai = createOpenAIClient(runtime);
-  const
-
-
-
+  const modelName = getModelFn(runtime);
+  const temperature = params.temperature ?? 0;
+  const schemaPresent = !!params.schema;
+  const attributes = {
+    "llm.vendor": "OpenAI",
+    "llm.request.type": "object_generation",
+    "llm.request.model": modelName,
+    "llm.request.temperature": temperature,
+    "llm.request.schema_present": schemaPresent
+  };
+  return startLlmSpan(runtime, "LLM.generateObject", attributes, async (span) => {
+    span.addEvent("llm.prompt", { "prompt.content": params.prompt });
+    if (schemaPresent) {
+      span.addEvent("llm.request.schema", {
+        schema: JSON.stringify(params.schema, safeReplacer())
+      });
+      logger.info(
+        `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+      );
     }
-
-
-
-
-
-
-
-
-
-
-
-
+    try {
+      const { object, usage } = await generateObject({
+        model: openai.languageModel(modelName),
+        output: "no-schema",
+        prompt: params.prompt,
+        temperature,
+        experimental_repairText: getJsonRepairFunction()
+      });
+      span.addEvent("llm.response.processed", {
+        "response.object": JSON.stringify(object, safeReplacer())
+      });
+      if (usage) {
+        span.setAttributes({
+          "llm.usage.prompt_tokens": usage.promptTokens,
+          "llm.usage.completion_tokens": usage.completionTokens,
+          "llm.usage.total_tokens": usage.totalTokens
+        });
+        emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+      }
+      return object;
+    } catch (error) {
+      if (error instanceof JSONParseError) {
+        logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+        span.recordException(error);
+        span.addEvent("llm.error.json_parse", {
+          "error.message": error.message,
+          "error.text": error.text
+        });
+        span.addEvent("llm.repair.attempt");
+        const repairFunction = getJsonRepairFunction();
+        const repairedJsonString = await repairFunction({
+          text: error.text,
+          error
+        });
+        if (repairedJsonString) {
+          try {
+            const repairedObject = JSON.parse(repairedJsonString);
+            span.addEvent("llm.repair.success", {
+              repaired_object: JSON.stringify(repairedObject, safeReplacer())
+            });
+            logger.info("[generateObject] Successfully repaired JSON.");
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: "JSON parsing failed but was repaired"
+            });
+            return repairedObject;
+          } catch (repairParseError) {
+            const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+            logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+            const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
+            span.recordException(exception);
+            span.addEvent("llm.repair.parse_error", {
+              "error.message": message
+            });
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: `JSON repair failed: ${message}`
+            });
+            throw repairParseError;
+          }
+        } else {
+          const errMsg = error instanceof Error ? error.message : String(error);
+          logger.error("[generateObject] JSON repair failed.");
+          span.addEvent("llm.repair.failed");
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: `JSON repair failed: ${errMsg}`
+          });
+          throw error;
+        }
+      } else {
+        const message = error instanceof Error ? error.message : String(error);
+        logger.error(`[generateObject] Unknown error: ${message}`);
+        const exception = error instanceof Error ? error : new Error(message);
+        span.recordException(exception);
+        span.setStatus({
+          code: SpanStatusCode.ERROR,
+          message
+        });
+        throw error;
+      }
+    }
+  });
 }
 function getJsonRepairFunction() {
   return async ({ text, error }) => {
@@ -69,12 +703,26 @@ function getJsonRepairFunction() {
         JSON.parse(cleanedText);
         return cleanedText;
       }
+      return null;
     } catch (jsonError) {
-
+      const message = jsonError instanceof Error ? jsonError.message : String(jsonError);
+      logger.warn(`Failed to repair JSON text: ${message}`);
       return null;
     }
   };
 }
+function emitModelUsageEvent(runtime, type, prompt, usage) {
+  runtime.emitEvent(EventType.MODEL_USED, {
+    provider: "openai",
+    type,
+    prompt,
+    tokens: {
+      prompt: usage.promptTokens,
+      completion: usage.completionTokens,
+      total: usage.totalTokens
+    }
+  });
+}
 async function fetchTextToSpeech(runtime, text) {
   const apiKey = getApiKey(runtime);
   const model = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
@@ -101,7 +749,8 @@ async function fetchTextToSpeech(runtime, text) {
     }
     return res.body;
   } catch (err) {
-
+    const message = err instanceof Error ? err.message : String(err);
+    throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
   }
 }
 var openaiPlugin = {
@@ -134,29 +783,35 @@ var openaiPlugin = {
           logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
           logger.warn("OpenAI functionality will be limited until a valid API key is provided");
         } else {
+          logger.log("OpenAI API key validated successfully");
         }
       } catch (fetchError) {
-
+        const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+        logger.warn(`Error validating OpenAI API key: ${message}`);
         logger.warn("OpenAI functionality will be limited until a valid API key is provided");
       }
     } catch (error) {
+      const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
      logger.warn(
-        `OpenAI plugin configuration issue: ${
+        `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
       );
     }
   },
   models: {
     [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
-      const
-
+      const embeddingModelName = getSetting(
+        runtime,
+        "OPENAI_EMBEDDING_MODEL",
+        "text-embedding-3-small"
+      );
+      const embeddingDimension = Number.parseInt(
+        getSetting(runtime, "OPENAI_EMBEDDING_DIMENSIONS", "1536") || "1536",
+        10
       );
       if (!Object.values(VECTOR_DIMS).includes(embeddingDimension)) {
-
-
-        );
-        throw new Error(
-          `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`
-        );
+        const errorMsg = `Invalid embedding dimension: ${embeddingDimension}. Must be one of: ${Object.values(VECTOR_DIMS).join(", ")}`;
+        logger.error(errorMsg);
+        throw new Error(errorMsg);
       }
       if (params === null) {
         logger.debug("Creating test embedding for initialization");
@@ -181,41 +836,84 @@ var openaiPlugin = {
         emptyVector[0] = 0.3;
         return emptyVector;
       }
-
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "embedding",
+        "llm.request.model": embeddingModelName,
+        "llm.request.embedding.dimensions": embeddingDimension,
+        "input.text.length": text.length
+      };
+      return startLlmSpan(runtime, "LLM.embedding", attributes, async (span) => {
+        span.addEvent("llm.prompt", { "prompt.content": text });
         const baseURL = getBaseURL(runtime);
-      const
-
-
-
-
-      }
-
-        model: getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small"),
-        input: text
-      })
-      });
-      if (!response.ok) {
-        logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.4;
-        return errorVector;
+        const apiKey = getApiKey(runtime);
+        if (!apiKey) {
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: "OpenAI API key not configured"
+          });
+          throw new Error("OpenAI API key not configured");
         }
-
-
-
+        try {
+          const response = await fetch(`${baseURL}/embeddings`, {
+            method: "POST",
+            headers: {
+              Authorization: `Bearer ${apiKey}`,
+              "Content-Type": "application/json"
+            },
+            body: JSON.stringify({
+              model: embeddingModelName,
+              input: text
+            })
+          });
+          const responseClone = response.clone();
+          const rawResponseBody = await responseClone.text();
+          span.addEvent("llm.response.raw", {
+            "response.body": rawResponseBody
+          });
+          if (!response.ok) {
+            logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+            span.setAttributes({ "error.api.status": response.status });
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
+            });
+            const errorVector = Array(embeddingDimension).fill(0);
+            errorVector[0] = 0.4;
+            return errorVector;
+          }
+          const data = await response.json();
+          if (!data?.data?.[0]?.embedding) {
+            logger.error("API returned invalid structure");
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: "API returned invalid structure"
+            });
+            const errorVector = Array(embeddingDimension).fill(0);
+            errorVector[0] = 0.5;
+            return errorVector;
+          }
+          const embedding = data.data[0].embedding;
+          span.setAttribute("llm.response.embedding.vector_length", embedding.length);
+          if (data.usage) {
+            span.setAttributes({
+              "llm.usage.prompt_tokens": data.usage.prompt_tokens,
+              "llm.usage.total_tokens": data.usage.total_tokens
+            });
+          }
+          logger.log(`Got valid embedding with length ${embedding.length}`);
+          return embedding;
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          logger.error(`Error generating embedding: ${message}`);
+          const exception = error instanceof Error ? error : new Error(message);
+          span.recordException(exception);
+          span.setStatus({ code: SpanStatusCode.ERROR, message });
           const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.
+          errorVector[0] = 0.6;
           return errorVector;
         }
-
-      logger.log(`Got valid embedding with length ${embedding.length}`);
-      return embedding;
-    } catch (error) {
-      logger.error("Error generating embedding:", error);
-      const errorVector = Array(embeddingDimension).fill(0);
-      errorVector[0] = 0.6;
-      return errorVector;
-    }
+      });
     },
     [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
       return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
@@ -229,20 +927,45 @@ var openaiPlugin = {
       const presence_penalty = 0.7;
       const max_response_length = 8192;
       const openai = createOpenAIClient(runtime);
-      const
+      const modelName = getSmallModel(runtime);
       logger.log("generating text");
       logger.log(prompt);
-      const
-
-
-
-        temperature,
-
-
-
-        stopSequences
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "completion",
+        "llm.request.model": modelName,
+        "llm.request.temperature": temperature,
+        "llm.request.max_tokens": max_response_length,
+        "llm.request.frequency_penalty": frequency_penalty,
+        "llm.request.presence_penalty": presence_penalty,
+        "llm.request.stop_sequences": JSON.stringify(stopSequences)
+      };
+      return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+        span.addEvent("llm.prompt", { "prompt.content": prompt });
+        const { text: openaiResponse, usage } = await generateText({
+          model: openai.languageModel(modelName),
+          prompt,
+          system: runtime.character.system ?? void 0,
+          temperature,
+          maxTokens: max_response_length,
+          frequencyPenalty: frequency_penalty,
+          presencePenalty: presence_penalty,
+          stopSequences
+        });
+        span.setAttribute("llm.response.processed.length", openaiResponse.length);
+        span.addEvent("llm.response.processed", {
+          "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+        });
+        if (usage) {
+          span.setAttributes({
+            "llm.usage.prompt_tokens": usage.promptTokens,
+            "llm.usage.completion_tokens": usage.completionTokens,
+            "llm.usage.total_tokens": usage.totalTokens
+          });
+          emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+        }
+        return openaiResponse;
       });
-      return openaiResponse;
     },
     [ModelType.TEXT_LARGE]: async (runtime, {
       prompt,
@@ -253,134 +976,316 @@ var openaiPlugin = {
       presencePenalty = 0.7
     }) => {
       const openai = createOpenAIClient(runtime);
-      const
-
-
-
-
-
-
-
-
-
+      const modelName = getLargeModel(runtime);
+      logger.log("generating text");
+      logger.log(prompt);
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "completion",
+        "llm.request.model": modelName,
+        "llm.request.temperature": temperature,
+        "llm.request.max_tokens": maxTokens,
+        "llm.request.frequency_penalty": frequencyPenalty,
+        "llm.request.presence_penalty": presencePenalty,
+        "llm.request.stop_sequences": JSON.stringify(stopSequences)
+      };
+      return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
+        span.addEvent("llm.prompt", { "prompt.content": prompt });
+        const { text: openaiResponse, usage } = await generateText({
+          model: openai.languageModel(modelName),
+          prompt,
+          system: runtime.character.system ?? void 0,
+          temperature,
+          maxTokens,
+          frequencyPenalty,
+          presencePenalty,
+          stopSequences
+        });
+        span.setAttribute("llm.response.processed.length", openaiResponse.length);
+        span.addEvent("llm.response.processed", {
+          "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
+        });
+        if (usage) {
+          span.setAttributes({
+            "llm.usage.prompt_tokens": usage.promptTokens,
+            "llm.usage.completion_tokens": usage.completionTokens,
+            "llm.usage.total_tokens": usage.totalTokens
+          });
+          emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+        }
+        return openaiResponse;
       });
-      return openaiResponse;
     },
     [ModelType.IMAGE]: async (runtime, params) => {
-      const
-      const
-
-
-
-
-
-
-
-
-
-
+      const n = params.n || 1;
+      const size = params.size || "1024x1024";
+      const prompt = params.prompt;
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "image_generation",
+        "llm.request.image.size": size,
+        "llm.request.image.count": n
+      };
+      return startLlmSpan(runtime, "LLM.imageGeneration", attributes, async (span) => {
+        span.addEvent("llm.prompt", { "prompt.content": prompt });
+        const baseURL = getBaseURL(runtime);
+        const apiKey = getApiKey(runtime);
+        if (!apiKey) {
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: "OpenAI API key not configured"
+          });
+          throw new Error("OpenAI API key not configured");
+        }
+        try {
+          const response = await fetch(`${baseURL}/images/generations`, {
+            method: "POST",
+            headers: {
+              Authorization: `Bearer ${apiKey}`,
+              "Content-Type": "application/json"
+            },
+            body: JSON.stringify({
+              prompt,
+              n,
+              size
+            })
+          });
+          const responseClone = response.clone();
+          const rawResponseBody = await responseClone.text();
+          span.addEvent("llm.response.raw", {
+            "response.body": rawResponseBody
+          });
+          if (!response.ok) {
+            span.setAttributes({ "error.api.status": response.status });
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
+            });
+            throw new Error(`Failed to generate image: ${response.statusText}`);
+          }
+          const data = await response.json();
+          const typedData = data;
+          span.addEvent("llm.response.processed", {
+            "response.urls": JSON.stringify(typedData.data)
+          });
+          return typedData.data;
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          const exception = error instanceof Error ? error : new Error(message);
+          span.recordException(exception);
+          span.setStatus({ code: SpanStatusCode.ERROR, message });
+          throw error;
+        }
       });
-      if (!response.ok) {
-        throw new Error(`Failed to generate image: ${response.statusText}`);
-      }
-      const data = await response.json();
-      const typedData = data;
-      return typedData.data;
     },
     [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
       let imageUrl;
-      let
+      let promptText;
+      const modelName = "gpt-4o-mini";
+      const maxTokens = 300;
       if (typeof params === "string") {
         imageUrl = params;
-
+        promptText = "Please analyze this image and provide a title and detailed description.";
       } else {
         imageUrl = params.imageUrl;
-
+        promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
       }
-
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "chat",
+        "llm.request.model": modelName,
+        "llm.request.max_tokens": maxTokens,
+        "llm.request.image.url": imageUrl
+      };
+      const messages = [
+        {
+          role: "user",
+          content: [
+            { type: "text", text: promptText },
+            { type: "image_url", image_url: { url: imageUrl } }
+          ]
+        }
+      ];
+      return startLlmSpan(runtime, "LLM.imageDescription", attributes, async (span) => {
+        span.addEvent("llm.prompt", {
+          "prompt.content": JSON.stringify(messages, safeReplacer())
+        });
         const baseURL = getBaseURL(runtime);
         const apiKey = getApiKey(runtime);
         if (!apiKey) {
           logger.error("OpenAI API key not set");
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: "OpenAI API key not configured"
+          });
           return {
             title: "Failed to analyze image",
             description: "API key not configured"
           };
         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        try {
+          const response = await fetch(`${baseURL}/chat/completions`, {
+            method: "POST",
+            headers: {
+              "Content-Type": "application/json",
+              Authorization: `Bearer ${apiKey}`
+            },
+            body: JSON.stringify({
+              model: modelName,
+              messages,
+              max_tokens: maxTokens
+            })
+          });
+          const responseClone = response.clone();
+          const rawResponseBody = await responseClone.text();
+          span.addEvent("llm.response.raw", {
+            "response.body": rawResponseBody
+          });
+          if (!response.ok) {
+            span.setAttributes({ "error.api.status": response.status });
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
+            });
+            throw new Error(`OpenAI API error: ${response.status}`);
+          }
+          const result = await response.json();
+          const typedResult = result;
+          const content = typedResult.choices?.[0]?.message?.content;
+          if (typedResult.usage) {
+            span.setAttributes({
+              "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
+              "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
+              "llm.usage.total_tokens": typedResult.usage.total_tokens
+            });
+          }
+          if (typedResult.choices?.[0]?.finish_reason) {
+            span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
+          }
+          if (!content) {
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: "No content in API response"
+            });
+            return {
+              title: "Failed to analyze image",
+              description: "No response from API"
+            };
+          }
+          const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+          const title = titleMatch?.[1]?.trim() || "Image Analysis";
+          const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+          const processedResult = { title, description };
+          span.addEvent("llm.response.processed", {
+            "response.object": JSON.stringify(processedResult, safeReplacer())
+          });
+          return processedResult;
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          logger.error(`Error analyzing image: ${message}`);
+          const exception = error instanceof Error ? error : new Error(message);
+          span.recordException(exception);
+          span.setStatus({ code: SpanStatusCode.ERROR, message });
           return {
             title: "Failed to analyze image",
-            description:
+            description: `Error: ${message}`
           };
         }
-
-      const title = titleMatch?.[1] || "Image Analysis";
-      const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-      return { title, description };
-    } catch (error) {
-      logger.error("Error analyzing image:", error);
-      return {
-        title: "Failed to analyze image",
-        description: `Error: ${error instanceof Error ? error.message : String(error)}`
-      };
-    }
+      });
     },
     [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
       logger.log("audioBuffer", audioBuffer);
-      const
-      const
-
-
-
-
-
-
-
-
-
-
-
+      const modelName = "whisper-1";
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "transcription",
+        "llm.request.model": modelName,
+        "llm.request.audio.input_size_bytes": audioBuffer?.length || 0
+      };
+      return startLlmSpan(runtime, "LLM.transcription", attributes, async (span) => {
+        span.addEvent("llm.prompt", {
+          "prompt.info": "Audio buffer for transcription"
+        });
+        const baseURL = getBaseURL(runtime);
+        const apiKey = getApiKey(runtime);
+        if (!apiKey) {
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: "OpenAI API key not configured"
+          });
+          throw new Error("OpenAI API key not configured - Cannot make request");
+        }
+        if (!audioBuffer || audioBuffer.length === 0) {
+          span.setStatus({
+            code: SpanStatusCode.ERROR,
+            message: "Audio buffer is empty or invalid"
+          });
+          throw new Error("Audio buffer is empty or invalid for transcription");
+        }
+        const formData = new FormData();
+        formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+        formData.append("model", "whisper-1");
+        try {
+          const response = await fetch(`${baseURL}/audio/transcriptions`, {
+            method: "POST",
+            headers: {
+              Authorization: `Bearer ${apiKey}`
+            },
+            body: formData
+          });
+          const responseClone = response.clone();
+          const rawResponseBody = await responseClone.text();
+          span.addEvent("llm.response.raw", {
+            "response.body": rawResponseBody
+          });
+          logger.log("response", response);
+          if (!response.ok) {
+            span.setAttributes({ "error.api.status": response.status });
+            span.setStatus({
+              code: SpanStatusCode.ERROR,
+              message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
+            });
+            throw new Error(`Failed to transcribe audio: ${response.statusText}`);
+          }
+          const data = await response.json();
+          const processedText = data.text;
+          span.setAttribute("llm.response.processed.length", processedText.length);
+          span.addEvent("llm.response.processed", {
+            "response.text": processedText
+          });
+          return processedText;
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          const exception = error instanceof Error ? error : new Error(message);
+          span.recordException(exception);
+          span.setStatus({ code: SpanStatusCode.ERROR, message });
+          throw error;
+        }
       });
-      logger.log("response", response);
-      if (!response.ok) {
-        throw new Error(`Failed to transcribe audio: ${response.statusText}`);
-      }
-      const data = await response.json();
-      return data.text;
     },
     [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
-
+      const attributes = {
+        "llm.vendor": "OpenAI",
+        "llm.request.type": "tts",
+        "llm.request.model": getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts"),
+        "input.text.length": text.length
+      };
+      return startLlmSpan(runtime, "LLM.tts", attributes, async (span) => {
+        span.addEvent("llm.prompt", { "prompt.content": text });
+        try {
+          const speechStream = await fetchTextToSpeech(runtime, text);
+          span.addEvent("llm.response.success", {
+            info: "Speech stream generated"
+          });
+          return speechStream;
+        } catch (error) {
+          const message = error instanceof Error ? error.message : String(error);
+          const exception = error instanceof Error ? error : new Error(message);
+          span.recordException(exception);
+          span.setStatus({ code: SpanStatusCode.ERROR, message });
+          throw error;
+        }
+      });
     },
     [ModelType.OBJECT_SMALL]: async (runtime, params) => {
       return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
@@ -403,7 +1308,7 @@ var openaiPlugin = {
          }
        });
        const data = await response.json();
-       logger.log("Models Available:", data?.data
+       logger.log("Models Available:", data?.data?.length ?? "N/A");
        if (!response.ok) {
          throw new Error(`Failed to validate OpenAI API key: ${response.statusText}`);
        }
@@ -418,7 +1323,8 @@ var openaiPlugin = {
          });
          logger.log("embedding", embedding);
        } catch (error) {
-
+         const message = error instanceof Error ? error.message : String(error);
+         logger.error(`Error in test_text_embedding: ${message}`);
          throw error;
        }
      }
@@ -435,7 +1341,8 @@ var openaiPlugin = {
          }
          logger.log("generated with test_text_large:", text);
        } catch (error) {
-
+         const message = error instanceof Error ? error.message : String(error);
+         logger.error(`Error in test_text_large: ${message}`);
          throw error;
        }
      }
@@ -452,7 +1359,8 @@ var openaiPlugin = {
          }
          logger.log("generated with test_text_small:", text);
        } catch (error) {
-
+         const message = error instanceof Error ? error.message : String(error);
+         logger.error(`Error in test_text_small: ${message}`);
          throw error;
        }
      }
@@ -469,7 +1377,8 @@ var openaiPlugin = {
          });
          logger.log("generated with test_image_generation:", image);
        } catch (error) {
-
+         const message = error instanceof Error ? error.message : String(error);
+         logger.error(`Error in test_image_generation: ${message}`);
          throw error;
        }
      }
@@ -490,10 +1399,12 @@ var openaiPlugin = {
            logger.error("Invalid image description result format:", result);
          }
        } catch (e) {
-
+         const message = e instanceof Error ? e.message : String(e);
+         logger.error(`Error in image description test: ${message}`);
        }
      } catch (e) {
-
+       const message = e instanceof Error ? e.message : String(e);
+       logger.error(`Error in openai_test_image_description: ${message}`);
      }
    }
  },
@@ -512,7 +1423,8 @@ var openaiPlugin = {
        );
        logger.log("generated with test_transcription:", transcription);
      } catch (error) {
-
+       const message = error instanceof Error ? error.message : String(error);
+       logger.error(`Error in test_transcription: ${message}`);
        throw error;
      }
    }
@@ -553,7 +1465,8 @@ var openaiPlugin = {
        }
        logger.log("Generated speech successfully");
      } catch (error) {
-
+       const message = error instanceof Error ? error.message : String(error);
+       logger.error(`Error in openai_test_text_to_speech: ${message}`);
        throw error;
      }
    }
|