@elizaos/plugin-openai 1.0.3 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +3 -0
- package/dist/index.js +277 -1060
- package/dist/index.js.map +1 -1
- package/package.json +9 -2
package/dist/index.js
CHANGED
@@ -4,496 +4,8 @@ import {
   EventType,
   logger,
   ModelType,
-  safeReplacer,
-  ServiceType,
   VECTOR_DIMS
 } from "@elizaos/core";
-
-// node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
-var _globalThis = typeof globalThis === "object" ? globalThis : global;
-
-// node_modules/@opentelemetry/api/build/esm/version.js
-var VERSION = "1.9.0";
-
-// node_modules/@opentelemetry/api/build/esm/internal/semver.js
-var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
-function _makeCompatibilityCheck(ownVersion) {
-  var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
-  var rejectedVersions = /* @__PURE__ */ new Set();
-  var myVersionMatch = ownVersion.match(re);
-  if (!myVersionMatch) {
-    return function() {
-      return false;
-    };
-  }
-  var ownVersionParsed = {
-    major: +myVersionMatch[1],
-    minor: +myVersionMatch[2],
-    patch: +myVersionMatch[3],
-    prerelease: myVersionMatch[4]
-  };
-  if (ownVersionParsed.prerelease != null) {
-    return function isExactmatch(globalVersion) {
-      return globalVersion === ownVersion;
-    };
-  }
-  function _reject(v) {
-    rejectedVersions.add(v);
-    return false;
-  }
-  function _accept(v) {
-    acceptedVersions.add(v);
-    return true;
-  }
-  return function isCompatible2(globalVersion) {
-    if (acceptedVersions.has(globalVersion)) {
-      return true;
-    }
-    if (rejectedVersions.has(globalVersion)) {
-      return false;
-    }
-    var globalVersionMatch = globalVersion.match(re);
-    if (!globalVersionMatch) {
-      return _reject(globalVersion);
-    }
-    var globalVersionParsed = {
-      major: +globalVersionMatch[1],
-      minor: +globalVersionMatch[2],
-      patch: +globalVersionMatch[3],
-      prerelease: globalVersionMatch[4]
-    };
-    if (globalVersionParsed.prerelease != null) {
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.major !== globalVersionParsed.major) {
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.major === 0) {
-      if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
-        return _accept(globalVersion);
-      }
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.minor <= globalVersionParsed.minor) {
-      return _accept(globalVersion);
-    }
-    return _reject(globalVersion);
-  };
-}
-var isCompatible = _makeCompatibilityCheck(VERSION);
-
-// node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
-var major = VERSION.split(".")[0];
-var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
-var _global = _globalThis;
-function registerGlobal(type, instance, diag, allowOverride) {
-  var _a;
-  if (allowOverride === void 0) {
-    allowOverride = false;
-  }
-  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
-    version: VERSION
-  };
-  if (!allowOverride && api[type]) {
-    var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
-    diag.error(err.stack || err.message);
-    return false;
-  }
-  if (api.version !== VERSION) {
-    var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
-    diag.error(err.stack || err.message);
-    return false;
-  }
-  api[type] = instance;
-  diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
-  return true;
-}
-function getGlobal(type) {
-  var _a, _b;
-  var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
-  if (!globalVersion || !isCompatible(globalVersion)) {
-    return;
-  }
-  return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
-}
-function unregisterGlobal(type, diag) {
-  diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
-  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
-  if (api) {
-    delete api[type];
-  }
-}
-
-// node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
-var __read = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var DiagComponentLogger = (
-  /** @class */
-  function() {
-    function DiagComponentLogger2(props) {
-      this._namespace = props.namespace || "DiagComponentLogger";
-    }
-    DiagComponentLogger2.prototype.debug = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("debug", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.error = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("error", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.info = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("info", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.warn = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("warn", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.verbose = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("verbose", this._namespace, args);
-    };
-    return DiagComponentLogger2;
-  }()
-);
-function logProxy(funcName, namespace, args) {
-  var logger2 = getGlobal("diag");
-  if (!logger2) {
-    return;
-  }
-  args.unshift(namespace);
-  return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
-}
-
-// node_modules/@opentelemetry/api/build/esm/diag/types.js
-var DiagLogLevel;
-(function(DiagLogLevel2) {
-  DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
-  DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
-  DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
-  DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
-  DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
-  DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
-  DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
-})(DiagLogLevel || (DiagLogLevel = {}));
-
-// node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
-function createLogLevelDiagLogger(maxLevel, logger2) {
-  if (maxLevel < DiagLogLevel.NONE) {
-    maxLevel = DiagLogLevel.NONE;
-  } else if (maxLevel > DiagLogLevel.ALL) {
-    maxLevel = DiagLogLevel.ALL;
-  }
-  logger2 = logger2 || {};
-  function _filterFunc(funcName, theLevel) {
-    var theFunc = logger2[funcName];
-    if (typeof theFunc === "function" && maxLevel >= theLevel) {
-      return theFunc.bind(logger2);
-    }
-    return function() {
-    };
-  }
-  return {
-    error: _filterFunc("error", DiagLogLevel.ERROR),
-    warn: _filterFunc("warn", DiagLogLevel.WARN),
-    info: _filterFunc("info", DiagLogLevel.INFO),
-    debug: _filterFunc("debug", DiagLogLevel.DEBUG),
-    verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
-  };
-}
-
-// node_modules/@opentelemetry/api/build/esm/api/diag.js
-var __read2 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray2 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var API_NAME = "diag";
-var DiagAPI = (
-  /** @class */
-  function() {
-    function DiagAPI2() {
-      function _logProxy(funcName) {
-        return function() {
-          var args = [];
-          for (var _i = 0; _i < arguments.length; _i++) {
-            args[_i] = arguments[_i];
-          }
-          var logger2 = getGlobal("diag");
-          if (!logger2)
-            return;
-          return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
-        };
-      }
-      var self = this;
-      var setLogger = function(logger2, optionsOrLogLevel) {
-        var _a, _b, _c;
-        if (optionsOrLogLevel === void 0) {
-          optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
-        }
-        if (logger2 === self) {
-          var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
-          self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
-          return false;
-        }
-        if (typeof optionsOrLogLevel === "number") {
-          optionsOrLogLevel = {
-            logLevel: optionsOrLogLevel
-          };
-        }
-        var oldLogger = getGlobal("diag");
-        var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
-        if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
-          var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
-          oldLogger.warn("Current logger will be overwritten from " + stack);
-          newLogger.warn("Current logger will overwrite one already registered from " + stack);
-        }
-        return registerGlobal("diag", newLogger, self, true);
-      };
-      self.setLogger = setLogger;
-      self.disable = function() {
-        unregisterGlobal(API_NAME, self);
-      };
-      self.createComponentLogger = function(options) {
-        return new DiagComponentLogger(options);
-      };
-      self.verbose = _logProxy("verbose");
-      self.debug = _logProxy("debug");
-      self.info = _logProxy("info");
-      self.warn = _logProxy("warn");
-      self.error = _logProxy("error");
-    }
-    DiagAPI2.instance = function() {
-      if (!this._instance) {
-        this._instance = new DiagAPI2();
-      }
-      return this._instance;
-    };
-    return DiagAPI2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/context/context.js
-var BaseContext = (
-  /** @class */
-  /* @__PURE__ */ function() {
-    function BaseContext2(parentContext) {
-      var self = this;
-      self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
-      self.getValue = function(key) {
-        return self._currentContext.get(key);
-      };
-      self.setValue = function(key, value) {
-        var context2 = new BaseContext2(self._currentContext);
-        context2._currentContext.set(key, value);
-        return context2;
-      };
-      self.deleteValue = function(key) {
-        var context2 = new BaseContext2(self._currentContext);
-        context2._currentContext.delete(key);
-        return context2;
-      };
-    }
-    return BaseContext2;
-  }()
-);
-var ROOT_CONTEXT = new BaseContext();
-
-// node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
-var __read3 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray3 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var NoopContextManager = (
-  /** @class */
-  function() {
-    function NoopContextManager2() {
-    }
-    NoopContextManager2.prototype.active = function() {
-      return ROOT_CONTEXT;
-    };
-    NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
-      var args = [];
-      for (var _i = 3; _i < arguments.length; _i++) {
-        args[_i - 3] = arguments[_i];
-      }
-      return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
-    };
-    NoopContextManager2.prototype.bind = function(_context, target) {
-      return target;
-    };
-    NoopContextManager2.prototype.enable = function() {
-      return this;
-    };
-    NoopContextManager2.prototype.disable = function() {
-      return this;
-    };
-    return NoopContextManager2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/api/context.js
-var __read4 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray4 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var API_NAME2 = "context";
-var NOOP_CONTEXT_MANAGER = new NoopContextManager();
-var ContextAPI = (
-  /** @class */
-  function() {
-    function ContextAPI2() {
-    }
-    ContextAPI2.getInstance = function() {
-      if (!this._instance) {
-        this._instance = new ContextAPI2();
-      }
-      return this._instance;
-    };
-    ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
-      return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
-    };
-    ContextAPI2.prototype.active = function() {
-      return this._getContextManager().active();
-    };
-    ContextAPI2.prototype.with = function(context2, fn, thisArg) {
-      var _a;
-      var args = [];
-      for (var _i = 3; _i < arguments.length; _i++) {
-        args[_i - 3] = arguments[_i];
-      }
-      return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
-    };
-    ContextAPI2.prototype.bind = function(context2, target) {
-      return this._getContextManager().bind(context2, target);
-    };
-    ContextAPI2.prototype._getContextManager = function() {
-      return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
-    };
-    ContextAPI2.prototype.disable = function() {
-      this._getContextManager().disable();
-      unregisterGlobal(API_NAME2, DiagAPI.instance());
-    };
-    return ContextAPI2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/trace/status.js
-var SpanStatusCode;
-(function(SpanStatusCode2) {
-  SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
-  SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
-  SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
-})(SpanStatusCode || (SpanStatusCode = {}));
-
-// node_modules/@opentelemetry/api/build/esm/context-api.js
-var context = ContextAPI.getInstance();
-
-// src/index.ts
 import {
   generateObject,
   generateText,
@@ -501,60 +13,6 @@ import {
 } from "ai";
 import { encodingForModel } from "js-tiktoken";
 import { fetch, FormData } from "undici";
-function getTracer(runtime) {
-  const availableServices = Array.from(runtime.getAllServices().keys());
-  logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
-  logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
-  const instrumentationService = runtime.getService(
-    ServiceType.INSTRUMENTATION
-  );
-  if (!instrumentationService) {
-    logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
-    return null;
-  }
-  if (!instrumentationService.isEnabled()) {
-    logger.debug("[getTracer] Instrumentation service found but is disabled.");
-    return null;
-  }
-  logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
-  return instrumentationService.getTracer("eliza.llm.openai");
-}
-async function startLlmSpan(runtime, spanName, attributes, fn) {
-  const tracer = getTracer(runtime);
-  if (!tracer) {
-    const dummySpan = {
-      setAttribute: () => {
-      },
-      setAttributes: () => {
-      },
-      addEvent: () => {
-      },
-      recordException: () => {
-      },
-      setStatus: () => {
-      },
-      end: () => {
-      },
-      spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
-    };
-    return fn(dummySpan);
-  }
-  const activeContext = context.active();
-  return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
-    try {
-      const result = await fn(span);
-      span.setStatus({ code: SpanStatusCode.OK });
-      span.end();
-      return result;
-    } catch (error) {
-      const message = error instanceof Error ? error.message : String(error);
-      span.recordException(error);
-      span.setStatus({ code: SpanStatusCode.ERROR, message });
-      span.end();
-      throw error;
-    }
-  });
-}
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
@@ -575,6 +33,15 @@ function getEmbeddingBaseURL(runtime) {
 function getApiKey(runtime) {
   return getSetting(runtime, "OPENAI_API_KEY");
 }
+function getEmbeddingApiKey(runtime) {
+  const embeddingApiKey = getSetting(runtime, "OPENAI_EMBEDDING_API_KEY");
+  if (embeddingApiKey) {
+    logger.debug(`[OpenAI] Using specific embedding API key: ${embeddingApiKey}`);
+    return embeddingApiKey;
+  }
+  logger.debug("[OpenAI] Falling back to general API key for embeddings.");
+  return getApiKey(runtime);
+}
 function getSmallModel(runtime) {
   return getSetting(runtime, "OPENAI_SMALL_MODEL") ?? getSetting(runtime, "SMALL_MODEL", "gpt-4o-mini");
 }
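The new `getEmbeddingApiKey` helper lets embedding traffic authenticate with a different credential than chat traffic, which is useful when embeddings are routed through a separate account or proxy. A minimal sketch of the resolution order it implements, with a stubbed `runtime` standing in for the real AgentRuntime (only `getSetting()` is exercised by these helpers):

```js
// Sketch: embedding-key resolution in 1.0.6. The `runtime` stub is hypothetical.
const runtime = {
  getSetting: (key) =>
    ({ OPENAI_EMBEDDING_API_KEY: "sk-embed-xxx", OPENAI_API_KEY: "sk-general-xxx" })[key],
};

// Mirrors getSetting()/getApiKey()/getEmbeddingApiKey() from the hunk above.
const getSetting = (rt, key, def) => rt.getSetting(key) ?? process.env[key] ?? def;
const getApiKey = (rt) => getSetting(rt, "OPENAI_API_KEY");
const getEmbeddingApiKey = (rt) => getSetting(rt, "OPENAI_EMBEDDING_API_KEY") ?? getApiKey(rt);

console.log(getEmbeddingApiKey(runtime)); // "sk-embed-xxx"; falls back to OPENAI_API_KEY when unset
```

One caveat visible in the hunk: the debug log interpolates the raw embedding key, so enabling debug logging writes the secret itself to the log stream.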
@@ -607,106 +74,51 @@ async function generateObjectByModelType(runtime, params, modelType, getModelFn)
   logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
   const temperature = params.temperature ?? 0;
   const schemaPresent = !!params.schema;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  );
+  if (schemaPresent) {
+    logger.info(
+      `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+    );
+  }
+  try {
+    const { object, usage } = await generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
     }
-
-
-
-
-
-
-
-
-    span.addEvent("llm.response.processed", {
-      "response.object": JSON.stringify(object, safeReplacer())
+    return object;
+  } catch (error) {
+    if (error instanceof JSONParseError) {
+      logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+      const repairFunction = getJsonRepairFunction();
+      const repairedJsonString = await repairFunction({
+        text: error.text,
+        error
       });
-    if (
-
-
-
-    })
-
-
-
-  } catch (error) {
-    if (error instanceof JSONParseError) {
-      logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
-      span.recordException(error);
-      span.addEvent("llm.error.json_parse", {
-        "error.message": error.message,
-        "error.text": error.text
-      });
-      span.addEvent("llm.repair.attempt");
-      const repairFunction = getJsonRepairFunction();
-      const repairedJsonString = await repairFunction({
-        text: error.text,
-        error
-      });
-      if (repairedJsonString) {
-        try {
-          const repairedObject = JSON.parse(repairedJsonString);
-          span.addEvent("llm.repair.success", {
-            repaired_object: JSON.stringify(repairedObject, safeReplacer())
-          });
-          logger.info("[generateObject] Successfully repaired JSON.");
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: "JSON parsing failed but was repaired"
-          });
-          return repairedObject;
-        } catch (repairParseError) {
-          const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
-          logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
-          const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
-          span.recordException(exception);
-          span.addEvent("llm.repair.parse_error", {
-            "error.message": message
-          });
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: `JSON repair failed: ${message}`
-          });
-          throw repairParseError;
-        }
-      } else {
-        const errMsg = error instanceof Error ? error.message : String(error);
-        logger.error("[generateObject] JSON repair failed.");
-        span.addEvent("llm.repair.failed");
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message: `JSON repair failed: ${errMsg}`
-        });
-        throw error;
+      if (repairedJsonString) {
+        try {
+          const repairedObject = JSON.parse(repairedJsonString);
+          logger.info("[generateObject] Successfully repaired JSON.");
+          return repairedObject;
+        } catch (repairParseError) {
+          const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+          logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+          throw repairParseError;
        }
      } else {
-
-      logger.error(`[generateObject] Unknown error: ${message}`);
-      const exception = error instanceof Error ? error : new Error(message);
-      span.recordException(exception);
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message
-      });
+        logger.error("[generateObject] JSON repair failed.");
        throw error;
      }
+    } else {
+      const message = error instanceof Error ? error.message : String(error);
+      logger.error(`[generateObject] Unknown error: ${message}`);
+      throw error;
    }
-  }
+  }
 }
 function getJsonRepairFunction() {
   return async ({ text, error }) => {
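The repair path above has two layers: `experimental_repairText` hands the AI SDK a repair callback up front, and the `catch` block retries the same callback manually when a `JSONParseError` escapes. The callback's body is elided in this diff; its contract, inferred from the call sites, is `({ text, error }) => Promise<string | null>`. A hypothetical implementation of that shape:

```js
// Hypothetical stand-in for getJsonRepairFunction(); the real body is not
// shown in this diff. Returning null signals that repair failed, which the
// caller logs as "[generateObject] JSON repair failed." and rethrows.
function getJsonRepairFunctionSketch() {
  return async ({ text, error }) => {
    try {
      // Illustrative heuristic only: keep the outermost {...} span and re-check.
      const start = text.indexOf("{");
      const end = text.lastIndexOf("}");
      if (start === -1 || end <= start) return null;
      const candidate = text.slice(start, end + 1);
      JSON.parse(candidate); // throws if still invalid
      return candidate;
    } catch {
      return null;
    }
  };
}
```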
@@ -777,41 +189,45 @@ var openaiPlugin = {
     SMALL_MODEL: process.env.SMALL_MODEL,
     LARGE_MODEL: process.env.LARGE_MODEL,
     OPENAI_EMBEDDING_MODEL: process.env.OPENAI_EMBEDDING_MODEL,
+    OPENAI_EMBEDDING_API_KEY: process.env.OPENAI_EMBEDDING_API_KEY,
     OPENAI_EMBEDDING_URL: process.env.OPENAI_EMBEDDING_URL,
     OPENAI_EMBEDDING_DIMENSIONS: process.env.OPENAI_EMBEDDING_DIMENSIONS,
     OPENAI_IMAGE_DESCRIPTION_MODEL: process.env.OPENAI_IMAGE_DESCRIPTION_MODEL,
     OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS: process.env.OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS
   },
   async init(_config, runtime) {
-
-
-      logger.warn(
-        "OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited"
-      );
-      return;
-    }
+    new Promise(async (resolve) => {
+      resolve();
       try {
-
-
-
-
-
-
+        if (!getApiKey(runtime)) {
+          logger.warn(
+            "OPENAI_API_KEY is not set in environment - OpenAI functionality will be limited"
+          );
+          return;
+        }
+        try {
+          const baseURL = getBaseURL(runtime);
+          const response = await fetch(`${baseURL}/models`, {
+            headers: { Authorization: `Bearer ${getApiKey(runtime)}` }
+          });
+          if (!response.ok) {
+            logger.warn(`OpenAI API key validation failed: ${response.statusText}`);
+            logger.warn("OpenAI functionality will be limited until a valid API key is provided");
+          } else {
+            logger.log("OpenAI API key validated successfully");
+          }
+        } catch (fetchError) {
+          const message = fetchError instanceof Error ? fetchError.message : String(fetchError);
+          logger.warn(`Error validating OpenAI API key: ${message}`);
           logger.warn("OpenAI functionality will be limited until a valid API key is provided");
-      } else {
-        logger.log("OpenAI API key validated successfully");
         }
-    } catch (
-      const message =
-      logger.warn(
-
+      } catch (error) {
+        const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
+        logger.warn(
+          `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
+        );
       }
-    }
-    const message = error?.errors?.map((e) => e.message).join(", ") || (error instanceof Error ? error.message : String(error));
-    logger.warn(
-      `OpenAI plugin configuration issue: ${message} - You need to configure the OPENAI_API_KEY in your environment variables`
-    );
-  }
+    });
   },
   models: {
     [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
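Behaviorally, the rewritten `init` resolves its wrapper promise immediately (`resolve()` on the first line) and never awaits it, so key validation becomes fire-and-forget: startup no longer blocks on, or fails because of, the `/models` probe. The same effect can be had without the promise-constructor idiom; a sketch, where `validateOpenAiKey` is a hypothetical name for the probe shown in the hunk:

```js
// Equivalent non-blocking startup check without the unawaited
// `new Promise(async ...)` construction. validateOpenAiKey is hypothetical.
async function init(_config, runtime) {
  void (async () => {
    try {
      await validateOpenAiKey(runtime); // e.g. the GET `${baseURL}/models` probe above
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      logger.warn(`OpenAI plugin configuration issue: ${message}`);
    }
  })();
}
```

Either way, any rejection inside the background body is contained only by the try/catch; nothing upstream ever observes it.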
@@ -855,90 +271,56 @@ var openaiPlugin = {
         emptyVector[0] = 0.3;
         return emptyVector;
       }
-      const
-
-
-      "
-
-
-
-
-
-
-
-
-
-
-
-      })
-
+      const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+      const apiKey = getEmbeddingApiKey(runtime);
+      if (!apiKey) {
+        throw new Error("OpenAI API key not configured");
+      }
+      try {
+        const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+          method: "POST",
+          headers: {
+            Authorization: `Bearer ${apiKey}`,
+            "Content-Type": "application/json"
+          },
+          body: JSON.stringify({
+            model: embeddingModelName,
+            input: text
+          })
+        });
+        const responseClone = response.clone();
+        const rawResponseBody = await responseClone.text();
+        if (!response.ok) {
+          logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+          const errorVector = Array(embeddingDimension).fill(0);
+          errorVector[0] = 0.4;
+          return errorVector;
         }
-
-
-
-        headers: {
-          Authorization: `Bearer ${apiKey}`,
-          "Content-Type": "application/json"
-        },
-        body: JSON.stringify({
-          model: embeddingModelName,
-          input: text
-        })
-      });
-      const responseClone = response.clone();
-      const rawResponseBody = await responseClone.text();
-      span.addEvent("llm.response.raw", {
-        "response.body": rawResponseBody
-      });
-      if (!response.ok) {
-        logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-        span.setAttributes({ "error.api.status": response.status });
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
-        });
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.4;
-        return errorVector;
-      }
-      const data = await response.json();
-      if (!data?.data?.[0]?.embedding) {
-        logger.error("API returned invalid structure");
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message: "API returned invalid structure"
-        });
-        const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.5;
-        return errorVector;
-      }
-      const embedding = data.data[0].embedding;
-      span.setAttribute("llm.response.embedding.vector_length", embedding.length);
-      if (data.usage) {
-        span.setAttributes({
-          "llm.usage.prompt_tokens": data.usage.prompt_tokens,
-          "llm.usage.total_tokens": data.usage.total_tokens
-        });
-        const usage = {
-          promptTokens: data.usage.prompt_tokens,
-          completionTokens: 0,
-          totalTokens: data.usage.total_tokens
-        };
-        emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
-      }
-      logger.log(`Got valid embedding with length ${embedding.length}`);
-      return embedding;
-    } catch (error) {
-      const message = error instanceof Error ? error.message : String(error);
-      logger.error(`Error generating embedding: ${message}`);
-      const exception = error instanceof Error ? error : new Error(message);
-      span.recordException(exception);
-      span.setStatus({ code: SpanStatusCode.ERROR, message });
+        const data = await response.json();
+        if (!data?.data?.[0]?.embedding) {
+          logger.error("API returned invalid structure");
          const errorVector = Array(embeddingDimension).fill(0);
-        errorVector[0] = 0.
+          errorVector[0] = 0.5;
          return errorVector;
        }
-
+        const embedding = data.data[0].embedding;
+        if (data.usage) {
+          const usage = {
+            promptTokens: data.usage.prompt_tokens,
+            completionTokens: 0,
+            totalTokens: data.usage.total_tokens
+          };
+          emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
+        }
+        logger.log(`Got valid embedding with length ${embedding.length}`);
+        return embedding;
+      } catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        logger.error(`Error generating embedding: ${message}`);
+        const errorVector = Array(embeddingDimension).fill(0);
+        errorVector[0] = 0.6;
+        return errorVector;
+      }
     },
     [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
       return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
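A detail worth flagging in the embedding hunk: failures are reported in-band as zero vectors with a sentinel first component (0.3 for empty input, 0.4 for an HTTP error, 0.5 for a malformed response body, 0.6 for a thrown exception) rather than as propagated errors. Downstream code that persists embeddings may want to filter these out; a hypothetical check:

```js
// Hypothetical detector for the sentinel "error embeddings" this handler
// returns instead of throwing. SENTINELS mirrors the constants in the diff.
const SENTINELS = new Set([0.3, 0.4, 0.5, 0.6]);

function isErrorEmbedding(vector) {
  // Real embeddings are never all-zero past index 0.
  return SENTINELS.has(vector[0]) && vector.slice(1).every((v) => v === 0);
}
```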
@@ -955,42 +337,20 @@ var openaiPlugin = {
       const modelName = getSmallModel(runtime);
       logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
       logger.log(prompt);
-      const
-
-
-
-
-
-
-
-
-      };
-      return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
-        span.addEvent("llm.prompt", { "prompt.content": prompt });
-        const { text: openaiResponse, usage } = await generateText({
-          model: openai.languageModel(modelName),
-          prompt,
-          system: runtime.character.system ?? void 0,
-          temperature,
-          maxTokens: max_response_length,
-          frequencyPenalty: frequency_penalty,
-          presencePenalty: presence_penalty,
-          stopSequences
-        });
-        span.setAttribute("llm.response.processed.length", openaiResponse.length);
-        span.addEvent("llm.response.processed", {
-          "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
-        });
-        if (usage) {
-          span.setAttributes({
-            "llm.usage.prompt_tokens": usage.promptTokens,
-            "llm.usage.completion_tokens": usage.completionTokens,
-            "llm.usage.total_tokens": usage.totalTokens
-          });
-          emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
-        }
-        return openaiResponse;
+      const { text: openaiResponse, usage } = await generateText({
+        model: openai.languageModel(modelName),
+        prompt,
+        system: runtime.character.system ?? void 0,
+        temperature,
+        maxTokens: max_response_length,
+        frequencyPenalty: frequency_penalty,
+        presencePenalty: presence_penalty,
+        stopSequences
       });
+      if (usage) {
+        emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+      }
+      return openaiResponse;
     },
     [ModelType.TEXT_LARGE]: async (runtime, {
       prompt,
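With the spans gone, the TEXT_SMALL handler reduces to a single `generateText` call plus a usage event. Invoking it goes through the runtime's model dispatch; a sketch, assuming the elizaOS `runtime.useModel` entry point and the parameter names destructured in this hunk:

```js
// Hypothetical invocation of the handler above via the elizaOS runtime.
// `runtime.useModel` and the exact parameter set are assumptions, not
// confirmed by this diff.
const reply = await runtime.useModel(ModelType.TEXT_SMALL, {
  prompt: "Summarize the release in one sentence.",
  stopSequences: [],
});
console.log(reply); // the `openaiResponse` text returned by generateText
```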
@@ -1004,42 +364,20 @@ var openaiPlugin = {
       const modelName = getLargeModel(runtime);
       logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
       logger.log(prompt);
-      const
-
-
-
-
-
-
-
-
-      };
-      return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
-        span.addEvent("llm.prompt", { "prompt.content": prompt });
-        const { text: openaiResponse, usage } = await generateText({
-          model: openai.languageModel(modelName),
-          prompt,
-          system: runtime.character.system ?? void 0,
-          temperature,
-          maxTokens,
-          frequencyPenalty,
-          presencePenalty,
-          stopSequences
-        });
-        span.setAttribute("llm.response.processed.length", openaiResponse.length);
-        span.addEvent("llm.response.processed", {
-          "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
-        });
-        if (usage) {
-          span.setAttributes({
-            "llm.usage.prompt_tokens": usage.promptTokens,
-            "llm.usage.completion_tokens": usage.completionTokens,
-            "llm.usage.total_tokens": usage.totalTokens
-          });
-          emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
-        }
-        return openaiResponse;
+      const { text: openaiResponse, usage } = await generateText({
+        model: openai.languageModel(modelName),
+        prompt,
+        system: runtime.character.system ?? void 0,
+        temperature,
+        maxTokens,
+        frequencyPenalty,
+        presencePenalty,
+        stopSequences
       });
+      if (usage) {
+        emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+      }
+      return openaiResponse;
     },
     [ModelType.IMAGE]: async (runtime, params) => {
       const n = params.n || 1;
@@ -1047,63 +385,36 @@ var openaiPlugin = {
       const prompt = params.prompt;
       const modelName = "dall-e-3";
       logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-      const
-
-
-      "
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          "Content-Type": "application/json"
-        },
-        body: JSON.stringify({
-          prompt,
-          n,
-          size
-        })
-      });
-      const responseClone = response.clone();
-      const rawResponseBody = await responseClone.text();
-      span.addEvent("llm.response.raw", {
-        "response.body": rawResponseBody
-      });
-      if (!response.ok) {
-        span.setAttributes({ "error.api.status": response.status });
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
-        });
-        throw new Error(`Failed to generate image: ${response.statusText}`);
-      }
-      const data = await response.json();
-      const typedData = data;
-      span.addEvent("llm.response.processed", {
-        "response.urls": JSON.stringify(typedData.data)
-      });
-      return typedData.data;
-    } catch (error) {
-      const message = error instanceof Error ? error.message : String(error);
-      const exception = error instanceof Error ? error : new Error(message);
-      span.recordException(exception);
-      span.setStatus({ code: SpanStatusCode.ERROR, message });
-      throw error;
+      const baseURL = getBaseURL(runtime);
+      const apiKey = getApiKey(runtime);
+      if (!apiKey) {
+        throw new Error("OpenAI API key not configured");
+      }
+      try {
+        const response = await fetch(`${baseURL}/images/generations`, {
+          method: "POST",
+          headers: {
+            Authorization: `Bearer ${apiKey}`,
+            "Content-Type": "application/json"
+          },
+          body: JSON.stringify({
+            prompt,
+            n,
+            size
+          })
+        });
+        const responseClone = response.clone();
+        const rawResponseBody = await responseClone.text();
+        if (!response.ok) {
+          throw new Error(`Failed to generate image: ${response.statusText}`);
         }
-
+        const data = await response.json();
+        const typedData = data;
+        return typedData.data;
+      } catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        throw error;
+      }
     },
     [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
       let imageUrl;
@@ -1121,13 +432,6 @@ var openaiPlugin = {
         imageUrl = params.imageUrl;
         promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
       }
-      const attributes = {
-        "llm.vendor": "OpenAI",
-        "llm.request.type": "chat",
-        "llm.request.model": modelName,
-        "llm.request.max_tokens": maxTokens,
-        "llm.request.image.url": imageUrl
-      };
       const messages = [
         {
           role: "user",
@@ -1137,209 +441,122 @@ var openaiPlugin = {
           ]
         }
       ];
-
-
-
+      const baseURL = getBaseURL(runtime);
+      const apiKey = getApiKey(runtime);
+      if (!apiKey) {
+        logger.error("OpenAI API key not set");
+        return {
+          title: "Failed to analyze image",
+          description: "API key not configured"
+        };
+      }
+      try {
+        const requestBody = {
+          model: modelName,
+          messages,
+          max_tokens: maxTokens
+        };
+        const response = await fetch(`${baseURL}/chat/completions`, {
+          method: "POST",
+          headers: {
+            "Content-Type": "application/json",
+            Authorization: `Bearer ${apiKey}`
+          },
+          body: JSON.stringify(requestBody)
         });
-        const
-        const
-        if (!
-
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: "OpenAI API key not configured"
-          });
-          return {
-            title: "Failed to analyze image",
-            description: "API key not configured"
-          };
+        const responseClone = response.clone();
+        const rawResponseBody = await responseClone.text();
+        if (!response.ok) {
+          throw new Error(`OpenAI API error: ${response.status}`);
         }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-          "response.body": rawResponseBody
-        });
-        if (!response.ok) {
-          span.setAttributes({ "error.api.status": response.status });
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
-          });
-          throw new Error(`OpenAI API error: ${response.status}`);
-        }
-        const result = await response.json();
-        const typedResult = result;
-        const content = typedResult.choices?.[0]?.message?.content;
-        console.log("############## CONTENT", content);
-        if (typedResult.usage) {
-          span.setAttributes({
-            "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
-            "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
-            "llm.usage.total_tokens": typedResult.usage.total_tokens
-          });
-          emitModelUsageEvent(
-            runtime,
-            ModelType.IMAGE_DESCRIPTION,
-            typeof params === "string" ? params : params.prompt || "",
-            {
-              promptTokens: typedResult.usage.prompt_tokens,
-              completionTokens: typedResult.usage.completion_tokens,
-              totalTokens: typedResult.usage.total_tokens
-            }
-          );
-        }
-        if (typedResult.choices?.[0]?.finish_reason) {
-          span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
-        }
-        if (!content) {
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: "No content in API response"
-          });
-          return {
-            title: "Failed to analyze image",
-            description: "No response from API"
-          };
-        }
-        console.log("######################## CONTENT", content);
-        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-        if (isCustomPrompt) {
-          span.addEvent("llm.response.raw_content", {
-            "response.content": content
-          });
-          return content;
-        }
-        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-        const title = titleMatch?.[1]?.trim() || "Image Analysis";
-        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-        const processedResult = { title, description };
-        span.addEvent("llm.response.processed", {
-          "response.object": JSON.stringify(processedResult, safeReplacer())
-        });
-        return processedResult;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        logger.error(`Error analyzing image: ${message}`);
-        const exception = error instanceof Error ? error : new Error(message);
-        span.recordException(exception);
-        span.setStatus({ code: SpanStatusCode.ERROR, message });
+        const result = await response.json();
+        const typedResult = result;
+        const content = typedResult.choices?.[0]?.message?.content;
+        console.log("############## CONTENT", content);
+        if (typedResult.usage) {
+          emitModelUsageEvent(
+            runtime,
+            ModelType.IMAGE_DESCRIPTION,
+            typeof params === "string" ? params : params.prompt || "",
+            {
+              promptTokens: typedResult.usage.prompt_tokens,
+              completionTokens: typedResult.usage.completion_tokens,
+              totalTokens: typedResult.usage.total_tokens
+            }
+          );
+        }
+        if (!content) {
          return {
            title: "Failed to analyze image",
-          description:
+            description: "No response from API"
          };
        }
-
+        console.log("######################## CONTENT", content);
+        const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
+        if (isCustomPrompt) {
+          return content;
+        }
+        const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+        const title = titleMatch?.[1]?.trim() || "Image Analysis";
+        const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+        const processedResult = { title, description };
+        return processedResult;
+      } catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        logger.error(`Error analyzing image: ${message}`);
+        return {
+          title: "Failed to analyze image",
+          description: `Error: ${message}`
+        };
+      }
     },
     [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
       logger.log("audioBuffer", audioBuffer);
       const modelName = "whisper-1";
       logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-      const
-
-
-      "
-
-
-
-
-
+      const baseURL = getBaseURL(runtime);
+      const apiKey = getApiKey(runtime);
+      if (!apiKey) {
+        throw new Error("OpenAI API key not configured - Cannot make request");
+      }
+      if (!audioBuffer || audioBuffer.length === 0) {
+        throw new Error("Audio buffer is empty or invalid for transcription");
+      }
+      const formData = new FormData();
+      formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+      formData.append("model", "whisper-1");
+      try {
+        const response = await fetch(`${baseURL}/audio/transcriptions`, {
+          method: "POST",
+          headers: {
+            Authorization: `Bearer ${apiKey}`
+          },
+          body: formData
         });
-        const
-        const
-
-
-
-          message: "OpenAI API key not configured"
-        });
-        throw new Error("OpenAI API key not configured - Cannot make request");
-      }
-      if (!audioBuffer || audioBuffer.length === 0) {
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message: "Audio buffer is empty or invalid"
-        });
-        throw new Error("Audio buffer is empty or invalid for transcription");
-      }
-      const formData = new FormData();
-      formData.append("file", new Blob([audioBuffer]), "recording.mp3");
-      formData.append("model", "whisper-1");
-      try {
-        const response = await fetch(`${baseURL}/audio/transcriptions`, {
-          method: "POST",
-          headers: {
-            Authorization: `Bearer ${apiKey}`
-          },
-          body: formData
-        });
-        const responseClone = response.clone();
-        const rawResponseBody = await responseClone.text();
-        span.addEvent("llm.response.raw", {
-          "response.body": rawResponseBody
-        });
-        logger.log("response", response);
-        if (!response.ok) {
-          span.setAttributes({ "error.api.status": response.status });
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
-          });
-          throw new Error(`Failed to transcribe audio: ${response.statusText}`);
-        }
-        const data = await response.json();
-        const processedText = data.text;
-        span.setAttribute("llm.response.processed.length", processedText.length);
-        span.addEvent("llm.response.processed", {
-          "response.text": processedText
-        });
-        return processedText;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        const exception = error instanceof Error ? error : new Error(message);
-        span.recordException(exception);
-        span.setStatus({ code: SpanStatusCode.ERROR, message });
-        throw error;
+        const responseClone = response.clone();
+        const rawResponseBody = await responseClone.text();
+        logger.log("response", response);
+        if (!response.ok) {
+          throw new Error(`Failed to transcribe audio: ${response.statusText}`);
         }
-
+        const data = await response.json();
+        const processedText = data.text;
+        return processedText;
+      } catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        throw error;
+      }
     },
     [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
       const ttsModelName = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-
-
-
-
-
-
-
-
-      span.addEvent("llm.prompt", { "prompt.content": text });
-      try {
-        const speechStream = await fetchTextToSpeech(runtime, text);
-        span.addEvent("llm.response.success", {
-          info: "Speech stream generated"
-        });
-        return speechStream;
-      } catch (error) {
-        const message = error instanceof Error ? error.message : String(error);
-        const exception = error instanceof Error ? error : new Error(message);
-        span.recordException(exception);
-        span.setStatus({ code: SpanStatusCode.ERROR, message });
-        throw error;
-      }
-      });
+      logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${ttsModelName}`);
+      try {
+        const speechStream = await fetchTextToSpeech(runtime, text);
+        return speechStream;
+      } catch (error) {
+        const message = error instanceof Error ? error.message : String(error);
+        throw error;
+      }
     },
     [ModelType.OBJECT_SMALL]: async (runtime, params) => {
       return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);