@elizaos/plugin-openai 1.0.3 → 1.0.4
This diff reflects the changes between publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.
- package/dist/index.js +239 -1035
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
package/dist/index.js
CHANGED
@@ -4,496 +4,8 @@ import {
   EventType,
   logger,
   ModelType,
-  safeReplacer,
-  ServiceType,
   VECTOR_DIMS
 } from "@elizaos/core";
-
-// node_modules/@opentelemetry/api/build/esm/platform/node/globalThis.js
-var _globalThis = typeof globalThis === "object" ? globalThis : global;
-
-// node_modules/@opentelemetry/api/build/esm/version.js
-var VERSION = "1.9.0";
-
-// node_modules/@opentelemetry/api/build/esm/internal/semver.js
-var re = /^(\d+)\.(\d+)\.(\d+)(-(.+))?$/;
-function _makeCompatibilityCheck(ownVersion) {
-  var acceptedVersions = /* @__PURE__ */ new Set([ownVersion]);
-  var rejectedVersions = /* @__PURE__ */ new Set();
-  var myVersionMatch = ownVersion.match(re);
-  if (!myVersionMatch) {
-    return function() {
-      return false;
-    };
-  }
-  var ownVersionParsed = {
-    major: +myVersionMatch[1],
-    minor: +myVersionMatch[2],
-    patch: +myVersionMatch[3],
-    prerelease: myVersionMatch[4]
-  };
-  if (ownVersionParsed.prerelease != null) {
-    return function isExactmatch(globalVersion) {
-      return globalVersion === ownVersion;
-    };
-  }
-  function _reject(v) {
-    rejectedVersions.add(v);
-    return false;
-  }
-  function _accept(v) {
-    acceptedVersions.add(v);
-    return true;
-  }
-  return function isCompatible2(globalVersion) {
-    if (acceptedVersions.has(globalVersion)) {
-      return true;
-    }
-    if (rejectedVersions.has(globalVersion)) {
-      return false;
-    }
-    var globalVersionMatch = globalVersion.match(re);
-    if (!globalVersionMatch) {
-      return _reject(globalVersion);
-    }
-    var globalVersionParsed = {
-      major: +globalVersionMatch[1],
-      minor: +globalVersionMatch[2],
-      patch: +globalVersionMatch[3],
-      prerelease: globalVersionMatch[4]
-    };
-    if (globalVersionParsed.prerelease != null) {
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.major !== globalVersionParsed.major) {
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.major === 0) {
-      if (ownVersionParsed.minor === globalVersionParsed.minor && ownVersionParsed.patch <= globalVersionParsed.patch) {
-        return _accept(globalVersion);
-      }
-      return _reject(globalVersion);
-    }
-    if (ownVersionParsed.minor <= globalVersionParsed.minor) {
-      return _accept(globalVersion);
-    }
-    return _reject(globalVersion);
-  };
-}
-var isCompatible = _makeCompatibilityCheck(VERSION);
-
-// node_modules/@opentelemetry/api/build/esm/internal/global-utils.js
-var major = VERSION.split(".")[0];
-var GLOBAL_OPENTELEMETRY_API_KEY = Symbol.for("opentelemetry.js.api." + major);
-var _global = _globalThis;
-function registerGlobal(type, instance, diag, allowOverride) {
-  var _a;
-  if (allowOverride === void 0) {
-    allowOverride = false;
-  }
-  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY] = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) !== null && _a !== void 0 ? _a : {
-    version: VERSION
-  };
-  if (!allowOverride && api[type]) {
-    var err = new Error("@opentelemetry/api: Attempted duplicate registration of API: " + type);
-    diag.error(err.stack || err.message);
-    return false;
-  }
-  if (api.version !== VERSION) {
-    var err = new Error("@opentelemetry/api: Registration of version v" + api.version + " for " + type + " does not match previously registered API v" + VERSION);
-    diag.error(err.stack || err.message);
-    return false;
-  }
-  api[type] = instance;
-  diag.debug("@opentelemetry/api: Registered a global for " + type + " v" + VERSION + ".");
-  return true;
-}
-function getGlobal(type) {
-  var _a, _b;
-  var globalVersion = (_a = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _a === void 0 ? void 0 : _a.version;
-  if (!globalVersion || !isCompatible(globalVersion)) {
-    return;
-  }
-  return (_b = _global[GLOBAL_OPENTELEMETRY_API_KEY]) === null || _b === void 0 ? void 0 : _b[type];
-}
-function unregisterGlobal(type, diag) {
-  diag.debug("@opentelemetry/api: Unregistering a global for " + type + " v" + VERSION + ".");
-  var api = _global[GLOBAL_OPENTELEMETRY_API_KEY];
-  if (api) {
-    delete api[type];
-  }
-}
-
-// node_modules/@opentelemetry/api/build/esm/diag/ComponentLogger.js
-var __read = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var DiagComponentLogger = (
-  /** @class */
-  function() {
-    function DiagComponentLogger2(props) {
-      this._namespace = props.namespace || "DiagComponentLogger";
-    }
-    DiagComponentLogger2.prototype.debug = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("debug", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.error = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("error", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.info = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("info", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.warn = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("warn", this._namespace, args);
-    };
-    DiagComponentLogger2.prototype.verbose = function() {
-      var args = [];
-      for (var _i = 0; _i < arguments.length; _i++) {
-        args[_i] = arguments[_i];
-      }
-      return logProxy("verbose", this._namespace, args);
-    };
-    return DiagComponentLogger2;
-  }()
-);
-function logProxy(funcName, namespace, args) {
-  var logger2 = getGlobal("diag");
-  if (!logger2) {
-    return;
-  }
-  args.unshift(namespace);
-  return logger2[funcName].apply(logger2, __spreadArray([], __read(args), false));
-}
-
-// node_modules/@opentelemetry/api/build/esm/diag/types.js
-var DiagLogLevel;
-(function(DiagLogLevel2) {
-  DiagLogLevel2[DiagLogLevel2["NONE"] = 0] = "NONE";
-  DiagLogLevel2[DiagLogLevel2["ERROR"] = 30] = "ERROR";
-  DiagLogLevel2[DiagLogLevel2["WARN"] = 50] = "WARN";
-  DiagLogLevel2[DiagLogLevel2["INFO"] = 60] = "INFO";
-  DiagLogLevel2[DiagLogLevel2["DEBUG"] = 70] = "DEBUG";
-  DiagLogLevel2[DiagLogLevel2["VERBOSE"] = 80] = "VERBOSE";
-  DiagLogLevel2[DiagLogLevel2["ALL"] = 9999] = "ALL";
-})(DiagLogLevel || (DiagLogLevel = {}));
-
-// node_modules/@opentelemetry/api/build/esm/diag/internal/logLevelLogger.js
-function createLogLevelDiagLogger(maxLevel, logger2) {
-  if (maxLevel < DiagLogLevel.NONE) {
-    maxLevel = DiagLogLevel.NONE;
-  } else if (maxLevel > DiagLogLevel.ALL) {
-    maxLevel = DiagLogLevel.ALL;
-  }
-  logger2 = logger2 || {};
-  function _filterFunc(funcName, theLevel) {
-    var theFunc = logger2[funcName];
-    if (typeof theFunc === "function" && maxLevel >= theLevel) {
-      return theFunc.bind(logger2);
-    }
-    return function() {
-    };
-  }
-  return {
-    error: _filterFunc("error", DiagLogLevel.ERROR),
-    warn: _filterFunc("warn", DiagLogLevel.WARN),
-    info: _filterFunc("info", DiagLogLevel.INFO),
-    debug: _filterFunc("debug", DiagLogLevel.DEBUG),
-    verbose: _filterFunc("verbose", DiagLogLevel.VERBOSE)
-  };
-}
-
-// node_modules/@opentelemetry/api/build/esm/api/diag.js
-var __read2 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray2 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var API_NAME = "diag";
-var DiagAPI = (
-  /** @class */
-  function() {
-    function DiagAPI2() {
-      function _logProxy(funcName) {
-        return function() {
-          var args = [];
-          for (var _i = 0; _i < arguments.length; _i++) {
-            args[_i] = arguments[_i];
-          }
-          var logger2 = getGlobal("diag");
-          if (!logger2)
-            return;
-          return logger2[funcName].apply(logger2, __spreadArray2([], __read2(args), false));
-        };
-      }
-      var self = this;
-      var setLogger = function(logger2, optionsOrLogLevel) {
-        var _a, _b, _c;
-        if (optionsOrLogLevel === void 0) {
-          optionsOrLogLevel = { logLevel: DiagLogLevel.INFO };
-        }
-        if (logger2 === self) {
-          var err = new Error("Cannot use diag as the logger for itself. Please use a DiagLogger implementation like ConsoleDiagLogger or a custom implementation");
-          self.error((_a = err.stack) !== null && _a !== void 0 ? _a : err.message);
-          return false;
-        }
-        if (typeof optionsOrLogLevel === "number") {
-          optionsOrLogLevel = {
-            logLevel: optionsOrLogLevel
-          };
-        }
-        var oldLogger = getGlobal("diag");
-        var newLogger = createLogLevelDiagLogger((_b = optionsOrLogLevel.logLevel) !== null && _b !== void 0 ? _b : DiagLogLevel.INFO, logger2);
-        if (oldLogger && !optionsOrLogLevel.suppressOverrideMessage) {
-          var stack = (_c = new Error().stack) !== null && _c !== void 0 ? _c : "<failed to generate stacktrace>";
-          oldLogger.warn("Current logger will be overwritten from " + stack);
-          newLogger.warn("Current logger will overwrite one already registered from " + stack);
-        }
-        return registerGlobal("diag", newLogger, self, true);
-      };
-      self.setLogger = setLogger;
-      self.disable = function() {
-        unregisterGlobal(API_NAME, self);
-      };
-      self.createComponentLogger = function(options) {
-        return new DiagComponentLogger(options);
-      };
-      self.verbose = _logProxy("verbose");
-      self.debug = _logProxy("debug");
-      self.info = _logProxy("info");
-      self.warn = _logProxy("warn");
-      self.error = _logProxy("error");
-    }
-    DiagAPI2.instance = function() {
-      if (!this._instance) {
-        this._instance = new DiagAPI2();
-      }
-      return this._instance;
-    };
-    return DiagAPI2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/context/context.js
-var BaseContext = (
-  /** @class */
-  /* @__PURE__ */ function() {
-    function BaseContext2(parentContext) {
-      var self = this;
-      self._currentContext = parentContext ? new Map(parentContext) : /* @__PURE__ */ new Map();
-      self.getValue = function(key) {
-        return self._currentContext.get(key);
-      };
-      self.setValue = function(key, value) {
-        var context2 = new BaseContext2(self._currentContext);
-        context2._currentContext.set(key, value);
-        return context2;
-      };
-      self.deleteValue = function(key) {
-        var context2 = new BaseContext2(self._currentContext);
-        context2._currentContext.delete(key);
-        return context2;
-      };
-    }
-    return BaseContext2;
-  }()
-);
-var ROOT_CONTEXT = new BaseContext();
-
-// node_modules/@opentelemetry/api/build/esm/context/NoopContextManager.js
-var __read3 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray3 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var NoopContextManager = (
-  /** @class */
-  function() {
-    function NoopContextManager2() {
-    }
-    NoopContextManager2.prototype.active = function() {
-      return ROOT_CONTEXT;
-    };
-    NoopContextManager2.prototype.with = function(_context, fn, thisArg) {
-      var args = [];
-      for (var _i = 3; _i < arguments.length; _i++) {
-        args[_i - 3] = arguments[_i];
-      }
-      return fn.call.apply(fn, __spreadArray3([thisArg], __read3(args), false));
-    };
-    NoopContextManager2.prototype.bind = function(_context, target) {
-      return target;
-    };
-    NoopContextManager2.prototype.enable = function() {
-      return this;
-    };
-    NoopContextManager2.prototype.disable = function() {
-      return this;
-    };
-    return NoopContextManager2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/api/context.js
-var __read4 = function(o, n) {
-  var m = typeof Symbol === "function" && o[Symbol.iterator];
-  if (!m) return o;
-  var i = m.call(o), r, ar = [], e;
-  try {
-    while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);
-  } catch (error) {
-    e = { error };
-  } finally {
-    try {
-      if (r && !r.done && (m = i["return"])) m.call(i);
-    } finally {
-      if (e) throw e.error;
-    }
-  }
-  return ar;
-};
-var __spreadArray4 = function(to, from, pack) {
-  if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
-    if (ar || !(i in from)) {
-      if (!ar) ar = Array.prototype.slice.call(from, 0, i);
-      ar[i] = from[i];
-    }
-  }
-  return to.concat(ar || Array.prototype.slice.call(from));
-};
-var API_NAME2 = "context";
-var NOOP_CONTEXT_MANAGER = new NoopContextManager();
-var ContextAPI = (
-  /** @class */
-  function() {
-    function ContextAPI2() {
-    }
-    ContextAPI2.getInstance = function() {
-      if (!this._instance) {
-        this._instance = new ContextAPI2();
-      }
-      return this._instance;
-    };
-    ContextAPI2.prototype.setGlobalContextManager = function(contextManager) {
-      return registerGlobal(API_NAME2, contextManager, DiagAPI.instance());
-    };
-    ContextAPI2.prototype.active = function() {
-      return this._getContextManager().active();
-    };
-    ContextAPI2.prototype.with = function(context2, fn, thisArg) {
-      var _a;
-      var args = [];
-      for (var _i = 3; _i < arguments.length; _i++) {
-        args[_i - 3] = arguments[_i];
-      }
-      return (_a = this._getContextManager()).with.apply(_a, __spreadArray4([context2, fn, thisArg], __read4(args), false));
-    };
-    ContextAPI2.prototype.bind = function(context2, target) {
-      return this._getContextManager().bind(context2, target);
-    };
-    ContextAPI2.prototype._getContextManager = function() {
-      return getGlobal(API_NAME2) || NOOP_CONTEXT_MANAGER;
-    };
-    ContextAPI2.prototype.disable = function() {
-      this._getContextManager().disable();
-      unregisterGlobal(API_NAME2, DiagAPI.instance());
-    };
-    return ContextAPI2;
-  }()
-);
-
-// node_modules/@opentelemetry/api/build/esm/trace/status.js
-var SpanStatusCode;
-(function(SpanStatusCode2) {
-  SpanStatusCode2[SpanStatusCode2["UNSET"] = 0] = "UNSET";
-  SpanStatusCode2[SpanStatusCode2["OK"] = 1] = "OK";
-  SpanStatusCode2[SpanStatusCode2["ERROR"] = 2] = "ERROR";
-})(SpanStatusCode || (SpanStatusCode = {}));
-
-// node_modules/@opentelemetry/api/build/esm/context-api.js
-var context = ContextAPI.getInstance();
-
-// src/index.ts
 import {
   generateObject,
   generateText,
@@ -501,60 +13,6 @@ import {
 } from "ai";
 import { encodingForModel } from "js-tiktoken";
 import { fetch, FormData } from "undici";
-function getTracer(runtime) {
-  const availableServices = Array.from(runtime.getAllServices().keys());
-  logger.debug(`[getTracer] Available services: ${JSON.stringify(availableServices)}`);
-  logger.debug(`[getTracer] Attempting to get service with key: ${ServiceType.INSTRUMENTATION}`);
-  const instrumentationService = runtime.getService(
-    ServiceType.INSTRUMENTATION
-  );
-  if (!instrumentationService) {
-    logger.warn(`[getTracer] Service ${ServiceType.INSTRUMENTATION} not found in runtime.`);
-    return null;
-  }
-  if (!instrumentationService.isEnabled()) {
-    logger.debug("[getTracer] Instrumentation service found but is disabled.");
-    return null;
-  }
-  logger.debug("[getTracer] Successfully retrieved enabled instrumentation service.");
-  return instrumentationService.getTracer("eliza.llm.openai");
-}
-async function startLlmSpan(runtime, spanName, attributes, fn) {
-  const tracer = getTracer(runtime);
-  if (!tracer) {
-    const dummySpan = {
-      setAttribute: () => {
-      },
-      setAttributes: () => {
-      },
-      addEvent: () => {
-      },
-      recordException: () => {
-      },
-      setStatus: () => {
-      },
-      end: () => {
-      },
-      spanContext: () => ({ traceId: "", spanId: "", traceFlags: 0 })
-    };
-    return fn(dummySpan);
-  }
-  const activeContext = context.active();
-  return tracer.startActiveSpan(spanName, { attributes }, activeContext, async (span) => {
-    try {
-      const result = await fn(span);
-      span.setStatus({ code: SpanStatusCode.OK });
-      span.end();
-      return result;
-    } catch (error) {
-      const message = error instanceof Error ? error.message : String(error);
-      span.recordException(error);
-      span.setStatus({ code: SpanStatusCode.ERROR, message });
-      span.end();
-      throw error;
-    }
-  });
-}
 function getSetting(runtime, key, defaultValue) {
   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
 }
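
For context on the hunks that follow: getSetting falls back from the runtime's own settings to the process environment to a caller-supplied default. A minimal sketch of how helpers such as getApiKey and getBaseURL (used later in this diff, but whose bodies are outside the shown hunks) could be layered on top of it; the setting keys and fallback URL here are assumptions, not taken from this diff:

    // Illustrative only: mirrors the fallback order of getSetting above.
    // The key names and the default URL are assumed, not confirmed by this diff.
    function getApiKey(runtime) {
      return getSetting(runtime, "OPENAI_API_KEY");
    }
    function getBaseURL(runtime) {
      return getSetting(runtime, "OPENAI_BASE_URL", "https://api.openai.com/v1");
    }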
@@ -607,106 +65,51 @@ async function generateObjectByModelType(runtime, params, modelType, getModelFn)
   logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
   const temperature = params.temperature ?? 0;
   const schemaPresent = !!params.schema;
-  … (old lines 610-624 not rendered in this diff view)
-  );
+  if (schemaPresent) {
+    logger.info(
+      `Using ${modelType} without schema validation (schema provided but output=no-schema)`
+    );
+  }
+  try {
+    const { object, usage } = await generateObject({
+      model: openai.languageModel(modelName),
+      output: "no-schema",
+      prompt: params.prompt,
+      temperature,
+      experimental_repairText: getJsonRepairFunction()
+    });
+    if (usage) {
+      emitModelUsageEvent(runtime, modelType, params.prompt, usage);
   }
-  … (old lines 627-634 not rendered in this diff view)
-      span.addEvent("llm.response.processed", {
-        "response.object": JSON.stringify(object, safeReplacer())
+    return object;
+  } catch (error) {
+    if (error instanceof JSONParseError) {
+      logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
+      const repairFunction = getJsonRepairFunction();
+      const repairedJsonString = await repairFunction({
+        text: error.text,
+        error
       });
-      if (
-  … (old lines 639-642 not rendered in this diff view)
-      })
-  … (old lines 644-646 not rendered in this diff view)
-    } catch (error) {
-      if (error instanceof JSONParseError) {
-        logger.error(`[generateObject] Failed to parse JSON: ${error.message}`);
-        span.recordException(error);
-        span.addEvent("llm.error.json_parse", {
-          "error.message": error.message,
-          "error.text": error.text
-        });
-        span.addEvent("llm.repair.attempt");
-        const repairFunction = getJsonRepairFunction();
-        const repairedJsonString = await repairFunction({
-          text: error.text,
-          error
-        });
-        if (repairedJsonString) {
-          try {
-            const repairedObject = JSON.parse(repairedJsonString);
-            span.addEvent("llm.repair.success", {
-              repaired_object: JSON.stringify(repairedObject, safeReplacer())
-            });
-            logger.info("[generateObject] Successfully repaired JSON.");
-            span.setStatus({
-              code: SpanStatusCode.ERROR,
-              message: "JSON parsing failed but was repaired"
-            });
-            return repairedObject;
-          } catch (repairParseError) {
-            const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
-            logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
-            const exception = repairParseError instanceof Error ? repairParseError : new Error(message);
-            span.recordException(exception);
-            span.addEvent("llm.repair.parse_error", {
-              "error.message": message
-            });
-            span.setStatus({
-              code: SpanStatusCode.ERROR,
-              message: `JSON repair failed: ${message}`
-            });
-            throw repairParseError;
-          }
-        } else {
-          const errMsg = error instanceof Error ? error.message : String(error);
-          logger.error("[generateObject] JSON repair failed.");
-          span.addEvent("llm.repair.failed");
-          span.setStatus({
-            code: SpanStatusCode.ERROR,
-            message: `JSON repair failed: ${errMsg}`
-          });
-          throw error;
+      if (repairedJsonString) {
+        try {
+          const repairedObject = JSON.parse(repairedJsonString);
+          logger.info("[generateObject] Successfully repaired JSON.");
+          return repairedObject;
+        } catch (repairParseError) {
+          const message = repairParseError instanceof Error ? repairParseError.message : String(repairParseError);
+          logger.error(`[generateObject] Failed to parse repaired JSON: ${message}`);
+          throw repairParseError;
       }
     } else {
-  … (old line 698 not rendered in this diff view)
-        logger.error(`[generateObject] Unknown error: ${message}`);
-        const exception = error instanceof Error ? error : new Error(message);
-        span.recordException(exception);
-        span.setStatus({
-          code: SpanStatusCode.ERROR,
-          message
-        });
+        logger.error("[generateObject] JSON repair failed.");
       throw error;
     }
+    } else {
+      const message = error instanceof Error ? error.message : String(error);
+      logger.error(`[generateObject] Unknown error: ${message}`);
+      throw error;
   }
-  }
+  }
 }
 function getJsonRepairFunction() {
   return async ({ text, error }) => {
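
The replacement code above keeps the JSON-repair fallback but drops the span bookkeeping: when generateObject throws a JSONParseError, the raw text is passed through getJsonRepairFunction() and parsed once more. A compact sketch of that control flow in isolation, assuming a repair callback with the same ({ text, error }) shape returning a string or null:

    // Sketch of the fallback path, not the plugin's exact code.
    async function parseWithRepair(rawText, error, repair) {
      const repaired = await repair({ text: rawText, error });
      if (!repaired) {
        throw error; // nothing usable came back from the repair step
      }
      return JSON.parse(repaired); // may still throw; the caller decides how to handle that
    }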
@@ -855,90 +258,56 @@ var openaiPlugin = {
     emptyVector[0] = 0.3;
     return emptyVector;
   }
-  const
-  … (old lines 859-860 not rendered in this diff view)
-  "
-  … (old lines 862-872 not rendered in this diff view)
-  })
-  … (old line 874 not rendered in this diff view)
+  const embeddingBaseURL = getEmbeddingBaseURL(runtime);
+  const apiKey = getApiKey(runtime);
+  if (!apiKey) {
+    throw new Error("OpenAI API key not configured");
+  }
+  try {
+    const response = await fetch(`${embeddingBaseURL}/embeddings`, {
+      method: "POST",
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        model: embeddingModelName,
+        input: text
+      })
+    });
+    const responseClone = response.clone();
+    const rawResponseBody = await responseClone.text();
+    if (!response.ok) {
+      logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
+      const errorVector = Array(embeddingDimension).fill(0);
+      errorVector[0] = 0.4;
+      return errorVector;
   }
-  … (old lines 876-878 not rendered in this diff view)
-      headers: {
-        Authorization: `Bearer ${apiKey}`,
-        "Content-Type": "application/json"
-      },
-      body: JSON.stringify({
-        model: embeddingModelName,
-        input: text
-      })
-    });
-    const responseClone = response.clone();
-    const rawResponseBody = await responseClone.text();
-    span.addEvent("llm.response.raw", {
-      "response.body": rawResponseBody
-    });
-    if (!response.ok) {
-      logger.error(`OpenAI API error: ${response.status} - ${response.statusText}`);
-      span.setAttributes({ "error.api.status": response.status });
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: `OpenAI API error: ${response.status} - ${response.statusText}. Response: ${rawResponseBody}`
-      });
-      const errorVector = Array(embeddingDimension).fill(0);
-      errorVector[0] = 0.4;
-      return errorVector;
-    }
-    const data = await response.json();
-    if (!data?.data?.[0]?.embedding) {
-      logger.error("API returned invalid structure");
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: "API returned invalid structure"
-      });
-      const errorVector = Array(embeddingDimension).fill(0);
-      errorVector[0] = 0.5;
-      return errorVector;
-    }
-    const embedding = data.data[0].embedding;
-    span.setAttribute("llm.response.embedding.vector_length", embedding.length);
-    if (data.usage) {
-      span.setAttributes({
-        "llm.usage.prompt_tokens": data.usage.prompt_tokens,
-        "llm.usage.total_tokens": data.usage.total_tokens
-      });
-      const usage = {
-        promptTokens: data.usage.prompt_tokens,
-        completionTokens: 0,
-        totalTokens: data.usage.total_tokens
-      };
-      emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
-    }
-    logger.log(`Got valid embedding with length ${embedding.length}`);
-    return embedding;
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    logger.error(`Error generating embedding: ${message}`);
-    const exception = error instanceof Error ? error : new Error(message);
-    span.recordException(exception);
-    span.setStatus({ code: SpanStatusCode.ERROR, message });
+    const data = await response.json();
+    if (!data?.data?.[0]?.embedding) {
+      logger.error("API returned invalid structure");
       const errorVector = Array(embeddingDimension).fill(0);
-      errorVector[0] = 0.
+      errorVector[0] = 0.5;
       return errorVector;
     }
-  … (old line 941 not rendered in this diff view)
+    const embedding = data.data[0].embedding;
+    if (data.usage) {
+      const usage = {
+        promptTokens: data.usage.prompt_tokens,
+        completionTokens: 0,
+        totalTokens: data.usage.total_tokens
+      };
+      emitModelUsageEvent(runtime, ModelType.TEXT_EMBEDDING, text, usage);
+    }
+    logger.log(`Got valid embedding with length ${embedding.length}`);
+    return embedding;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger.error(`Error generating embedding: ${message}`);
+    const errorVector = Array(embeddingDimension).fill(0);
+    errorVector[0] = 0.6;
+    return errorVector;
+  }
 },
 [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
   return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
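
Rather than throwing, the TEXT_EMBEDDING handler above returns a zero vector whose first component marks the failure mode: 0.3 for the early-return path at the top of the hunk, 0.4 for a non-OK HTTP response, 0.5 for a malformed response body, and 0.6 for a thrown error. A small sketch of that convention (the helper itself is illustrative and not part of the plugin):

    // Build a placeholder embedding whose first component encodes the failure mode.
    function errorEmbedding(dimension, marker) {
      const vector = Array(dimension).fill(0);
      vector[0] = marker; // e.g. 0.4 = HTTP error, 0.5 = bad payload, 0.6 = exception
      return vector;
    }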
@@ -955,42 +324,20 @@ var openaiPlugin = {
   const modelName = getSmallModel(runtime);
   logger.log(`[OpenAI] Using TEXT_SMALL model: ${modelName}`);
   logger.log(prompt);
-  const
-  … (old lines 959-966 not rendered in this diff view)
-  };
-  return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
-    span.addEvent("llm.prompt", { "prompt.content": prompt });
-    const { text: openaiResponse, usage } = await generateText({
-      model: openai.languageModel(modelName),
-      prompt,
-      system: runtime.character.system ?? void 0,
-      temperature,
-      maxTokens: max_response_length,
-      frequencyPenalty: frequency_penalty,
-      presencePenalty: presence_penalty,
-      stopSequences
-    });
-    span.setAttribute("llm.response.processed.length", openaiResponse.length);
-    span.addEvent("llm.response.processed", {
-      "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
-    });
-    if (usage) {
-      span.setAttributes({
-        "llm.usage.prompt_tokens": usage.promptTokens,
-        "llm.usage.completion_tokens": usage.completionTokens,
-        "llm.usage.total_tokens": usage.totalTokens
-      });
-      emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
-    }
-    return openaiResponse;
+  const { text: openaiResponse, usage } = await generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? void 0,
+    temperature,
+    maxTokens: max_response_length,
+    frequencyPenalty: frequency_penalty,
+    presencePenalty: presence_penalty,
+    stopSequences
   });
+  if (usage) {
+    emitModelUsageEvent(runtime, ModelType.TEXT_SMALL, prompt, usage);
+  }
+  return openaiResponse;
 },
 [ModelType.TEXT_LARGE]: async (runtime, {
   prompt,
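
These text handlers are normally reached through the elizaOS runtime's model API rather than called directly. A minimal usage sketch, assuming the runtime.useModel call from @elizaos/core (the call pattern is an assumption on my part; it is not shown in this diff):

    // Assumed call pattern; ModelType comes from @elizaos/core.
    const reply = await runtime.useModel(ModelType.TEXT_SMALL, {
      prompt: "Summarize the conversation so far in one sentence."
    });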
@@ -1004,42 +351,20 @@ var openaiPlugin = {
   const modelName = getLargeModel(runtime);
   logger.log(`[OpenAI] Using TEXT_LARGE model: ${modelName}`);
   logger.log(prompt);
-  const
-  … (old lines 1008-1015 not rendered in this diff view)
-  };
-  return startLlmSpan(runtime, "LLM.generateText", attributes, async (span) => {
-    span.addEvent("llm.prompt", { "prompt.content": prompt });
-    const { text: openaiResponse, usage } = await generateText({
-      model: openai.languageModel(modelName),
-      prompt,
-      system: runtime.character.system ?? void 0,
-      temperature,
-      maxTokens,
-      frequencyPenalty,
-      presencePenalty,
-      stopSequences
-    });
-    span.setAttribute("llm.response.processed.length", openaiResponse.length);
-    span.addEvent("llm.response.processed", {
-      "response.content": openaiResponse.substring(0, 200) + (openaiResponse.length > 200 ? "..." : "")
-    });
-    if (usage) {
-      span.setAttributes({
-        "llm.usage.prompt_tokens": usage.promptTokens,
-        "llm.usage.completion_tokens": usage.completionTokens,
-        "llm.usage.total_tokens": usage.totalTokens
-      });
-      emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
-    }
-    return openaiResponse;
+  const { text: openaiResponse, usage } = await generateText({
+    model: openai.languageModel(modelName),
+    prompt,
+    system: runtime.character.system ?? void 0,
+    temperature,
+    maxTokens,
+    frequencyPenalty,
+    presencePenalty,
+    stopSequences
   });
+  if (usage) {
+    emitModelUsageEvent(runtime, ModelType.TEXT_LARGE, prompt, usage);
+  }
+  return openaiResponse;
 },
 [ModelType.IMAGE]: async (runtime, params) => {
   const n = params.n || 1;
@@ -1047,63 +372,36 @@ var openaiPlugin = {
   const prompt = params.prompt;
   const modelName = "dall-e-3";
   logger.log(`[OpenAI] Using IMAGE model: ${modelName}`);
-  const
-  … (old lines 1051-1052 not rendered in this diff view)
-  "
-  … (old lines 1054-1071 not rendered in this diff view)
-        "Content-Type": "application/json"
-      },
-      body: JSON.stringify({
-        prompt,
-        n,
-        size
-      })
-    });
-    const responseClone = response.clone();
-    const rawResponseBody = await responseClone.text();
-    span.addEvent("llm.response.raw", {
-      "response.body": rawResponseBody
-    });
-    if (!response.ok) {
-      span.setAttributes({ "error.api.status": response.status });
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: `Failed to generate image: ${response.statusText}. Response: ${rawResponseBody}`
-      });
-      throw new Error(`Failed to generate image: ${response.statusText}`);
-    }
-    const data = await response.json();
-    const typedData = data;
-    span.addEvent("llm.response.processed", {
-      "response.urls": JSON.stringify(typedData.data)
-    });
-    return typedData.data;
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    const exception = error instanceof Error ? error : new Error(message);
-    span.recordException(exception);
-    span.setStatus({ code: SpanStatusCode.ERROR, message });
-    throw error;
+  const baseURL = getBaseURL(runtime);
+  const apiKey = getApiKey(runtime);
+  if (!apiKey) {
+    throw new Error("OpenAI API key not configured");
+  }
+  try {
+    const response = await fetch(`${baseURL}/images/generations`, {
+      method: "POST",
+      headers: {
+        Authorization: `Bearer ${apiKey}`,
+        "Content-Type": "application/json"
+      },
+      body: JSON.stringify({
+        prompt,
+        n,
+        size
+      })
+    });
+    const responseClone = response.clone();
+    const rawResponseBody = await responseClone.text();
+    if (!response.ok) {
+      throw new Error(`Failed to generate image: ${response.statusText}`);
   }
-  … (old line 1106 not rendered in this diff view)
+    const data = await response.json();
+    const typedData = data;
+    return typedData.data;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 },
 [ModelType.IMAGE_DESCRIPTION]: async (runtime, params) => {
   let imageUrl;
@@ -1121,13 +419,6 @@ var openaiPlugin = {
   imageUrl = params.imageUrl;
   promptText = params.prompt || "Please analyze this image and provide a title and detailed description.";
   }
-  const attributes = {
-    "llm.vendor": "OpenAI",
-    "llm.request.type": "chat",
-    "llm.request.model": modelName,
-    "llm.request.max_tokens": maxTokens,
-    "llm.request.image.url": imageUrl
-  };
   const messages = [
     {
       role: "user",
@@ -1137,209 +428,122 @@ var openaiPlugin = {
       ]
     }
   ];
-  … (old lines 1140-1142 not rendered in this diff view)
+  const baseURL = getBaseURL(runtime);
+  const apiKey = getApiKey(runtime);
+  if (!apiKey) {
+    logger.error("OpenAI API key not set");
+    return {
+      title: "Failed to analyze image",
+      description: "API key not configured"
+    };
+  }
+  try {
+    const requestBody = {
+      model: modelName,
+      messages,
+      max_tokens: maxTokens
+    };
+    const response = await fetch(`${baseURL}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${apiKey}`
+      },
+      body: JSON.stringify(requestBody)
   });
-  const
-  const
-  if (!
-  … (old line 1147 not rendered in this diff view)
-    span.setStatus({
-      code: SpanStatusCode.ERROR,
-      message: "OpenAI API key not configured"
-    });
-    return {
-      title: "Failed to analyze image",
-      description: "API key not configured"
-    };
+    const responseClone = response.clone();
+    const rawResponseBody = await responseClone.text();
+    if (!response.ok) {
+      throw new Error(`OpenAI API error: ${response.status}`);
   }
-  … (old lines 1157-1173 not rendered in this diff view)
-      "response.body": rawResponseBody
-    });
-    if (!response.ok) {
-      span.setAttributes({ "error.api.status": response.status });
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: `OpenAI API error: ${response.status}. Response: ${rawResponseBody}`
-      });
-      throw new Error(`OpenAI API error: ${response.status}`);
-    }
-    const result = await response.json();
-    const typedResult = result;
-    const content = typedResult.choices?.[0]?.message?.content;
-    console.log("############## CONTENT", content);
-    if (typedResult.usage) {
-      span.setAttributes({
-        "llm.usage.prompt_tokens": typedResult.usage.prompt_tokens,
-        "llm.usage.completion_tokens": typedResult.usage.completion_tokens,
-        "llm.usage.total_tokens": typedResult.usage.total_tokens
-      });
-      emitModelUsageEvent(
-        runtime,
-        ModelType.IMAGE_DESCRIPTION,
-        typeof params === "string" ? params : params.prompt || "",
-        {
-          promptTokens: typedResult.usage.prompt_tokens,
-          completionTokens: typedResult.usage.completion_tokens,
-          totalTokens: typedResult.usage.total_tokens
-        }
-      );
-    }
-    if (typedResult.choices?.[0]?.finish_reason) {
-      span.setAttribute("llm.response.finish_reason", typedResult.choices[0].finish_reason);
-    }
-    if (!content) {
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: "No content in API response"
-      });
-      return {
-        title: "Failed to analyze image",
-        description: "No response from API"
-      };
-    }
-    console.log("######################## CONTENT", content);
-    const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
-    if (isCustomPrompt) {
-      span.addEvent("llm.response.raw_content", {
-        "response.content": content
-      });
-      return content;
-    }
-    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
-    const title = titleMatch?.[1]?.trim() || "Image Analysis";
-    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
-    const processedResult = { title, description };
-    span.addEvent("llm.response.processed", {
-      "response.object": JSON.stringify(processedResult, safeReplacer())
-    });
-    return processedResult;
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    logger.error(`Error analyzing image: ${message}`);
-    const exception = error instanceof Error ? error : new Error(message);
-    span.recordException(exception);
-    span.setStatus({ code: SpanStatusCode.ERROR, message });
+    const result = await response.json();
+    const typedResult = result;
+    const content = typedResult.choices?.[0]?.message?.content;
+    console.log("############## CONTENT", content);
+    if (typedResult.usage) {
+      emitModelUsageEvent(
+        runtime,
+        ModelType.IMAGE_DESCRIPTION,
+        typeof params === "string" ? params : params.prompt || "",
+        {
+          promptTokens: typedResult.usage.prompt_tokens,
+          completionTokens: typedResult.usage.completion_tokens,
+          totalTokens: typedResult.usage.total_tokens
+        }
+      );
+    }
+    if (!content) {
       return {
         title: "Failed to analyze image",
-        description:
+        description: "No response from API"
       };
     }
-  … (old line 1245 not rendered in this diff view)
+    console.log("######################## CONTENT", content);
+    const isCustomPrompt = typeof params === "object" && params.prompt && params.prompt !== "Please analyze this image and provide a title and detailed description.";
+    if (isCustomPrompt) {
+      return content;
+    }
+    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
+    const title = titleMatch?.[1]?.trim() || "Image Analysis";
+    const description = content.replace(/title[:\s]+(.+?)(?:\n|$)/i, "").trim();
+    const processedResult = { title, description };
+    return processedResult;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    logger.error(`Error analyzing image: ${message}`);
+    return {
+      title: "Failed to analyze image",
+      description: `Error: ${message}`
+    };
+  }
 },
 [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
   logger.log("audioBuffer", audioBuffer);
   const modelName = "whisper-1";
   logger.log(`[OpenAI] Using TRANSCRIPTION model: ${modelName}`);
-  const
-  … (old lines 1252-1253 not rendered in this diff view)
-  "
-  … (old lines 1255-1259 not rendered in this diff view)
+  const baseURL = getBaseURL(runtime);
+  const apiKey = getApiKey(runtime);
+  if (!apiKey) {
+    throw new Error("OpenAI API key not configured - Cannot make request");
+  }
+  if (!audioBuffer || audioBuffer.length === 0) {
+    throw new Error("Audio buffer is empty or invalid for transcription");
+  }
+  const formData = new FormData();
+  formData.append("file", new Blob([audioBuffer]), "recording.mp3");
+  formData.append("model", "whisper-1");
+  try {
+    const response = await fetch(`${baseURL}/audio/transcriptions`, {
+      method: "POST",
+      headers: {
+        Authorization: `Bearer ${apiKey}`
+      },
+      body: formData
   });
-  const
-  const
-  … (old lines 1263-1265 not rendered in this diff view)
-      message: "OpenAI API key not configured"
-    });
-    throw new Error("OpenAI API key not configured - Cannot make request");
-  }
-  if (!audioBuffer || audioBuffer.length === 0) {
-    span.setStatus({
-      code: SpanStatusCode.ERROR,
-      message: "Audio buffer is empty or invalid"
-    });
-    throw new Error("Audio buffer is empty or invalid for transcription");
-  }
-  const formData = new FormData();
-  formData.append("file", new Blob([audioBuffer]), "recording.mp3");
-  formData.append("model", "whisper-1");
-  try {
-    const response = await fetch(`${baseURL}/audio/transcriptions`, {
-      method: "POST",
-      headers: {
-        Authorization: `Bearer ${apiKey}`
-      },
-      body: formData
-    });
-    const responseClone = response.clone();
-    const rawResponseBody = await responseClone.text();
-    span.addEvent("llm.response.raw", {
-      "response.body": rawResponseBody
-    });
-    logger.log("response", response);
-    if (!response.ok) {
-      span.setAttributes({ "error.api.status": response.status });
-      span.setStatus({
-        code: SpanStatusCode.ERROR,
-        message: `Failed to transcribe audio: ${response.statusText}. Response: ${rawResponseBody}`
-      });
-      throw new Error(`Failed to transcribe audio: ${response.statusText}`);
-    }
-    const data = await response.json();
-    const processedText = data.text;
-    span.setAttribute("llm.response.processed.length", processedText.length);
-    span.addEvent("llm.response.processed", {
-      "response.text": processedText
-    });
-    return processedText;
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    const exception = error instanceof Error ? error : new Error(message);
-    span.recordException(exception);
-    span.setStatus({ code: SpanStatusCode.ERROR, message });
-    throw error;
+    const responseClone = response.clone();
+    const rawResponseBody = await responseClone.text();
+    logger.log("response", response);
+    if (!response.ok) {
+      throw new Error(`Failed to transcribe audio: ${response.statusText}`);
   }
-  … (old line 1316 not rendered in this diff view)
+    const data = await response.json();
+    const processedText = data.text;
+    return processedText;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 },
 [ModelType.TEXT_TO_SPEECH]: async (runtime, text) => {
   const ttsModelName = getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
-  … (old lines 1320-1327 not rendered in this diff view)
-  span.addEvent("llm.prompt", { "prompt.content": text });
-  try {
-    const speechStream = await fetchTextToSpeech(runtime, text);
-    span.addEvent("llm.response.success", {
-      info: "Speech stream generated"
-    });
-    return speechStream;
-  } catch (error) {
-    const message = error instanceof Error ? error.message : String(error);
-    const exception = error instanceof Error ? error : new Error(message);
-    span.recordException(exception);
-    span.setStatus({ code: SpanStatusCode.ERROR, message });
-    throw error;
-  }
-  });
+  logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${ttsModelName}`);
+  try {
+    const speechStream = await fetchTextToSpeech(runtime, text);
+    return speechStream;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    throw error;
+  }
 },
 [ModelType.OBJECT_SMALL]: async (runtime, params) => {
   return generateObjectByModelType(runtime, params, ModelType.OBJECT_SMALL, getSmallModel);
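
The TRANSCRIPTION handler above uploads the audio as multipart form data built with undici's FormData and a Blob. A standalone sketch of the same request, with baseURL and apiKey standing in for the plugin's getBaseURL/getApiKey helpers:

    // Standalone sketch of the Whisper request shown in the hunk above.
    import { fetch, FormData } from "undici";

    async function transcribe(audioBuffer, baseURL, apiKey) {
      const formData = new FormData();
      formData.append("file", new Blob([audioBuffer]), "recording.mp3");
      formData.append("model", "whisper-1");
      const response = await fetch(`${baseURL}/audio/transcriptions`, {
        method: "POST",
        headers: { Authorization: `Bearer ${apiKey}` },
        body: formData
      });
      if (!response.ok) {
        throw new Error(`Failed to transcribe audio: ${response.statusText}`);
      }
      const data = await response.json();
      return data.text;
    }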