@fallom/trace 0.1.10 → 0.1.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +146 -3
- package/dist/index.d.ts +146 -3
- package/dist/index.js +573 -66
- package/dist/index.mjs +569 -66
- package/package.json +3 -1
package/dist/index.js
CHANGED
|
@@ -269,10 +269,14 @@ var init_prompts = __esm({
|
|
|
269
269
|
// src/index.ts
|
|
270
270
|
var index_exports = {};
|
|
271
271
|
__export(index_exports, {
|
|
272
|
+
FallomExporter: () => FallomExporter,
|
|
273
|
+
clearMastraPrompt: () => clearMastraPrompt,
|
|
272
274
|
default: () => index_default,
|
|
273
275
|
init: () => init4,
|
|
274
276
|
models: () => models_exports,
|
|
275
277
|
prompts: () => prompts_exports,
|
|
278
|
+
setMastraPrompt: () => setMastraPrompt,
|
|
279
|
+
setMastraPromptAB: () => setMastraPromptAB,
|
|
276
280
|
trace: () => trace_exports
|
|
277
281
|
});
|
|
278
282
|
module.exports = __toCommonJS(index_exports);
|
|
@@ -290,6 +294,7 @@ __export(trace_exports, {
|
|
|
290
294
|
wrapAISDK: () => wrapAISDK,
|
|
291
295
|
wrapAnthropic: () => wrapAnthropic,
|
|
292
296
|
wrapGoogleAI: () => wrapGoogleAI,
|
|
297
|
+
wrapMastraAgent: () => wrapMastraAgent,
|
|
293
298
|
wrapOpenAI: () => wrapOpenAI
|
|
294
299
|
});
|
|
295
300
|
var import_async_hooks = require("async_hooks");
|
|
@@ -299,7 +304,7 @@ var import_exporter_trace_otlp_http = require("@opentelemetry/exporter-trace-otl
|
|
|
299
304
|
// node_modules/@opentelemetry/resources/build/esm/Resource.js
|
|
300
305
|
var import_api = require("@opentelemetry/api");
|
|
301
306
|
|
|
302
|
-
// node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
|
|
307
|
+
// node_modules/@opentelemetry/resources/node_modules/@opentelemetry/semantic-conventions/build/esm/resource/SemanticResourceAttributes.js
|
|
303
308
|
var SemanticResourceAttributes = {
|
|
304
309
|
/**
|
|
305
310
|
* Name of the cloud provider.
|
|
@@ -679,35 +684,9 @@ var SemanticResourceAttributes = {
|
|
|
679
684
|
*/
|
|
680
685
|
WEBENGINE_DESCRIPTION: "webengine.description"
|
|
681
686
|
};
|
|
682
|
-
var TelemetrySdkLanguageValues = {
|
|
683
|
-
/** cpp. */
|
|
684
|
-
CPP: "cpp",
|
|
685
|
-
/** dotnet. */
|
|
686
|
-
DOTNET: "dotnet",
|
|
687
|
-
/** erlang. */
|
|
688
|
-
ERLANG: "erlang",
|
|
689
|
-
/** go. */
|
|
690
|
-
GO: "go",
|
|
691
|
-
/** java. */
|
|
692
|
-
JAVA: "java",
|
|
693
|
-
/** nodejs. */
|
|
694
|
-
NODEJS: "nodejs",
|
|
695
|
-
/** php. */
|
|
696
|
-
PHP: "php",
|
|
697
|
-
/** python. */
|
|
698
|
-
PYTHON: "python",
|
|
699
|
-
/** ruby. */
|
|
700
|
-
RUBY: "ruby",
|
|
701
|
-
/** webjs. */
|
|
702
|
-
WEBJS: "webjs"
|
|
703
|
-
};
|
|
704
|
-
|
|
705
|
-
// node_modules/@opentelemetry/core/build/esm/version.js
|
|
706
|
-
var VERSION = "1.19.0";
|
|
707
687
|
|
|
708
|
-
// node_modules/@opentelemetry/
|
|
709
|
-
var
|
|
710
|
-
var SDK_INFO = (_a = {}, _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = "opentelemetry", _a[SemanticResourceAttributes.PROCESS_RUNTIME_NAME] = "node", _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = TelemetrySdkLanguageValues.NODEJS, _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = VERSION, _a);
|
|
688
|
+
// node_modules/@opentelemetry/resources/build/esm/Resource.js
|
|
689
|
+
var import_core = require("@opentelemetry/core");
|
|
711
690
|
|
|
712
691
|
// node_modules/@opentelemetry/resources/build/esm/platform/node/default-service-name.js
|
|
713
692
|
function defaultServiceName() {
|
|
@@ -844,10 +823,10 @@ var Resource = (
|
|
|
844
823
|
(function() {
|
|
845
824
|
function Resource2(attributes, asyncAttributesPromise) {
|
|
846
825
|
var _this = this;
|
|
847
|
-
var
|
|
826
|
+
var _a;
|
|
848
827
|
this._attributes = attributes;
|
|
849
828
|
this.asyncAttributesPending = asyncAttributesPromise != null;
|
|
850
|
-
this._syncAttributes = (
|
|
829
|
+
this._syncAttributes = (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
|
|
851
830
|
this._asyncAttributesPromise = asyncAttributesPromise === null || asyncAttributesPromise === void 0 ? void 0 : asyncAttributesPromise.then(function(asyncAttributes) {
|
|
852
831
|
_this._attributes = Object.assign({}, _this._attributes, asyncAttributes);
|
|
853
832
|
_this.asyncAttributesPending = false;
|
|
@@ -862,30 +841,30 @@ var Resource = (
|
|
|
862
841
|
return Resource2.EMPTY;
|
|
863
842
|
};
|
|
864
843
|
Resource2.default = function() {
|
|
865
|
-
var
|
|
866
|
-
return new Resource2((
|
|
844
|
+
var _a;
|
|
845
|
+
return new Resource2((_a = {}, _a[SemanticResourceAttributes.SERVICE_NAME] = defaultServiceName(), _a[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE] = import_core.SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_LANGUAGE], _a[SemanticResourceAttributes.TELEMETRY_SDK_NAME] = import_core.SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_NAME], _a[SemanticResourceAttributes.TELEMETRY_SDK_VERSION] = import_core.SDK_INFO[SemanticResourceAttributes.TELEMETRY_SDK_VERSION], _a));
|
|
867
846
|
};
|
|
868
847
|
Object.defineProperty(Resource2.prototype, "attributes", {
|
|
869
848
|
get: function() {
|
|
870
|
-
var
|
|
849
|
+
var _a;
|
|
871
850
|
if (this.asyncAttributesPending) {
|
|
872
851
|
import_api.diag.error("Accessing resource attributes before async attributes settled");
|
|
873
852
|
}
|
|
874
|
-
return (
|
|
853
|
+
return (_a = this._attributes) !== null && _a !== void 0 ? _a : {};
|
|
875
854
|
},
|
|
876
855
|
enumerable: false,
|
|
877
856
|
configurable: true
|
|
878
857
|
});
|
|
879
858
|
Resource2.prototype.waitForAsyncAttributes = function() {
|
|
880
859
|
return __awaiter(this, void 0, void 0, function() {
|
|
881
|
-
return __generator(this, function(
|
|
882
|
-
switch (
|
|
860
|
+
return __generator(this, function(_a) {
|
|
861
|
+
switch (_a.label) {
|
|
883
862
|
case 0:
|
|
884
863
|
if (!this.asyncAttributesPending) return [3, 2];
|
|
885
864
|
return [4, this._asyncAttributesPromise];
|
|
886
865
|
case 1:
|
|
887
|
-
|
|
888
|
-
|
|
866
|
+
_a.sent();
|
|
867
|
+
_a.label = 2;
|
|
889
868
|
case 2:
|
|
890
869
|
return [
|
|
891
870
|
2
|
|
@@ -897,19 +876,19 @@ var Resource = (
|
|
|
897
876
|
};
|
|
898
877
|
Resource2.prototype.merge = function(other) {
|
|
899
878
|
var _this = this;
|
|
900
|
-
var
|
|
879
|
+
var _a;
|
|
901
880
|
if (!other)
|
|
902
881
|
return this;
|
|
903
|
-
var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (
|
|
882
|
+
var mergedSyncAttributes = __assign(__assign({}, this._syncAttributes), (_a = other._syncAttributes) !== null && _a !== void 0 ? _a : other.attributes);
|
|
904
883
|
if (!this._asyncAttributesPromise && !other._asyncAttributesPromise) {
|
|
905
884
|
return new Resource2(mergedSyncAttributes);
|
|
906
885
|
}
|
|
907
886
|
var mergedAttributesPromise = Promise.all([
|
|
908
887
|
this._asyncAttributesPromise,
|
|
909
888
|
other._asyncAttributesPromise
|
|
910
|
-
]).then(function(
|
|
889
|
+
]).then(function(_a2) {
|
|
911
890
|
var _b;
|
|
912
|
-
var _c = __read(
|
|
891
|
+
var _c = __read(_a2, 2), thisAsyncAttributes = _c[0], otherAsyncAttributes = _c[1];
|
|
913
892
|
return __assign(__assign(__assign(__assign({}, _this._syncAttributes), thisAsyncAttributes), (_b = other._syncAttributes) !== null && _b !== void 0 ? _b : other.attributes), otherAsyncAttributes);
|
|
914
893
|
});
|
|
915
894
|
return new Resource2(mergedSyncAttributes, mergedAttributesPromise);
|
|
@@ -1215,7 +1194,13 @@ function wrapOpenAI(client) {
|
|
|
1215
1194
|
response?.choices?.[0]?.message,
|
|
1216
1195
|
response?.model || params?.model,
|
|
1217
1196
|
response?.id
|
|
1218
|
-
) :
|
|
1197
|
+
) : {};
|
|
1198
|
+
if (response?.usage) {
|
|
1199
|
+
attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
|
|
1200
|
+
}
|
|
1201
|
+
if (response?.choices?.[0]?.finish_reason) {
|
|
1202
|
+
attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
|
|
1203
|
+
}
|
|
1219
1204
|
sendTrace({
|
|
1220
1205
|
config_key: ctx.configKey,
|
|
1221
1206
|
session_id: ctx.sessionId,
|
|
@@ -1233,7 +1218,7 @@ function wrapOpenAI(client) {
|
|
|
1233
1218
|
prompt_tokens: response?.usage?.prompt_tokens,
|
|
1234
1219
|
completion_tokens: response?.usage?.completion_tokens,
|
|
1235
1220
|
total_tokens: response?.usage?.total_tokens,
|
|
1236
|
-
attributes,
|
|
1221
|
+
attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
|
|
1237
1222
|
prompt_key: promptCtx?.promptKey,
|
|
1238
1223
|
prompt_version: promptCtx?.promptVersion,
|
|
1239
1224
|
prompt_ab_test_key: promptCtx?.abTestKey,
|
|
@@ -1306,10 +1291,16 @@ function wrapAnthropic(client) {
|
|
|
1306
1291
|
{ role: "assistant", content: response?.content?.[0]?.text || "" },
|
|
1307
1292
|
response?.model || params?.model,
|
|
1308
1293
|
response?.id
|
|
1309
|
-
) :
|
|
1310
|
-
if (
|
|
1294
|
+
) : {};
|
|
1295
|
+
if (params?.system) {
|
|
1311
1296
|
attributes["gen_ai.system_prompt"] = params.system;
|
|
1312
1297
|
}
|
|
1298
|
+
if (response?.usage) {
|
|
1299
|
+
attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
|
|
1300
|
+
}
|
|
1301
|
+
if (response?.stop_reason) {
|
|
1302
|
+
attributes["gen_ai.response.finish_reason"] = response.stop_reason;
|
|
1303
|
+
}
|
|
1313
1304
|
sendTrace({
|
|
1314
1305
|
config_key: ctx.configKey,
|
|
1315
1306
|
session_id: ctx.sessionId,
|
|
@@ -1327,7 +1318,7 @@ function wrapAnthropic(client) {
|
|
|
1327
1318
|
prompt_tokens: response?.usage?.input_tokens,
|
|
1328
1319
|
completion_tokens: response?.usage?.output_tokens,
|
|
1329
1320
|
total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
|
|
1330
|
-
attributes,
|
|
1321
|
+
attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
|
|
1331
1322
|
prompt_key: promptCtx?.promptKey,
|
|
1332
1323
|
prompt_version: promptCtx?.promptVersion,
|
|
1333
1324
|
prompt_ab_test_key: promptCtx?.abTestKey,
|
|
@@ -1420,6 +1411,13 @@ function wrapGoogleAI(model) {
|
|
|
1420
1411
|
attributes["gen_ai.completion.0.content"] = outputText;
|
|
1421
1412
|
}
|
|
1422
1413
|
}
|
|
1414
|
+
if (usage) {
|
|
1415
|
+
attributes["fallom.raw.usage"] = JSON.stringify(usage);
|
|
1416
|
+
}
|
|
1417
|
+
const candidate = result?.candidates?.[0];
|
|
1418
|
+
if (candidate?.finishReason) {
|
|
1419
|
+
attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
|
|
1420
|
+
}
|
|
1423
1421
|
sendTrace({
|
|
1424
1422
|
config_key: ctx.configKey,
|
|
1425
1423
|
session_id: ctx.sessionId,
|
|
@@ -1437,7 +1435,7 @@ function wrapGoogleAI(model) {
|
|
|
1437
1435
|
prompt_tokens: usage?.promptTokenCount,
|
|
1438
1436
|
completion_tokens: usage?.candidatesTokenCount,
|
|
1439
1437
|
total_tokens: usage?.totalTokenCount,
|
|
1440
|
-
attributes:
|
|
1438
|
+
attributes: Object.keys(attributes).length > 0 ? attributes : void 0,
|
|
1441
1439
|
prompt_key: promptCtx?.promptKey,
|
|
1442
1440
|
prompt_version: promptCtx?.promptVersion,
|
|
1443
1441
|
prompt_ab_test_key: promptCtx?.abTestKey,
|
|
@@ -1485,8 +1483,37 @@ function wrapGoogleAI(model) {
|
|
|
1485
1483
|
};
|
|
1486
1484
|
return model;
|
|
1487
1485
|
}
|
|
1488
|
-
|
|
1486
|
+
var aiSdkDebug = false;
|
|
1487
|
+
function extractUsageFromResult(result, directUsage) {
|
|
1488
|
+
let usage = directUsage ?? result?.usage;
|
|
1489
|
+
const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
|
|
1490
|
+
let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : void 0;
|
|
1491
|
+
let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : void 0;
|
|
1492
|
+
let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : void 0;
|
|
1493
|
+
let cost;
|
|
1494
|
+
const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
|
|
1495
|
+
if (orUsage) {
|
|
1496
|
+
if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
|
|
1497
|
+
promptTokens = orUsage.promptTokens;
|
|
1498
|
+
}
|
|
1499
|
+
if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
|
|
1500
|
+
completionTokens = orUsage.completionTokens;
|
|
1501
|
+
}
|
|
1502
|
+
if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
|
|
1503
|
+
totalTokens = orUsage.totalTokens;
|
|
1504
|
+
}
|
|
1505
|
+
if (isValidNumber(orUsage.cost)) {
|
|
1506
|
+
cost = orUsage.cost;
|
|
1507
|
+
}
|
|
1508
|
+
}
|
|
1509
|
+
if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
|
|
1510
|
+
totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
|
|
1511
|
+
}
|
|
1512
|
+
return { promptTokens, completionTokens, totalTokens, cost };
|
|
1513
|
+
}
|
|
1514
|
+
function wrapAISDK(ai, options) {
|
|
1489
1515
|
const aiModule = ai;
|
|
1516
|
+
aiSdkDebug = options?.debug ?? false;
|
|
1490
1517
|
return {
|
|
1491
1518
|
generateText: createGenerateTextWrapper(aiModule),
|
|
1492
1519
|
streamText: createStreamTextWrapper(aiModule),
|
|
@@ -1515,6 +1542,28 @@ function createGenerateTextWrapper(aiModule) {
|
|
|
1515
1542
|
try {
|
|
1516
1543
|
const result = await aiModule.generateText(...args);
|
|
1517
1544
|
const endTime = Date.now();
|
|
1545
|
+
if (aiSdkDebug) {
|
|
1546
|
+
console.log(
|
|
1547
|
+
"\n\u{1F50D} [Fallom Debug] generateText result keys:",
|
|
1548
|
+
Object.keys(result || {})
|
|
1549
|
+
);
|
|
1550
|
+
console.log(
|
|
1551
|
+
"\u{1F50D} [Fallom Debug] result.usage:",
|
|
1552
|
+
JSON.stringify(result?.usage, null, 2)
|
|
1553
|
+
);
|
|
1554
|
+
console.log(
|
|
1555
|
+
"\u{1F50D} [Fallom Debug] result.response keys:",
|
|
1556
|
+
Object.keys(result?.response || {})
|
|
1557
|
+
);
|
|
1558
|
+
console.log(
|
|
1559
|
+
"\u{1F50D} [Fallom Debug] result.response.usage:",
|
|
1560
|
+
JSON.stringify(result?.response?.usage, null, 2)
|
|
1561
|
+
);
|
|
1562
|
+
console.log(
|
|
1563
|
+
"\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
|
|
1564
|
+
JSON.stringify(result?.experimental_providerMetadata, null, 2)
|
|
1565
|
+
);
|
|
1566
|
+
}
|
|
1518
1567
|
const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
|
|
1519
1568
|
const attributes = {};
|
|
1520
1569
|
if (captureContent) {
|
|
@@ -1538,6 +1587,18 @@ function createGenerateTextWrapper(aiModule) {
|
|
|
1538
1587
|
attributes["gen_ai.response.id"] = result.response.id;
|
|
1539
1588
|
}
|
|
1540
1589
|
}
|
|
1590
|
+
if (result?.usage) {
|
|
1591
|
+
attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
|
|
1592
|
+
}
|
|
1593
|
+
if (result?.experimental_providerMetadata) {
|
|
1594
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(
|
|
1595
|
+
result.experimental_providerMetadata
|
|
1596
|
+
);
|
|
1597
|
+
}
|
|
1598
|
+
if (result?.finishReason) {
|
|
1599
|
+
attributes["gen_ai.response.finish_reason"] = result.finishReason;
|
|
1600
|
+
}
|
|
1601
|
+
const usage = extractUsageFromResult(result);
|
|
1541
1602
|
sendTrace({
|
|
1542
1603
|
config_key: ctx.configKey,
|
|
1543
1604
|
session_id: ctx.sessionId,
|
|
@@ -1552,9 +1613,9 @@ function createGenerateTextWrapper(aiModule) {
|
|
|
1552
1613
|
end_time: new Date(endTime).toISOString(),
|
|
1553
1614
|
duration_ms: endTime - startTime,
|
|
1554
1615
|
status: "OK",
|
|
1555
|
-
prompt_tokens:
|
|
1556
|
-
completion_tokens:
|
|
1557
|
-
total_tokens:
|
|
1616
|
+
prompt_tokens: usage.promptTokens,
|
|
1617
|
+
completion_tokens: usage.completionTokens,
|
|
1618
|
+
total_tokens: usage.totalTokens,
|
|
1558
1619
|
attributes: captureContent ? attributes : void 0,
|
|
1559
1620
|
prompt_key: promptCtx?.promptKey,
|
|
1560
1621
|
prompt_version: promptCtx?.promptVersion,
|
|
@@ -1613,9 +1674,31 @@ function createStreamTextWrapper(aiModule) {
|
|
|
1613
1674
|
} catch {
|
|
1614
1675
|
}
|
|
1615
1676
|
if (result?.usage) {
|
|
1616
|
-
result.usage.then((
|
|
1677
|
+
result.usage.then(async (rawUsage) => {
|
|
1617
1678
|
const endTime = Date.now();
|
|
1618
|
-
|
|
1679
|
+
if (aiSdkDebug) {
|
|
1680
|
+
console.log(
|
|
1681
|
+
"\n\u{1F50D} [Fallom Debug] streamText usage:",
|
|
1682
|
+
JSON.stringify(rawUsage, null, 2)
|
|
1683
|
+
);
|
|
1684
|
+
console.log(
|
|
1685
|
+
"\u{1F50D} [Fallom Debug] streamText result keys:",
|
|
1686
|
+
Object.keys(result || {})
|
|
1687
|
+
);
|
|
1688
|
+
}
|
|
1689
|
+
log2("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
|
|
1690
|
+
let providerMetadata = result?.experimental_providerMetadata;
|
|
1691
|
+
if (providerMetadata && typeof providerMetadata.then === "function") {
|
|
1692
|
+
try {
|
|
1693
|
+
providerMetadata = await providerMetadata;
|
|
1694
|
+
} catch {
|
|
1695
|
+
providerMetadata = void 0;
|
|
1696
|
+
}
|
|
1697
|
+
}
|
|
1698
|
+
const usage = extractUsageFromResult(
|
|
1699
|
+
{ experimental_providerMetadata: providerMetadata },
|
|
1700
|
+
rawUsage
|
|
1701
|
+
);
|
|
1619
1702
|
const attributes = {};
|
|
1620
1703
|
if (captureContent) {
|
|
1621
1704
|
attributes["gen_ai.request.model"] = modelId;
|
|
@@ -1627,6 +1710,12 @@ function createStreamTextWrapper(aiModule) {
|
|
|
1627
1710
|
if (firstTokenTime) {
|
|
1628
1711
|
attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
|
|
1629
1712
|
}
|
|
1713
|
+
if (rawUsage) {
|
|
1714
|
+
attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
|
|
1715
|
+
}
|
|
1716
|
+
if (providerMetadata) {
|
|
1717
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
|
|
1718
|
+
}
|
|
1630
1719
|
const tracePayload = {
|
|
1631
1720
|
config_key: ctx.configKey,
|
|
1632
1721
|
session_id: ctx.sessionId,
|
|
@@ -1641,9 +1730,9 @@ function createStreamTextWrapper(aiModule) {
|
|
|
1641
1730
|
end_time: new Date(endTime).toISOString(),
|
|
1642
1731
|
duration_ms: endTime - startTime,
|
|
1643
1732
|
status: "OK",
|
|
1644
|
-
prompt_tokens: usage
|
|
1645
|
-
completion_tokens: usage
|
|
1646
|
-
total_tokens: usage
|
|
1733
|
+
prompt_tokens: usage.promptTokens,
|
|
1734
|
+
completion_tokens: usage.completionTokens,
|
|
1735
|
+
total_tokens: usage.totalTokens,
|
|
1647
1736
|
time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
|
|
1648
1737
|
attributes: captureContent ? attributes : void 0,
|
|
1649
1738
|
prompt_key: promptCtx?.promptKey,
|
|
@@ -1723,6 +1812,24 @@ function createGenerateObjectWrapper(aiModule) {
|
|
|
1723
1812
|
try {
|
|
1724
1813
|
const result = await aiModule.generateObject(...args);
|
|
1725
1814
|
const endTime = Date.now();
|
|
1815
|
+
if (aiSdkDebug) {
|
|
1816
|
+
console.log(
|
|
1817
|
+
"\n\u{1F50D} [Fallom Debug] generateObject result keys:",
|
|
1818
|
+
Object.keys(result || {})
|
|
1819
|
+
);
|
|
1820
|
+
console.log(
|
|
1821
|
+
"\u{1F50D} [Fallom Debug] result.usage:",
|
|
1822
|
+
JSON.stringify(result?.usage, null, 2)
|
|
1823
|
+
);
|
|
1824
|
+
console.log(
|
|
1825
|
+
"\u{1F50D} [Fallom Debug] result.response keys:",
|
|
1826
|
+
Object.keys(result?.response || {})
|
|
1827
|
+
);
|
|
1828
|
+
console.log(
|
|
1829
|
+
"\u{1F50D} [Fallom Debug] result.response.usage:",
|
|
1830
|
+
JSON.stringify(result?.response?.usage, null, 2)
|
|
1831
|
+
);
|
|
1832
|
+
}
|
|
1726
1833
|
const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
|
|
1727
1834
|
const attributes = {};
|
|
1728
1835
|
if (captureContent) {
|
|
@@ -1735,6 +1842,18 @@ function createGenerateObjectWrapper(aiModule) {
|
|
|
1735
1842
|
);
|
|
1736
1843
|
}
|
|
1737
1844
|
}
|
|
1845
|
+
if (result?.usage) {
|
|
1846
|
+
attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
|
|
1847
|
+
}
|
|
1848
|
+
if (result?.experimental_providerMetadata) {
|
|
1849
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(
|
|
1850
|
+
result.experimental_providerMetadata
|
|
1851
|
+
);
|
|
1852
|
+
}
|
|
1853
|
+
if (result?.finishReason) {
|
|
1854
|
+
attributes["gen_ai.response.finish_reason"] = result.finishReason;
|
|
1855
|
+
}
|
|
1856
|
+
const usage = extractUsageFromResult(result);
|
|
1738
1857
|
sendTrace({
|
|
1739
1858
|
config_key: ctx.configKey,
|
|
1740
1859
|
session_id: ctx.sessionId,
|
|
@@ -1749,9 +1868,9 @@ function createGenerateObjectWrapper(aiModule) {
|
|
|
1749
1868
|
end_time: new Date(endTime).toISOString(),
|
|
1750
1869
|
duration_ms: endTime - startTime,
|
|
1751
1870
|
status: "OK",
|
|
1752
|
-
prompt_tokens:
|
|
1753
|
-
completion_tokens:
|
|
1754
|
-
total_tokens:
|
|
1871
|
+
prompt_tokens: usage.promptTokens,
|
|
1872
|
+
completion_tokens: usage.completionTokens,
|
|
1873
|
+
total_tokens: usage.totalTokens,
|
|
1755
1874
|
attributes: captureContent ? attributes : void 0,
|
|
1756
1875
|
prompt_key: promptCtx?.promptKey,
|
|
1757
1876
|
prompt_version: promptCtx?.promptVersion,
|
|
@@ -1811,9 +1930,31 @@ function createStreamObjectWrapper(aiModule) {
|
|
|
1811
1930
|
} catch {
|
|
1812
1931
|
}
|
|
1813
1932
|
if (result?.usage) {
|
|
1814
|
-
result.usage.then((
|
|
1933
|
+
result.usage.then(async (rawUsage) => {
|
|
1815
1934
|
const endTime = Date.now();
|
|
1816
|
-
|
|
1935
|
+
if (aiSdkDebug) {
|
|
1936
|
+
console.log(
|
|
1937
|
+
"\n\u{1F50D} [Fallom Debug] streamObject usage:",
|
|
1938
|
+
JSON.stringify(rawUsage, null, 2)
|
|
1939
|
+
);
|
|
1940
|
+
console.log(
|
|
1941
|
+
"\u{1F50D} [Fallom Debug] streamObject result keys:",
|
|
1942
|
+
Object.keys(result || {})
|
|
1943
|
+
);
|
|
1944
|
+
}
|
|
1945
|
+
log2("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
|
|
1946
|
+
let providerMetadata = result?.experimental_providerMetadata;
|
|
1947
|
+
if (providerMetadata && typeof providerMetadata.then === "function") {
|
|
1948
|
+
try {
|
|
1949
|
+
providerMetadata = await providerMetadata;
|
|
1950
|
+
} catch {
|
|
1951
|
+
providerMetadata = void 0;
|
|
1952
|
+
}
|
|
1953
|
+
}
|
|
1954
|
+
const usage = extractUsageFromResult(
|
|
1955
|
+
{ experimental_providerMetadata: providerMetadata },
|
|
1956
|
+
rawUsage
|
|
1957
|
+
);
|
|
1817
1958
|
const attributes = {};
|
|
1818
1959
|
if (captureContent) {
|
|
1819
1960
|
attributes["gen_ai.request.model"] = modelId;
|
|
@@ -1821,6 +1962,12 @@ function createStreamObjectWrapper(aiModule) {
|
|
|
1821
1962
|
if (firstTokenTime) {
|
|
1822
1963
|
attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
|
|
1823
1964
|
}
|
|
1965
|
+
if (rawUsage) {
|
|
1966
|
+
attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
|
|
1967
|
+
}
|
|
1968
|
+
if (providerMetadata) {
|
|
1969
|
+
attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
|
|
1970
|
+
}
|
|
1824
1971
|
sendTrace({
|
|
1825
1972
|
config_key: ctx.configKey,
|
|
1826
1973
|
session_id: ctx.sessionId,
|
|
@@ -1835,9 +1982,9 @@ function createStreamObjectWrapper(aiModule) {
|
|
|
1835
1982
|
end_time: new Date(endTime).toISOString(),
|
|
1836
1983
|
duration_ms: endTime - startTime,
|
|
1837
1984
|
status: "OK",
|
|
1838
|
-
prompt_tokens: usage
|
|
1839
|
-
completion_tokens: usage
|
|
1840
|
-
total_tokens: usage
|
|
1985
|
+
prompt_tokens: usage.promptTokens,
|
|
1986
|
+
completion_tokens: usage.completionTokens,
|
|
1987
|
+
total_tokens: usage.totalTokens,
|
|
1841
1988
|
attributes: captureContent ? attributes : void 0,
|
|
1842
1989
|
prompt_key: promptCtx?.promptKey,
|
|
1843
1990
|
prompt_version: promptCtx?.promptVersion,
|
|
@@ -1893,6 +2040,127 @@ function createStreamObjectWrapper(aiModule) {
|
|
|
1893
2040
|
return result;
|
|
1894
2041
|
};
|
|
1895
2042
|
}
|
|
2043
|
+
function wrapMastraAgent(agent) {
|
|
2044
|
+
const originalGenerate = agent.generate.bind(agent);
|
|
2045
|
+
const agentName = agent.name || "MastraAgent";
|
|
2046
|
+
agent.generate = async function(...args) {
|
|
2047
|
+
const ctx = sessionStorage.getStore() || fallbackSession;
|
|
2048
|
+
if (!ctx || !initialized2) {
|
|
2049
|
+
return originalGenerate(...args);
|
|
2050
|
+
}
|
|
2051
|
+
let promptCtx = null;
|
|
2052
|
+
try {
|
|
2053
|
+
const { getPromptContext: getPromptContext2 } = await Promise.resolve().then(() => (init_prompts(), prompts_exports));
|
|
2054
|
+
promptCtx = getPromptContext2();
|
|
2055
|
+
} catch {
|
|
2056
|
+
}
|
|
2057
|
+
const traceId = generateHexId(32);
|
|
2058
|
+
const spanId = generateHexId(16);
|
|
2059
|
+
const startTime = Date.now();
|
|
2060
|
+
const messages = args[0] || [];
|
|
2061
|
+
try {
|
|
2062
|
+
const result = await originalGenerate(...args);
|
|
2063
|
+
const endTime = Date.now();
|
|
2064
|
+
const model = result?.model?.modelId || "unknown";
|
|
2065
|
+
const toolCalls = [];
|
|
2066
|
+
if (result?.steps?.length) {
|
|
2067
|
+
for (const step of result.steps) {
|
|
2068
|
+
if (step.toolCalls?.length) {
|
|
2069
|
+
for (let i = 0; i < step.toolCalls.length; i++) {
|
|
2070
|
+
const tc = step.toolCalls[i];
|
|
2071
|
+
const tr = step.toolResults?.[i];
|
|
2072
|
+
toolCalls.push({
|
|
2073
|
+
name: tc.toolName,
|
|
2074
|
+
arguments: tc.args,
|
|
2075
|
+
result: tr?.result
|
|
2076
|
+
});
|
|
2077
|
+
}
|
|
2078
|
+
}
|
|
2079
|
+
}
|
|
2080
|
+
}
|
|
2081
|
+
const attributes = {
|
|
2082
|
+
"gen_ai.system": "Mastra",
|
|
2083
|
+
"gen_ai.request.model": model,
|
|
2084
|
+
"gen_ai.response.model": model,
|
|
2085
|
+
"fallom.source": "mastra-agent",
|
|
2086
|
+
"llm.request.type": "chat"
|
|
2087
|
+
};
|
|
2088
|
+
if (Array.isArray(messages)) {
|
|
2089
|
+
messages.forEach((msg, i) => {
|
|
2090
|
+
attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
|
|
2091
|
+
attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
|
|
2092
|
+
});
|
|
2093
|
+
}
|
|
2094
|
+
if (result?.text) {
|
|
2095
|
+
attributes["gen_ai.completion.0.role"] = "assistant";
|
|
2096
|
+
attributes["gen_ai.completion.0.content"] = result.text;
|
|
2097
|
+
attributes["gen_ai.completion.0.finish_reason"] = "stop";
|
|
2098
|
+
}
|
|
2099
|
+
if (toolCalls.length > 0) {
|
|
2100
|
+
attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
|
|
2101
|
+
toolCalls.forEach((tc, i) => {
|
|
2102
|
+
attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
|
|
2103
|
+
attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
|
|
2104
|
+
attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
|
|
2105
|
+
});
|
|
2106
|
+
}
|
|
2107
|
+
if (result?.usage) {
|
|
2108
|
+
attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
|
|
2109
|
+
attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
|
|
2110
|
+
attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
|
|
2111
|
+
}
|
|
2112
|
+
const traceData = {
|
|
2113
|
+
config_key: ctx.configKey,
|
|
2114
|
+
session_id: ctx.sessionId,
|
|
2115
|
+
customer_id: ctx.customerId,
|
|
2116
|
+
trace_id: traceId,
|
|
2117
|
+
span_id: spanId,
|
|
2118
|
+
name: `mastra.${agentName}.generate`,
|
|
2119
|
+
kind: "client",
|
|
2120
|
+
model,
|
|
2121
|
+
start_time: new Date(startTime).toISOString(),
|
|
2122
|
+
end_time: new Date(endTime).toISOString(),
|
|
2123
|
+
duration_ms: endTime - startTime,
|
|
2124
|
+
status: "OK",
|
|
2125
|
+
prompt_tokens: result?.usage?.promptTokens,
|
|
2126
|
+
completion_tokens: result?.usage?.completionTokens,
|
|
2127
|
+
total_tokens: result?.usage?.totalTokens,
|
|
2128
|
+
attributes,
|
|
2129
|
+
prompt_key: promptCtx?.promptKey,
|
|
2130
|
+
prompt_version: promptCtx?.promptVersion,
|
|
2131
|
+
prompt_ab_test_key: promptCtx?.abTestKey,
|
|
2132
|
+
prompt_variant_index: promptCtx?.variantIndex
|
|
2133
|
+
};
|
|
2134
|
+
sendTrace(traceData).catch(() => {
|
|
2135
|
+
});
|
|
2136
|
+
return result;
|
|
2137
|
+
} catch (error) {
|
|
2138
|
+
const endTime = Date.now();
|
|
2139
|
+
const traceData = {
|
|
2140
|
+
config_key: ctx.configKey,
|
|
2141
|
+
session_id: ctx.sessionId,
|
|
2142
|
+
customer_id: ctx.customerId,
|
|
2143
|
+
trace_id: traceId,
|
|
2144
|
+
span_id: spanId,
|
|
2145
|
+
name: `mastra.${agentName}.generate`,
|
|
2146
|
+
kind: "client",
|
|
2147
|
+
start_time: new Date(startTime).toISOString(),
|
|
2148
|
+
end_time: new Date(endTime).toISOString(),
|
|
2149
|
+
duration_ms: endTime - startTime,
|
|
2150
|
+
status: "ERROR",
|
|
2151
|
+
error_message: error instanceof Error ? error.message : String(error),
|
|
2152
|
+
prompt_key: promptCtx?.promptKey,
|
|
2153
|
+
prompt_version: promptCtx?.promptVersion,
|
|
2154
|
+
prompt_ab_test_key: promptCtx?.abTestKey,
|
|
2155
|
+
prompt_variant_index: promptCtx?.variantIndex
|
|
2156
|
+
};
|
|
2157
|
+
sendTrace(traceData).catch(() => {
|
|
2158
|
+
});
|
|
2159
|
+
throw error;
|
|
2160
|
+
}
|
|
2161
|
+
};
|
|
2162
|
+
return agent;
|
|
2163
|
+
}
|
|
1896
2164
|
|
|
1897
2165
|
// src/models.ts
|
|
1898
2166
|
var models_exports = {};
|
|
@@ -2162,6 +2430,241 @@ async function init4(options = {}) {
|
|
|
2162
2430
|
});
|
|
2163
2431
|
}
|
|
2164
2432
|
|
|
2433
|
+
// src/mastra.ts
|
|
2434
|
+
var import_core2 = require("@opentelemetry/core");
|
|
2435
|
+
var promptContext2 = {};
|
|
2436
|
+
function setMastraPrompt(promptKey, version) {
|
|
2437
|
+
promptContext2 = {
|
|
2438
|
+
promptKey,
|
|
2439
|
+
promptVersion: version,
|
|
2440
|
+
promptAbTestKey: void 0,
|
|
2441
|
+
promptVariantIndex: void 0
|
|
2442
|
+
};
|
|
2443
|
+
}
|
|
2444
|
+
function setMastraPromptAB(abTestKey, variantIndex) {
|
|
2445
|
+
promptContext2 = {
|
|
2446
|
+
promptKey: void 0,
|
|
2447
|
+
promptVersion: void 0,
|
|
2448
|
+
promptAbTestKey: abTestKey,
|
|
2449
|
+
promptVariantIndex: variantIndex
|
|
2450
|
+
};
|
|
2451
|
+
}
|
|
2452
|
+
function clearMastraPrompt() {
|
|
2453
|
+
promptContext2 = {};
|
|
2454
|
+
}
|
|
2455
|
+
var FallomExporter = class {
  /**
   * Span exporter that POSTs OpenTelemetry spans to Fallom's OTLP/JSON
   * trace endpoint, attaching Fallom session and prompt-context metadata
   * as `X-Fallom-*` HTTP headers.
   *
   * @param {Object}  [options]
   * @param {string}  [options.apiKey]  Fallom API key; falls back to the
   *   FALLOM_API_KEY environment variable, then to "".
   * @param {string}  [options.baseUrl] Trace-ingestion base URL.
   * @param {boolean} [options.debug]   Enable verbose diagnostic logging.
   */
  constructor(options = {}) {
    this.pendingExports = [];
    this.apiKey = options.apiKey ?? process.env.FALLOM_API_KEY ?? "";
    this.baseUrl = options.baseUrl ?? "https://traces.fallom.com";
    this.debug = options.debug ?? false;
    // Fix: these diagnostics were unconditional console.log calls that
    // fired on every construction; route them through log() so they
    // respect the debug flag.
    this.log("Constructor called, debug:", this.debug);
    this.log("API key present:", !!this.apiKey);
    this.log("Base URL:", this.baseUrl);
    if (!this.apiKey) {
      console.warn(
        "[FallomExporter] No API key provided. Set FALLOM_API_KEY env var or pass apiKey option."
      );
    }
  }
  /** Log to the console only when debug mode is enabled. */
  log(...args) {
    if (this.debug) {
      console.log("[FallomExporter]", ...args);
    }
  }
  /**
   * Export spans to Fallom.
   *
   * Fire-and-forget per the SpanExporter contract: the HTTP send runs
   * asynchronously and reports its outcome through `resultCallback`.
   */
  export(spans, resultCallback) {
    if (spans.length === 0) {
      resultCallback({ code: import_core2.ExportResultCode.SUCCESS });
      return;
    }
    this.log(`Exporting ${spans.length} spans...`);
    if (this.debug) {
      for (const span2 of spans) {
        this.log(` - ${span2.name}`, {
          attributes: Object.fromEntries(
            Object.entries(span2.attributes).filter(
              ([k]) => k.startsWith("gen_ai") || k.startsWith("llm")
            )
          )
        });
      }
    }
    const exportPromise = this.sendSpans(spans).then(() => {
      this.log("Export successful");
      resultCallback({ code: import_core2.ExportResultCode.SUCCESS });
    }).catch((error) => {
      console.error("[FallomExporter] Export failed:", error);
      resultCallback({
        code: import_core2.ExportResultCode.FAILED,
        error: error instanceof Error ? error : new Error(String(error))
      });
    });
    this.pendingExports.push(exportPromise);
    // Fix: drop settled promises so pendingExports does not grow without
    // bound in long-lived processes (previously it was only cleared on
    // shutdown()). shutdown()/forceFlush() still await in-flight work.
    const settle = () => {
      const idx = this.pendingExports.indexOf(exportPromise);
      if (idx !== -1) {
        this.pendingExports.splice(idx, 1);
      }
    };
    exportPromise.then(settle, settle);
  }
  /**
   * Shutdown the exporter, waiting for in-flight exports to settle.
   */
  async shutdown() {
    await Promise.all(this.pendingExports);
    this.pendingExports = [];
  }
  /**
   * Wait for in-flight exports to settle without shutting down.
   */
  async forceFlush() {
    await Promise.all(this.pendingExports);
  }
  /**
   * POST the given spans to Fallom's OTLP endpoint as OTLP/JSON,
   * attaching session and prompt context via `X-Fallom-*` headers.
   *
   * @throws {Error} when the server responds with a non-2xx status.
   */
  async sendSpans(spans) {
    const session = getSession();
    const resourceSpans = this.spansToOtlpJson(spans);
    const headers = {
      "Content-Type": "application/json",
      Authorization: `Bearer ${this.apiKey}`
    };
    if (session?.configKey) {
      headers["X-Fallom-Config-Key"] = session.configKey;
    }
    if (session?.sessionId) {
      headers["X-Fallom-Session-Id"] = session.sessionId;
    }
    if (session?.customerId) {
      headers["X-Fallom-Customer-Id"] = session.customerId;
    }
    if (promptContext2.promptKey) {
      headers["X-Fallom-Prompt-Key"] = promptContext2.promptKey;
    }
    if (promptContext2.promptVersion !== void 0) {
      headers["X-Fallom-Prompt-Version"] = String(promptContext2.promptVersion);
    }
    if (promptContext2.promptAbTestKey) {
      headers["X-Fallom-Prompt-AB-Test"] = promptContext2.promptAbTestKey;
    }
    if (promptContext2.promptVariantIndex !== void 0) {
      headers["X-Fallom-Prompt-Variant"] = String(
        promptContext2.promptVariantIndex
      );
    }
    const endpoint = `${this.baseUrl}/v1/traces`;
    this.log("Sending to", endpoint);
    // Never log the real bearer token.
    this.log("Headers:", {
      ...headers,
      Authorization: "Bearer ***"
    });
    const response = await fetch(endpoint, {
      method: "POST",
      headers,
      body: JSON.stringify({ resourceSpans })
    });
    if (!response.ok) {
      const text = await response.text();
      throw new Error(`Failed to export: ${response.status} ${text}`);
    }
  }
  /**
   * Convert OpenTelemetry spans to OTLP JSON format, grouping spans that
   * share identical resource attributes under one resourceSpans entry.
   * NOTE(review): the scope is taken from the first span of each group,
   * so spans from different instrumentation scopes get merged under it.
   */
  spansToOtlpJson(spans) {
    const resourceMap = /* @__PURE__ */ new Map();
    for (const span2 of spans) {
      const resourceKey = JSON.stringify(span2.resource.attributes);
      if (!resourceMap.has(resourceKey)) {
        resourceMap.set(resourceKey, []);
      }
      resourceMap.get(resourceKey).push(span2);
    }
    const resourceSpans = [];
    for (const [_resourceKey, resourceSpanList] of resourceMap) {
      const firstSpan = resourceSpanList[0];
      resourceSpans.push({
        resource: {
          attributes: this.attributesToOtlp(firstSpan.resource.attributes)
        },
        scopeSpans: [
          {
            scope: {
              name: firstSpan.instrumentationLibrary.name,
              version: firstSpan.instrumentationLibrary.version
            },
            spans: resourceSpanList.map((span2) => this.spanToOtlp(span2))
          }
        ]
      });
    }
    return resourceSpans;
  }
  /**
   * Convert a single span to OTLP format.
   */
  spanToOtlp(span2) {
    return {
      traceId: span2.spanContext().traceId,
      spanId: span2.spanContext().spanId,
      parentSpanId: span2.parentSpanId,
      name: span2.name,
      kind: span2.kind,
      startTimeUnixNano: this.hrTimeToNanos(span2.startTime),
      endTimeUnixNano: this.hrTimeToNanos(span2.endTime),
      attributes: this.attributesToOtlp(span2.attributes),
      status: {
        code: span2.status.code,
        message: span2.status.message
      },
      events: span2.events.map((event) => ({
        timeUnixNano: this.hrTimeToNanos(event.time),
        name: event.name,
        attributes: this.attributesToOtlp(event.attributes || {})
      }))
    };
  }
  /**
   * Convert an attribute record to the OTLP key/value list form.
   */
  attributesToOtlp(attrs) {
    return Object.entries(attrs).map(([key, value]) => ({
      key,
      value: this.valueToOtlp(value)
    }));
  }
  /**
   * Convert a JS value to the OTLP AnyValue union.
   * Values of unknown type are stringified as a lossy fallback.
   */
  valueToOtlp(value) {
    if (typeof value === "string") {
      return { stringValue: value };
    }
    if (typeof value === "number") {
      return Number.isInteger(value) ? { intValue: value } : { doubleValue: value };
    }
    if (typeof value === "boolean") {
      return { boolValue: value };
    }
    if (Array.isArray(value)) {
      return {
        arrayValue: {
          values: value.map((v) => this.valueToOtlp(v))
        }
      };
    }
    return { stringValue: String(value) };
  }
  /**
   * Convert an HrTime [seconds, nanos] pair to a nanosecond count,
   * stringified (via BigInt) to avoid 64-bit precision loss in JSON.
   */
  hrTimeToNanos(hrTime) {
    const [seconds, nanos] = hrTime;
    return String(BigInt(seconds) * BigInt(1e9) + BigInt(nanos));
  }
};
|
|
2667
|
+
|
|
2165
2668
|
// src/index.ts
|
|
2166
2669
|
init_prompts();
|
|
2167
2670
|
var index_default = {
|
|
@@ -2172,8 +2675,12 @@ var index_default = {
|
|
|
2172
2675
|
};
|
|
2173
2676
|
// Annotate the CommonJS export names for ESM import in node:
// (the `0 &&` makes this statement dead code at runtime; Node's CJS
// named-export detection reads it statically to discover these names,
// so the list must stay in sync with the real exports above)
0 && (module.exports = {
  FallomExporter,
  clearMastraPrompt,
  init,
  models,
  prompts,
  setMastraPrompt,
  setMastraPromptAB,
  trace
});