@fallom/trace 0.2.2 → 0.2.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -26,7 +26,7 @@ __export(models_exports, {
  get: () => get,
  init: () => init2
  });
- function log4(msg) {
+ function log3(msg) {
  if (debugMode2) {
  console.log(`[Fallom] ${msg}`);
  }
@@ -58,11 +58,11 @@ function ensureInit() {
  }
  async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  if (!apiKey2) {
- log4("_fetchConfigs: No API key, skipping");
+ log3("_fetchConfigs: No API key, skipping");
  return;
  }
  try {
- log4(`Fetching configs from ${baseUrl2}/configs`);
+ log3(`Fetching configs from ${baseUrl2}/configs`);
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeout);
  const resp = await fetch(`${baseUrl2}/configs`, {
@@ -70,15 +70,15 @@ async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  signal: controller.signal
  });
  clearTimeout(timeoutId);
- log4(`Response status: ${resp.status}`);
+ log3(`Response status: ${resp.status}`);
  if (resp.ok) {
  const data = await resp.json();
  const configs = data.configs || [];
- log4(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
+ log3(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
  for (const c of configs) {
  const key = c.key;
  const version = c.version || 1;
- log4(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
+ log3(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
  if (!configCache.has(key)) {
  configCache.set(key, { versions: /* @__PURE__ */ new Map(), latest: null });
  }
@@ -87,10 +87,10 @@ async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  cached.latest = version;
  }
  } else {
- log4(`Fetch failed: ${resp.statusText}`);
+ log3(`Fetch failed: ${resp.statusText}`);
  }
  } catch (e) {
- log4(`Fetch exception: ${e}`);
+ log3(`Fetch exception: ${e}`);
  }
  }
  async function fetchSpecificVersion(configKey, version, timeout = SYNC_TIMEOUT) {
@@ -122,24 +122,24 @@ async function get(configKey, sessionId, options = {}) {
  const { version, fallback, debug = false } = options;
  debugMode2 = debug;
  ensureInit();
- log4(
+ log3(
  `get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
  );
  try {
  let configData = configCache.get(configKey);
- log4(
+ log3(
  `Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
  );
  if (!configData) {
- log4("Not in cache, fetching...");
+ log3("Not in cache, fetching...");
  await fetchConfigs(SYNC_TIMEOUT);
  configData = configCache.get(configKey);
- log4(
+ log3(
  `After fetch, cache lookup: ${configData ? "found" : "still not found"}`
  );
  }
  if (!configData) {
- log4(`Config not found, using fallback: ${fallback}`);
+ log3(`Config not found, using fallback: ${fallback}`);
  if (fallback) {
  console.warn(
  `[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
@@ -183,20 +183,20 @@ async function get(configKey, sessionId, options = {}) {
  const variantsRaw = config.variants;
  const configVersion = config.version || targetVersion;
  const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
- log4(
+ log3(
  `Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
  variants
  )}`
  );
  const hashBytes = (0, import_crypto.createHash)("md5").update(sessionId).digest();
  const hashVal = hashBytes.readUInt32BE(0) % 1e6;
- log4(`Session hash: ${hashVal} (out of 1,000,000)`);
+ log3(`Session hash: ${hashVal} (out of 1,000,000)`);
  let cumulative = 0;
  let assignedModel = variants[variants.length - 1].model;
  for (const v of variants) {
  const oldCumulative = cumulative;
  cumulative += v.weight * 1e4;
- log4(
+ log3(
  `Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
  );
  if (hashVal < cumulative) {
@@ -204,7 +204,7 @@ async function get(configKey, sessionId, options = {}) {
  break;
  }
  }
- log4(`\u2705 Assigned model: ${assignedModel}`);
+ log3(`\u2705 Assigned model: ${assignedModel}`);
  return returnModel(configKey, sessionId, assignedModel, configVersion);
  } catch (e) {
  if (e instanceof Error && e.message.includes("not found")) {
@@ -1073,32 +1073,6 @@ function generateHexId(length) {
  crypto.getRandomValues(bytes);
  return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("");
  }
- function messagesToOtelAttributes(messages, completion, model, responseId) {
- const attrs = {};
- if (model) {
- attrs["gen_ai.request.model"] = model;
- attrs["gen_ai.response.model"] = model;
- }
- if (responseId) {
- attrs["gen_ai.response.id"] = responseId;
- }
- if (messages) {
- messages.forEach((msg, i) => {
- attrs[`gen_ai.prompt.${i}.role`] = msg.role;
- attrs[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
- });
- }
- if (completion) {
- attrs["gen_ai.completion.0.role"] = completion.role;
- attrs["gen_ai.completion.0.content"] = typeof completion.content === "string" ? completion.content : JSON.stringify(completion.content);
- if (completion.tool_calls) {
- attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
- completion.tool_calls
- );
- }
- }
- return attrs;
- }

  // src/trace/wrappers/openai.ts
  function wrapOpenAI(client, sessionCtx) {
@@ -1120,18 +1094,25 @@ function wrapOpenAI(client, sessionCtx) {
  try {
  const response = await originalCreate(...args);
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- response?.choices?.[0]?.message,
- response?.model || params?.model,
- response?.id
- ) : {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "chat.completions.create"
+ };
+ if (captureContent2) {
+ attributes["fallom.raw.request"] = JSON.stringify({
+ messages: params?.messages,
+ model: params?.model
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: response?.choices?.[0]?.message?.content,
+ finishReason: response?.choices?.[0]?.finish_reason,
+ responseId: response?.id,
+ model: response?.model
+ });
+ }
  if (response?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
  }
- if (response?.choices?.[0]?.finish_reason) {
- attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1146,24 +1127,12 @@ function wrapOpenAI(client, sessionCtx) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: response?.usage?.prompt_tokens,
- completion_tokens: response?.usage?.completion_tokens,
- total_tokens: response?.usage?.total_tokens,
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- void 0,
- params?.model,
- void 0
- ) : void 0;
- if (attributes) {
- attributes["error.message"] = error?.message;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1179,7 +1148,10 @@ function wrapOpenAI(client, sessionCtx) {
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "chat.completions.create"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1206,21 +1178,26 @@ function wrapAnthropic(client, sessionCtx) {
  try {
  const response = await originalCreate(...args);
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- { role: "assistant", content: response?.content?.[0]?.text || "" },
- response?.model || params?.model,
- response?.id
- ) : {};
- if (params?.system) {
- attributes["gen_ai.system_prompt"] = params.system;
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "messages.create"
+ };
+ if (captureContent2) {
+ attributes["fallom.raw.request"] = JSON.stringify({
+ messages: params?.messages,
+ system: params?.system,
+ model: params?.model
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: response?.content?.[0]?.text,
+ finishReason: response?.stop_reason,
+ responseId: response?.id,
+ model: response?.model
+ });
  }
  if (response?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
  }
- if (response?.stop_reason) {
- attributes["gen_ai.response.finish_reason"] = response.stop_reason;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1235,27 +1212,12 @@ function wrapAnthropic(client, sessionCtx) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: response?.usage?.input_tokens,
- completion_tokens: response?.usage?.output_tokens,
- total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- void 0,
- params?.model,
- void 0
- ) : void 0;
- if (attributes) {
- attributes["error.message"] = error?.message;
- if (params?.system) {
- attributes["gen_ai.system_prompt"] = params.system;
- }
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1271,7 +1233,10 @@ function wrapAnthropic(client, sessionCtx) {
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "messages.create"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1282,50 +1247,36 @@ function wrapAnthropic(client, sessionCtx) {

  // src/trace/wrappers/google-ai.ts
  function wrapGoogleAI(model, sessionCtx) {
- const originalGenerate = model.generateContent.bind(model);
+ const originalGenerateContent = model.generateContent.bind(model);
  const ctx = sessionCtx;
  model.generateContent = async function(...args) {
  if (!isInitialized()) {
- return originalGenerate(...args);
+ return originalGenerateContent(...args);
  }
  const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
  const traceId = traceCtx?.traceId || generateHexId(32);
  const spanId = generateHexId(16);
  const parentSpanId = traceCtx?.parentSpanId;
+ const request = args[0];
  const startTime = Date.now();
  const captureContent2 = shouldCaptureContent();
  try {
- const response = await originalGenerate(...args);
+ const response = await originalGenerateContent(...args);
  const endTime = Date.now();
- const result = response?.response;
- const usage = result?.usageMetadata;
- const modelName = model?.model || "gemini";
- const attributes = {};
+ const result = response?.response || response;
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateContent"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelName;
- attributes["gen_ai.response.model"] = modelName;
- const input = args[0];
- if (typeof input === "string") {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = input;
- } else if (input?.contents) {
- input.contents.forEach((content, i) => {
- attributes[`gen_ai.prompt.${i}.role`] = content.role || "user";
- attributes[`gen_ai.prompt.${i}.content`] = content.parts?.[0]?.text || JSON.stringify(content.parts);
- });
- }
- const outputText = result?.text?.();
- if (outputText) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = outputText;
- }
- }
- if (usage) {
- attributes["fallom.raw.usage"] = JSON.stringify(usage);
+ attributes["fallom.raw.request"] = JSON.stringify(request);
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: result?.text?.(),
+ candidates: result?.candidates
+ });
  }
- const candidate = result?.candidates?.[0];
- if (candidate?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
+ if (result?.usageMetadata) {
+ attributes["fallom.raw.usage"] = JSON.stringify(result.usageMetadata);
  }
  sendTrace({
  config_key: ctx.configKey,
@@ -1336,31 +1287,17 @@ function wrapGoogleAI(model, sessionCtx) {
  parent_span_id: parentSpanId,
  name: "generateContent",
  kind: "llm",
- model: modelName,
+ model: model.model || "gemini",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokenCount,
- completion_tokens: usage?.candidatesTokenCount,
- total_tokens: usage?.totalTokenCount,
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const modelName = model?.model || "gemini";
- const attributes = {};
- if (captureContent2) {
- attributes["gen_ai.request.model"] = modelName;
- attributes["error.message"] = error?.message;
- const input = args[0];
- if (typeof input === "string") {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = input;
- }
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1370,13 +1307,16 @@ function wrapGoogleAI(model, sessionCtx) {
  parent_span_id: parentSpanId,
  name: "generateContent",
  kind: "llm",
- model: modelName,
+ model: model.model || "gemini",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes: captureContent2 ? attributes : void 0
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateContent"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1385,35 +1325,6 @@ function wrapGoogleAI(model, sessionCtx) {
  return model;
  }

- // src/trace/wrappers/vercel-ai/utils.ts
- function extractUsageFromResult(result, directUsage) {
- let usage = directUsage ?? result?.usage;
- const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
- let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : isValidNumber(usage?.inputTokens) ? usage.inputTokens : isValidNumber(usage?.prompt_tokens) ? usage.prompt_tokens : void 0;
- let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : isValidNumber(usage?.outputTokens) ? usage.outputTokens : isValidNumber(usage?.completion_tokens) ? usage.completion_tokens : void 0;
- let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : isValidNumber(usage?.total_tokens) ? usage.total_tokens : void 0;
- let cost;
- const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
- if (orUsage) {
- if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
- promptTokens = orUsage.promptTokens;
- }
- if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
- completionTokens = orUsage.completionTokens;
- }
- if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
- totalTokens = orUsage.totalTokens;
- }
- if (isValidNumber(orUsage.cost)) {
- cost = orUsage.cost;
- }
- }
- if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
- totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
- }
- return { promptTokens, completionTokens, totalTokens, cost };
- }
-
  // src/trace/wrappers/vercel-ai/generate-text.ts
  function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  const ctx = sessionCtx;
@@ -1432,54 +1343,33 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  const result = await aiModule.generateText(...args);
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] generateText result keys:",
- Object.keys(result || {})
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.usage:",
- JSON.stringify(result?.usage, null, 2)
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
- JSON.stringify(result?.experimental_providerMetadata, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] generateText raw result:", JSON.stringify(result, null, 2));
  }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateText"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- attributes["gen_ai.response.model"] = modelId;
- if (params?.prompt) {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = params.prompt;
- }
- if (params?.messages) {
- params.messages.forEach((msg, i) => {
- attributes[`gen_ai.prompt.${i}.role`] = msg.role;
- attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
- });
- }
- if (result?.text) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = result.text;
- }
- if (result?.response?.id) {
- attributes["gen_ai.response.id"] = result.response.id;
- }
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: result?.text,
+ finishReason: result?.finishReason,
+ responseId: result?.response?.id,
+ modelId: result?.response?.modelId
+ });
  }
  if (result?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
  }
  if (result?.experimental_providerMetadata) {
- attributes["fallom.raw.providerMetadata"] = JSON.stringify(
- result.experimental_providerMetadata
- );
- }
- if (result?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = result.finishReason;
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(result.experimental_providerMetadata);
  }
- const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1494,10 +1384,7 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return result;
@@ -1518,7 +1405,17 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateText",
+ "fallom.raw.request": JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ })
+ }
  }).catch(() => {
  });
  throw error;
@@ -1547,15 +1444,17 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  let firstTokenTime = null;
  const modelId = params?.model?.modelId || String(params?.model || "unknown");
  if (result?.usage) {
- result.usage.then(async (rawUsage) => {
+ Promise.all([
+ result.usage.catch(() => null),
+ result.text?.catch(() => null),
+ result.finishReason?.catch(() => null)
+ ]).then(async ([rawUsage, responseText, finishReason]) => {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] streamText usage:",
- JSON.stringify(rawUsage, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] streamText raw usage:", JSON.stringify(rawUsage, null, 2));
+ console.log("\u{1F50D} [Fallom Debug] streamText response text:", responseText?.slice(0, 100));
+ console.log("\u{1F50D} [Fallom Debug] streamText finish reason:", finishReason);
  }
- log2("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
  let providerMetadata = result?.experimental_providerMetadata;
  if (providerMetadata && typeof providerMetadata.then === "function") {
  try {
@@ -1564,28 +1463,35 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  providerMetadata = void 0;
  }
  }
- const usage = extractUsageFromResult(
- { experimental_providerMetadata: providerMetadata },
- rawUsage
- );
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamText",
+ "fallom.is_streaming": true
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- if (params?.prompt) {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = params.prompt;
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ });
+ if (responseText || finishReason) {
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: responseText,
+ finishReason
+ });
  }
  }
- if (firstTokenTime) {
- attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
- }
  if (rawUsage) {
  attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
  }
  if (providerMetadata) {
  attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
  }
- const tracePayload = {
+ if (firstTokenTime) {
+ attributes["fallom.time_to_first_token_ms"] = firstTokenTime - startTime;
+ }
+ sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
  customer_id: ctx.customerId,
@@ -1599,13 +1505,10 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
  time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
- attributes: captureContent2 ? attributes : void 0
- };
- sendTrace(tracePayload).catch(() => {
+ is_streaming: true,
+ attributes
+ }).catch(() => {
  });
  }).catch((error) => {
  const endTime = Date.now();
@@ -1624,7 +1527,12 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamText",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  });
@@ -1672,25 +1580,30 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
  console.log(
- "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
- Object.keys(result || {})
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.usage:",
- JSON.stringify(result?.usage, null, 2)
+ "\n\u{1F50D} [Fallom Debug] generateObject raw result:",
+ JSON.stringify(result, null, 2)
  );
  }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateObject"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- attributes["gen_ai.response.model"] = modelId;
- if (result?.object) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = JSON.stringify(
- result.object
- );
- }
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId,
+ schema: params?.schema ? "provided" : void 0
+ // Don't send full schema, just note if present
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ object: result?.object,
+ finishReason: result?.finishReason,
+ responseId: result?.response?.id,
+ modelId: result?.response?.modelId
+ });
  }
  if (result?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
@@ -1700,10 +1613,6 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  result.experimental_providerMetadata
  );
  }
- if (result?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = result.finishReason;
- }
- const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1718,10 +1627,7 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return result;
@@ -1742,7 +1648,11 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateObject"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1751,9 +1661,6 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  }

  // src/trace/wrappers/vercel-ai/stream-object.ts
- function log3(...args) {
- if (isDebugMode()) console.log("[Fallom]", ...args);
- }
  function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const ctx = sessionCtx;
  return async (...args) => {
@@ -1761,7 +1668,6 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const startTime = Date.now();
  const captureContent2 = shouldCaptureContent();
  const result = await aiModule.streamObject(...args);
- log3("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
  if (!isInitialized()) {
  return result;
  }
@@ -1769,18 +1675,19 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const traceId = traceCtx?.traceId || generateHexId(32);
  const spanId = generateHexId(16);
  const parentSpanId = traceCtx?.parentSpanId;
- let firstTokenTime = null;
  const modelId = params?.model?.modelId || String(params?.model || "unknown");
  if (result?.usage) {
- result.usage.then(async (rawUsage) => {
+ Promise.all([
+ result.usage.catch(() => null),
+ result.object?.catch(() => null),
+ result.finishReason?.catch(() => null)
+ ]).then(async ([rawUsage, responseObject, finishReason]) => {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] streamObject usage:",
- JSON.stringify(rawUsage, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] streamObject raw usage:", JSON.stringify(rawUsage, null, 2));
+ console.log("\u{1F50D} [Fallom Debug] streamObject response object:", JSON.stringify(responseObject)?.slice(0, 100));
+ console.log("\u{1F50D} [Fallom Debug] streamObject finish reason:", finishReason);
  }
- log3("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
  let providerMetadata = result?.experimental_providerMetadata;
  if (providerMetadata && typeof providerMetadata.then === "function") {
  try {
@@ -1789,16 +1696,25 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  providerMetadata = void 0;
  }
  }
- const usage = extractUsageFromResult(
- { experimental_providerMetadata: providerMetadata },
- rawUsage
- );
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamObject",
+ "fallom.is_streaming": true
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- }
- if (firstTokenTime) {
- attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId,
+ schema: params?.schema ? "provided" : void 0
+ });
+ if (responseObject || finishReason) {
+ attributes["fallom.raw.response"] = JSON.stringify({
+ object: responseObject,
+ finishReason
+ });
+ }
  }
  if (rawUsage) {
  attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
@@ -1820,10 +1736,8 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ is_streaming: true,
+ attributes
  }).catch(() => {
  });
  }).catch((error) => {
@@ -1842,31 +1756,16 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamObject",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  });
  }
- if (result?.partialObjectStream) {
- const originalStream = result.partialObjectStream;
- const wrappedStream = (async function* () {
- for await (const chunk of originalStream) {
- if (!firstTokenTime) {
- firstTokenTime = Date.now();
- log3("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
- }
- yield chunk;
- }
- })();
- return new Proxy(result, {
- get(target, prop) {
- if (prop === "partialObjectStream") {
- return wrappedStream;
- }
- return target[prop];
- }
- });
- }
  return result;
  };
  }
@@ -1885,105 +1784,69 @@ function wrapAISDK(ai, sessionCtx, options) {
  // src/trace/wrappers/mastra.ts
  function wrapMastraAgent(agent, sessionCtx) {
  const originalGenerate = agent.generate.bind(agent);
- const agentName = agent.name || "MastraAgent";
  const ctx = sessionCtx;
  agent.generate = async function(...args) {
  if (!isInitialized()) {
  return originalGenerate(...args);
  }
- const traceId = generateHexId(32);
+ const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
+ const traceId = traceCtx?.traceId || generateHexId(32);
  const spanId = generateHexId(16);
+ const parentSpanId = traceCtx?.parentSpanId;
+ const input = args[0];
  const startTime = Date.now();
- const messages = args[0] || [];
+ const captureContent2 = shouldCaptureContent();
  try {
  const result = await originalGenerate(...args);
  const endTime = Date.now();
- const model = result?.model?.modelId || "unknown";
- const toolCalls = [];
- if (result?.steps?.length) {
- for (const step of result.steps) {
- if (step.toolCalls?.length) {
- for (let i = 0; i < step.toolCalls.length; i++) {
- const tc = step.toolCalls[i];
- const tr = step.toolResults?.[i];
- toolCalls.push({
- name: tc.toolName,
- arguments: tc.args,
- result: tr?.result
- });
- }
- }
- }
- }
  const attributes = {
- "gen_ai.system": "Mastra",
- "gen_ai.request.model": model,
- "gen_ai.response.model": model,
- "fallom.source": "mastra-agent",
- "llm.request.type": "chat"
+ "fallom.sdk_version": "2",
+ "fallom.method": "agent.generate",
+ "fallom.agent_name": agent.name || "unknown"
  };
- if (Array.isArray(messages)) {
- messages.forEach((msg, i) => {
- attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
- attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
- });
- }
- if (result?.text) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = result.text;
- attributes["gen_ai.completion.0.finish_reason"] = "stop";
- }
- if (toolCalls.length > 0) {
- attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
- toolCalls.forEach((tc, i) => {
- attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
- attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
- attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
- });
- }
- if (result?.usage) {
- attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
- attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
- attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
+ if (captureContent2) {
+ attributes["fallom.raw.request"] = JSON.stringify(input);
+ attributes["fallom.raw.response"] = JSON.stringify(result);
  }
- const traceData = {
+ sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
  customer_id: ctx.customerId,
  trace_id: traceId,
  span_id: spanId,
- name: `mastra.${agentName}.generate`,
- kind: "client",
- model,
+ parent_span_id: parentSpanId,
+ name: `agent.${agent.name || "unknown"}.generate`,
+ kind: "agent",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: result?.usage?.promptTokens,
- completion_tokens: result?.usage?.completionTokens,
- total_tokens: result?.usage?.totalTokens,
  attributes
- };
- sendTrace(traceData).catch(() => {
+ }).catch(() => {
  });
  return result;
  } catch (error) {
  const endTime = Date.now();
- const traceData = {
+ sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
  customer_id: ctx.customerId,
  trace_id: traceId,
  span_id: spanId,
- name: `mastra.${agentName}.generate`,
- kind: "client",
+ parent_span_id: parentSpanId,
+ name: `agent.${agent.name || "unknown"}.generate`,
+ kind: "agent",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error instanceof Error ? error.message : String(error)
- };
- sendTrace(traceData).catch(() => {
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "agent.generate",
+ "fallom.agent_name": agent.name || "unknown"
+ }
+ }).catch(() => {
  });
  throw error;
  }
@@ -2023,6 +1886,9 @@ var FallomSession = class {
  /**
  * Wrap a Vercel AI SDK model to trace all calls (PostHog style).
  * Returns the same model type with tracing injected.
+ *
+ * Note: This only captures tokens/timing, not prompt/completion content.
+ * Use wrapAISDK for full content tracing.
  */
  traceModel(model) {
  const ctx = this.ctx;
@@ -2048,17 +1914,18 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "generateText",
+ name: "doGenerate",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokens,
- completion_tokens: usage?.completionTokens,
- total_tokens: usage?.totalTokens,
- attributes: shouldCaptureContent() && usage ? { "fallom.raw.usage": JSON.stringify(usage) } : void 0
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doGenerate",
+ ...usage ? { "fallom.raw.usage": JSON.stringify(usage) } : {}
+ }
  }).catch(() => {
  });
  return result;
@@ -2071,14 +1938,15 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "generateText",
+ name: "doGenerate",
  kind: "llm",
  model: model.modelId || "unknown",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error instanceof Error ? error.message : String(error)
+ error_message: error instanceof Error ? error.message : String(error),
+ attributes: { "fallom.sdk_version": "2", "fallom.method": "traceModel.doGenerate" }
  }).catch(() => {
  });
  throw error;
@@ -2103,14 +1971,19 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "streamText",
+ name: "doStream",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(Date.now()).toISOString(),
  duration_ms: Date.now() - startTime,
  status: "OK",
- is_streaming: true
+ is_streaming: true,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doStream",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  return result;
@@ -2122,7 +1995,7 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "streamText",
+ name: "doStream",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
@@ -2130,7 +2003,12 @@ var FallomSession = class {
  duration_ms: Date.now() - startTime,
  status: "ERROR",
  error_message: error instanceof Error ? error.message : String(error),
- is_streaming: true
+ is_streaming: true,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doStream",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  throw error;
@@ -2186,7 +2064,7 @@ var promptCache = /* @__PURE__ */ new Map();
  var promptABCache = /* @__PURE__ */ new Map();
  var promptContext = null;
  var SYNC_TIMEOUT2 = 2e3;
- function log5(msg) {
+ function log4(msg) {
  if (debugMode3) {
  console.log(`[Fallom Prompts] ${msg}`);
  }
@@ -2289,10 +2167,10 @@ async function get2(promptKey, options = {}) {
  const { variables, version, debug = false } = options;
  debugMode3 = debug;
  ensureInit2();
- log5(`get() called: promptKey=${promptKey}`);
+ log4(`get() called: promptKey=${promptKey}`);
  let promptData = promptCache.get(promptKey);
  if (!promptData) {
- log5("Not in cache, fetching...");
+ log4("Not in cache, fetching...");
  await fetchPrompts(SYNC_TIMEOUT2);
  promptData = promptCache.get(promptKey);
  }
@@ -2314,7 +2192,7 @@ async function get2(promptKey, options = {}) {
  promptKey,
  promptVersion: targetVersion
  });
- log5(`\u2705 Got prompt: ${promptKey} v${targetVersion}`);
+ log4(`\u2705 Got prompt: ${promptKey} v${targetVersion}`);
  return {
  key: promptKey,
  version: targetVersion,
@@ -2326,10 +2204,10 @@ async function getAB(abTestKey, sessionId, options = {}) {
  const { variables, debug = false } = options;
  debugMode3 = debug;
  ensureInit2();
- log5(`getAB() called: abTestKey=${abTestKey}, sessionId=${sessionId}`);
+ log4(`getAB() called: abTestKey=${abTestKey}, sessionId=${sessionId}`);
  let abData = promptABCache.get(abTestKey);
  if (!abData) {
- log5("Not in cache, fetching...");
+ log4("Not in cache, fetching...");
  await fetchPromptABTests(SYNC_TIMEOUT2);
  abData = promptABCache.get(abTestKey);
  }
@@ -2344,8 +2222,8 @@ async function getAB(abTestKey, sessionId, options = {}) {
  throw new Error(`Prompt A/B test '${abTestKey}' has no current version.`);
  }
  const { variants } = versionData;
- log5(`A/B test '${abTestKey}' has ${variants?.length ?? 0} variants`);
- log5(`Version data: ${JSON.stringify(versionData, null, 2)}`);
+ log4(`A/B test '${abTestKey}' has ${variants?.length ?? 0} variants`);
+ log4(`Version data: ${JSON.stringify(versionData, null, 2)}`);
  if (!variants || variants.length === 0) {
  throw new Error(
  `Prompt A/B test '${abTestKey}' has no variants configured.`
@@ -2391,7 +2269,7 @@ async function getAB(abTestKey, sessionId, options = {}) {
  abTestKey,
  variantIndex: selectedIndex
  });
- log5(
+ log4(
  `\u2705 Got prompt from A/B: ${promptKey} v${targetVersion} (variant ${selectedIndex})`
  );
  return {
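
Illustration (not part of the published package): taken together, the hunks above replace the per-field OTel-style gen_ai.* span attributes and the top-level prompt_tokens / completion_tokens / total_tokens trace fields with a single attributes object attached to every span. A minimal sketch of that shape for the OpenAI wrapper, assuming content capture is enabled; the key names are taken from the diff, the values are invented placeholders:

// Hypothetical v0.2.4 attribute payload (placeholder values, not real traffic).
const attributes = {
  "fallom.sdk_version": "2",
  "fallom.method": "chat.completions.create",
  // Request and response travel as JSON strings, not as per-message keys:
  "fallom.raw.request": JSON.stringify({ messages: [{ role: "user", content: "Hi" }], model: "gpt-4o-mini" }),
  "fallom.raw.response": JSON.stringify({ text: "Hello!", finishReason: "stop", responseId: "resp_123", model: "gpt-4o-mini" }),
  "fallom.raw.usage": JSON.stringify({ prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 })
};

Per the diff, token counts now travel only inside the fallom.raw.usage JSON string rather than as top-level span fields.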