@fallom/trace 0.2.2 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
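In brief, 0.2.3 renumbers the internal debug-log helpers (log4 → log3, log5 → log4), removes the OpenTelemetry-style gen_ai.* attribute mapping and client-side token accounting (messagesToOtelAttributes, extractUsageFromResult, and the prompt_tokens/completion_tokens/total_tokens trace fields are deleted), and has every wrapper always attach a compact fallom.* attribute set, adding raw request/response JSON only when content capture is enabled. A minimal sketch (TypeScript) of that attribute shape, reconstructed from the added lines in the diff below — the key names come from the diff; the type itself and the comments are illustrative assumptions, not part of the package:

// Sketch of the attributes object every wrapper builds in 0.2.3.
// Only the key names are taken from the diff; the type is assumed.
type FallomAttributes = {
  "fallom.sdk_version": "2";                 // constant tag on every span
  "fallom.method": string;                   // e.g. "chat.completions.create", "generateText"
  "fallom.raw.request"?: string;             // JSON string of prompt/messages/system/model; only when content capture is on
  "fallom.raw.response"?: string;            // JSON string of text/finishReason/responseId/model
  "fallom.raw.usage"?: string;               // provider usage object, stringified verbatim
  "fallom.raw.providerMetadata"?: string;    // Vercel AI SDK experimental_providerMetadata
  "fallom.is_streaming"?: boolean;           // streamText / streamObject / doStream spans
  "fallom.time_to_first_token_ms"?: number;  // streaming spans with a first-token timestamp
  "fallom.agent_name"?: string;              // Mastra agent wrapper only
};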
package/dist/index.js CHANGED
@@ -26,7 +26,7 @@ __export(models_exports, {
  get: () => get,
  init: () => init2
  });
- function log4(msg) {
+ function log3(msg) {
  if (debugMode2) {
  console.log(`[Fallom] ${msg}`);
  }
@@ -58,11 +58,11 @@ function ensureInit() {
  }
  async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  if (!apiKey2) {
- log4("_fetchConfigs: No API key, skipping");
+ log3("_fetchConfigs: No API key, skipping");
  return;
  }
  try {
- log4(`Fetching configs from ${baseUrl2}/configs`);
+ log3(`Fetching configs from ${baseUrl2}/configs`);
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeout);
  const resp = await fetch(`${baseUrl2}/configs`, {
@@ -70,15 +70,15 @@ async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  signal: controller.signal
  });
  clearTimeout(timeoutId);
- log4(`Response status: ${resp.status}`);
+ log3(`Response status: ${resp.status}`);
  if (resp.ok) {
  const data = await resp.json();
  const configs = data.configs || [];
- log4(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
+ log3(`Got ${configs.length} configs: ${configs.map((c) => c.key)}`);
  for (const c of configs) {
  const key = c.key;
  const version = c.version || 1;
- log4(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
+ log3(`Config '${key}' v${version}: ${JSON.stringify(c.variants)}`);
  if (!configCache.has(key)) {
  configCache.set(key, { versions: /* @__PURE__ */ new Map(), latest: null });
  }
@@ -87,10 +87,10 @@ async function fetchConfigs(timeout = SYNC_TIMEOUT) {
  cached.latest = version;
  }
  } else {
- log4(`Fetch failed: ${resp.statusText}`);
+ log3(`Fetch failed: ${resp.statusText}`);
  }
  } catch (e) {
- log4(`Fetch exception: ${e}`);
+ log3(`Fetch exception: ${e}`);
  }
  }
  async function fetchSpecificVersion(configKey, version, timeout = SYNC_TIMEOUT) {
@@ -122,24 +122,24 @@ async function get(configKey, sessionId, options = {}) {
  const { version, fallback, debug = false } = options;
  debugMode2 = debug;
  ensureInit();
- log4(
+ log3(
  `get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
  );
  try {
  let configData = configCache.get(configKey);
- log4(
+ log3(
  `Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
  );
  if (!configData) {
- log4("Not in cache, fetching...");
+ log3("Not in cache, fetching...");
  await fetchConfigs(SYNC_TIMEOUT);
  configData = configCache.get(configKey);
- log4(
+ log3(
  `After fetch, cache lookup: ${configData ? "found" : "still not found"}`
  );
  }
  if (!configData) {
- log4(`Config not found, using fallback: ${fallback}`);
+ log3(`Config not found, using fallback: ${fallback}`);
  if (fallback) {
  console.warn(
  `[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
@@ -183,20 +183,20 @@ async function get(configKey, sessionId, options = {}) {
  const variantsRaw = config.variants;
  const configVersion = config.version || targetVersion;
  const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
- log4(
+ log3(
  `Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
  variants
  )}`
  );
  const hashBytes = (0, import_crypto.createHash)("md5").update(sessionId).digest();
  const hashVal = hashBytes.readUInt32BE(0) % 1e6;
- log4(`Session hash: ${hashVal} (out of 1,000,000)`);
+ log3(`Session hash: ${hashVal} (out of 1,000,000)`);
  let cumulative = 0;
  let assignedModel = variants[variants.length - 1].model;
  for (const v of variants) {
  const oldCumulative = cumulative;
  cumulative += v.weight * 1e4;
- log4(
+ log3(
  `Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
  );
  if (hashVal < cumulative) {
@@ -204,7 +204,7 @@ async function get(configKey, sessionId, options = {}) {
  break;
  }
  }
- log4(`\u2705 Assigned model: ${assignedModel}`);
+ log3(`\u2705 Assigned model: ${assignedModel}`);
  return returnModel(configKey, sessionId, assignedModel, configVersion);
  } catch (e) {
  if (e instanceof Error && e.message.includes("not found")) {
@@ -1073,32 +1073,6 @@ function generateHexId(length) {
  crypto.getRandomValues(bytes);
  return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("");
  }
- function messagesToOtelAttributes(messages, completion, model, responseId) {
- const attrs = {};
- if (model) {
- attrs["gen_ai.request.model"] = model;
- attrs["gen_ai.response.model"] = model;
- }
- if (responseId) {
- attrs["gen_ai.response.id"] = responseId;
- }
- if (messages) {
- messages.forEach((msg, i) => {
- attrs[`gen_ai.prompt.${i}.role`] = msg.role;
- attrs[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
- });
- }
- if (completion) {
- attrs["gen_ai.completion.0.role"] = completion.role;
- attrs["gen_ai.completion.0.content"] = typeof completion.content === "string" ? completion.content : JSON.stringify(completion.content);
- if (completion.tool_calls) {
- attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
- completion.tool_calls
- );
- }
- }
- return attrs;
- }
 
  // src/trace/wrappers/openai.ts
  function wrapOpenAI(client, sessionCtx) {
@@ -1120,18 +1094,25 @@ function wrapOpenAI(client, sessionCtx) {
  try {
  const response = await originalCreate(...args);
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- response?.choices?.[0]?.message,
- response?.model || params?.model,
- response?.id
- ) : {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "chat.completions.create"
+ };
+ if (captureContent2) {
+ attributes["fallom.raw.request"] = JSON.stringify({
+ messages: params?.messages,
+ model: params?.model
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: response?.choices?.[0]?.message?.content,
+ finishReason: response?.choices?.[0]?.finish_reason,
+ responseId: response?.id,
+ model: response?.model
+ });
+ }
  if (response?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
  }
- if (response?.choices?.[0]?.finish_reason) {
- attributes["gen_ai.response.finish_reason"] = response.choices[0].finish_reason;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1146,24 +1127,12 @@ function wrapOpenAI(client, sessionCtx) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: response?.usage?.prompt_tokens,
- completion_tokens: response?.usage?.completion_tokens,
- total_tokens: response?.usage?.total_tokens,
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- void 0,
- params?.model,
- void 0
- ) : void 0;
- if (attributes) {
- attributes["error.message"] = error?.message;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1179,7 +1148,10 @@ function wrapOpenAI(client, sessionCtx) {
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "chat.completions.create"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1206,21 +1178,26 @@ function wrapAnthropic(client, sessionCtx) {
  try {
  const response = await originalCreate(...args);
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- { role: "assistant", content: response?.content?.[0]?.text || "" },
- response?.model || params?.model,
- response?.id
- ) : {};
- if (params?.system) {
- attributes["gen_ai.system_prompt"] = params.system;
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "messages.create"
+ };
+ if (captureContent2) {
+ attributes["fallom.raw.request"] = JSON.stringify({
+ messages: params?.messages,
+ system: params?.system,
+ model: params?.model
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: response?.content?.[0]?.text,
+ finishReason: response?.stop_reason,
+ responseId: response?.id,
+ model: response?.model
+ });
  }
  if (response?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(response.usage);
  }
- if (response?.stop_reason) {
- attributes["gen_ai.response.finish_reason"] = response.stop_reason;
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1235,27 +1212,12 @@ function wrapAnthropic(client, sessionCtx) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: response?.usage?.input_tokens,
- completion_tokens: response?.usage?.output_tokens,
- total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const attributes = captureContent2 ? messagesToOtelAttributes(
- params?.messages,
- void 0,
- params?.model,
- void 0
- ) : void 0;
- if (attributes) {
- attributes["error.message"] = error?.message;
- if (params?.system) {
- attributes["gen_ai.system_prompt"] = params.system;
- }
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1271,7 +1233,10 @@ function wrapAnthropic(client, sessionCtx) {
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "messages.create"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1282,50 +1247,36 @@ function wrapAnthropic(client, sessionCtx) {
 
  // src/trace/wrappers/google-ai.ts
  function wrapGoogleAI(model, sessionCtx) {
- const originalGenerate = model.generateContent.bind(model);
+ const originalGenerateContent = model.generateContent.bind(model);
  const ctx = sessionCtx;
  model.generateContent = async function(...args) {
  if (!isInitialized()) {
- return originalGenerate(...args);
+ return originalGenerateContent(...args);
  }
  const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
  const traceId = traceCtx?.traceId || generateHexId(32);
  const spanId = generateHexId(16);
  const parentSpanId = traceCtx?.parentSpanId;
+ const request = args[0];
  const startTime = Date.now();
  const captureContent2 = shouldCaptureContent();
  try {
- const response = await originalGenerate(...args);
+ const response = await originalGenerateContent(...args);
  const endTime = Date.now();
- const result = response?.response;
- const usage = result?.usageMetadata;
- const modelName = model?.model || "gemini";
- const attributes = {};
+ const result = response?.response || response;
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateContent"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelName;
- attributes["gen_ai.response.model"] = modelName;
- const input = args[0];
- if (typeof input === "string") {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = input;
- } else if (input?.contents) {
- input.contents.forEach((content, i) => {
- attributes[`gen_ai.prompt.${i}.role`] = content.role || "user";
- attributes[`gen_ai.prompt.${i}.content`] = content.parts?.[0]?.text || JSON.stringify(content.parts);
- });
- }
- const outputText = result?.text?.();
- if (outputText) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = outputText;
- }
- }
- if (usage) {
- attributes["fallom.raw.usage"] = JSON.stringify(usage);
+ attributes["fallom.raw.request"] = JSON.stringify(request);
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: result?.text?.(),
+ candidates: result?.candidates
+ });
  }
- const candidate = result?.candidates?.[0];
- if (candidate?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = candidate.finishReason;
+ if (result?.usageMetadata) {
+ attributes["fallom.raw.usage"] = JSON.stringify(result.usageMetadata);
  }
  sendTrace({
  config_key: ctx.configKey,
@@ -1336,31 +1287,17 @@ function wrapGoogleAI(model, sessionCtx) {
  parent_span_id: parentSpanId,
  name: "generateContent",
  kind: "llm",
- model: modelName,
+ model: model.model || "gemini",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokenCount,
- completion_tokens: usage?.candidatesTokenCount,
- total_tokens: usage?.totalTokenCount,
- attributes: Object.keys(attributes).length > 0 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return response;
  } catch (error) {
  const endTime = Date.now();
- const modelName = model?.model || "gemini";
- const attributes = {};
- if (captureContent2) {
- attributes["gen_ai.request.model"] = modelName;
- attributes["error.message"] = error?.message;
- const input = args[0];
- if (typeof input === "string") {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = input;
- }
- }
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1370,13 +1307,16 @@ function wrapGoogleAI(model, sessionCtx) {
  parent_span_id: parentSpanId,
  name: "generateContent",
  kind: "llm",
- model: modelName,
+ model: model.model || "gemini",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
  error_message: error?.message,
- attributes: captureContent2 ? attributes : void 0
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateContent"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1385,35 +1325,6 @@ function wrapGoogleAI(model, sessionCtx) {
  return model;
  }
 
- // src/trace/wrappers/vercel-ai/utils.ts
- function extractUsageFromResult(result, directUsage) {
- let usage = directUsage ?? result?.usage;
- const isValidNumber = (v) => v !== null && v !== void 0 && !Number.isNaN(v);
- let promptTokens = isValidNumber(usage?.promptTokens) ? usage.promptTokens : isValidNumber(usage?.inputTokens) ? usage.inputTokens : isValidNumber(usage?.prompt_tokens) ? usage.prompt_tokens : void 0;
- let completionTokens = isValidNumber(usage?.completionTokens) ? usage.completionTokens : isValidNumber(usage?.outputTokens) ? usage.outputTokens : isValidNumber(usage?.completion_tokens) ? usage.completion_tokens : void 0;
- let totalTokens = isValidNumber(usage?.totalTokens) ? usage.totalTokens : isValidNumber(usage?.total_tokens) ? usage.total_tokens : void 0;
- let cost;
- const orUsage = result?.experimental_providerMetadata?.openrouter?.usage;
- if (orUsage) {
- if (promptTokens === void 0 && isValidNumber(orUsage.promptTokens)) {
- promptTokens = orUsage.promptTokens;
- }
- if (completionTokens === void 0 && isValidNumber(orUsage.completionTokens)) {
- completionTokens = orUsage.completionTokens;
- }
- if (totalTokens === void 0 && isValidNumber(orUsage.totalTokens)) {
- totalTokens = orUsage.totalTokens;
- }
- if (isValidNumber(orUsage.cost)) {
- cost = orUsage.cost;
- }
- }
- if (totalTokens === void 0 && (promptTokens !== void 0 || completionTokens !== void 0)) {
- totalTokens = (promptTokens ?? 0) + (completionTokens ?? 0);
- }
- return { promptTokens, completionTokens, totalTokens, cost };
- }
-
  // src/trace/wrappers/vercel-ai/generate-text.ts
  function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  const ctx = sessionCtx;
@@ -1432,54 +1343,33 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  const result = await aiModule.generateText(...args);
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] generateText result keys:",
- Object.keys(result || {})
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.usage:",
- JSON.stringify(result?.usage, null, 2)
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.experimental_providerMetadata:",
- JSON.stringify(result?.experimental_providerMetadata, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] generateText raw result:", JSON.stringify(result, null, 2));
  }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateText"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- attributes["gen_ai.response.model"] = modelId;
- if (params?.prompt) {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = params.prompt;
- }
- if (params?.messages) {
- params.messages.forEach((msg, i) => {
- attributes[`gen_ai.prompt.${i}.role`] = msg.role;
- attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
- });
- }
- if (result?.text) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = result.text;
- }
- if (result?.response?.id) {
- attributes["gen_ai.response.id"] = result.response.id;
- }
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ text: result?.text,
+ finishReason: result?.finishReason,
+ responseId: result?.response?.id,
+ modelId: result?.response?.modelId
+ });
  }
  if (result?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
  }
  if (result?.experimental_providerMetadata) {
- attributes["fallom.raw.providerMetadata"] = JSON.stringify(
- result.experimental_providerMetadata
- );
- }
- if (result?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = result.finishReason;
+ attributes["fallom.raw.providerMetadata"] = JSON.stringify(result.experimental_providerMetadata);
  }
- const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1494,10 +1384,7 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return result;
@@ -1518,7 +1405,17 @@ function createGenerateTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateText",
+ "fallom.raw.request": JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ })
+ }
  }).catch(() => {
  });
  throw error;
@@ -1550,12 +1447,8 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  result.usage.then(async (rawUsage) => {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] streamText usage:",
- JSON.stringify(rawUsage, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] streamText raw usage:", JSON.stringify(rawUsage, null, 2));
  }
- log2("\u{1F4CA} streamText usage:", JSON.stringify(rawUsage, null, 2));
  let providerMetadata = result?.experimental_providerMetadata;
  if (providerMetadata && typeof providerMetadata.then === "function") {
  try {
@@ -1564,20 +1457,18 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  providerMetadata = void 0;
  }
  }
- const usage = extractUsageFromResult(
- { experimental_providerMetadata: providerMetadata },
- rawUsage
- );
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamText",
+ "fallom.is_streaming": true
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- if (params?.prompt) {
- attributes["gen_ai.prompt.0.role"] = "user";
- attributes["gen_ai.prompt.0.content"] = params.prompt;
- }
- }
- if (firstTokenTime) {
- attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId
+ });
  }
  if (rawUsage) {
  attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
@@ -1585,7 +1476,10 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  if (providerMetadata) {
  attributes["fallom.raw.providerMetadata"] = JSON.stringify(providerMetadata);
  }
- const tracePayload = {
+ if (firstTokenTime) {
+ attributes["fallom.time_to_first_token_ms"] = firstTokenTime - startTime;
+ }
+ sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
  customer_id: ctx.customerId,
@@ -1599,13 +1493,10 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
  time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
- attributes: captureContent2 ? attributes : void 0
- };
- sendTrace(tracePayload).catch(() => {
+ is_streaming: true,
+ attributes
+ }).catch(() => {
  });
  }).catch((error) => {
  const endTime = Date.now();
@@ -1624,7 +1515,12 @@ function createStreamTextWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamText",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  });
@@ -1672,25 +1568,30 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
  console.log(
- "\n\u{1F50D} [Fallom Debug] generateObject result keys:",
- Object.keys(result || {})
- );
- console.log(
- "\u{1F50D} [Fallom Debug] result.usage:",
- JSON.stringify(result?.usage, null, 2)
+ "\n\u{1F50D} [Fallom Debug] generateObject raw result:",
+ JSON.stringify(result, null, 2)
  );
  }
  const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateObject"
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- attributes["gen_ai.response.model"] = modelId;
- if (result?.object) {
- attributes["gen_ai.completion.0.role"] = "assistant";
- attributes["gen_ai.completion.0.content"] = JSON.stringify(
- result.object
- );
- }
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId,
+ schema: params?.schema ? "provided" : void 0
+ // Don't send full schema, just note if present
+ });
+ attributes["fallom.raw.response"] = JSON.stringify({
+ object: result?.object,
+ finishReason: result?.finishReason,
+ responseId: result?.response?.id,
+ modelId: result?.response?.modelId
+ });
  }
  if (result?.usage) {
  attributes["fallom.raw.usage"] = JSON.stringify(result.usage);
@@ -1700,10 +1601,6 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  result.experimental_providerMetadata
  );
  }
- if (result?.finishReason) {
- attributes["gen_ai.response.finish_reason"] = result.finishReason;
- }
- const usage = extractUsageFromResult(result);
  sendTrace({
  config_key: ctx.configKey,
  session_id: ctx.sessionId,
@@ -1718,10 +1615,7 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ attributes
  }).catch(() => {
  });
  return result;
@@ -1742,7 +1636,11 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "generateObject"
+ }
  }).catch(() => {
  });
  throw error;
@@ -1751,9 +1649,6 @@ function createGenerateObjectWrapper(aiModule, sessionCtx, debug = false) {
  }
 
  // src/trace/wrappers/vercel-ai/stream-object.ts
- function log3(...args) {
- if (isDebugMode()) console.log("[Fallom]", ...args);
- }
  function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const ctx = sessionCtx;
  return async (...args) => {
@@ -1761,7 +1656,6 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const startTime = Date.now();
  const captureContent2 = shouldCaptureContent();
  const result = await aiModule.streamObject(...args);
- log3("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
  if (!isInitialized()) {
  return result;
  }
@@ -1769,18 +1663,13 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  const traceId = traceCtx?.traceId || generateHexId(32);
  const spanId = generateHexId(16);
  const parentSpanId = traceCtx?.parentSpanId;
- let firstTokenTime = null;
  const modelId = params?.model?.modelId || String(params?.model || "unknown");
  if (result?.usage) {
  result.usage.then(async (rawUsage) => {
  const endTime = Date.now();
  if (debug || isDebugMode()) {
- console.log(
- "\n\u{1F50D} [Fallom Debug] streamObject usage:",
- JSON.stringify(rawUsage, null, 2)
- );
+ console.log("\n\u{1F50D} [Fallom Debug] streamObject raw usage:", JSON.stringify(rawUsage, null, 2));
  }
- log3("\u{1F4CA} streamObject usage:", JSON.stringify(rawUsage, null, 2));
  let providerMetadata = result?.experimental_providerMetadata;
  if (providerMetadata && typeof providerMetadata.then === "function") {
  try {
@@ -1789,16 +1678,19 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  providerMetadata = void 0;
  }
  }
- const usage = extractUsageFromResult(
- { experimental_providerMetadata: providerMetadata },
- rawUsage
- );
- const attributes = {};
+ const attributes = {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamObject",
+ "fallom.is_streaming": true
+ };
  if (captureContent2) {
- attributes["gen_ai.request.model"] = modelId;
- }
- if (firstTokenTime) {
- attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+ attributes["fallom.raw.request"] = JSON.stringify({
+ prompt: params?.prompt,
+ messages: params?.messages,
+ system: params?.system,
+ model: modelId,
+ schema: params?.schema ? "provided" : void 0
+ });
  }
  if (rawUsage) {
  attributes["fallom.raw.usage"] = JSON.stringify(rawUsage);
@@ -1820,10 +1712,8 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage.promptTokens,
- completion_tokens: usage.completionTokens,
- total_tokens: usage.totalTokens,
- attributes: captureContent2 ? attributes : void 0
+ is_streaming: true,
+ attributes
  }).catch(() => {
  });
  }).catch((error) => {
@@ -1842,31 +1732,16 @@ function createStreamObjectWrapper(aiModule, sessionCtx, debug = false) {
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error?.message
+ error_message: error?.message,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "streamObject",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  });
  }
- if (result?.partialObjectStream) {
- const originalStream = result.partialObjectStream;
- const wrappedStream = (async function* () {
- for await (const chunk of originalStream) {
- if (!firstTokenTime) {
- firstTokenTime = Date.now();
- log3("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
- }
- yield chunk;
- }
- })();
- return new Proxy(result, {
- get(target, prop) {
- if (prop === "partialObjectStream") {
- return wrappedStream;
- }
- return target[prop];
- }
- });
- }
  return result;
  };
  }
@@ -1885,105 +1760,69 @@ function wrapAISDK(ai, sessionCtx, options) {
1885
1760
  // src/trace/wrappers/mastra.ts
1886
1761
  function wrapMastraAgent(agent, sessionCtx) {
1887
1762
  const originalGenerate = agent.generate.bind(agent);
1888
- const agentName = agent.name || "MastraAgent";
1889
1763
  const ctx = sessionCtx;
1890
1764
  agent.generate = async function(...args) {
1891
1765
  if (!isInitialized()) {
1892
1766
  return originalGenerate(...args);
1893
1767
  }
1894
- const traceId = generateHexId(32);
1768
+ const traceCtx = getTraceContextStorage().getStore() || getFallbackTraceContext();
1769
+ const traceId = traceCtx?.traceId || generateHexId(32);
1895
1770
  const spanId = generateHexId(16);
1771
+ const parentSpanId = traceCtx?.parentSpanId;
1772
+ const input = args[0];
1896
1773
  const startTime = Date.now();
1897
- const messages = args[0] || [];
1774
+ const captureContent2 = shouldCaptureContent();
1898
1775
  try {
1899
1776
  const result = await originalGenerate(...args);
1900
1777
  const endTime = Date.now();
1901
- const model = result?.model?.modelId || "unknown";
1902
- const toolCalls = [];
1903
- if (result?.steps?.length) {
1904
- for (const step of result.steps) {
1905
- if (step.toolCalls?.length) {
1906
- for (let i = 0; i < step.toolCalls.length; i++) {
1907
- const tc = step.toolCalls[i];
1908
- const tr = step.toolResults?.[i];
1909
- toolCalls.push({
1910
- name: tc.toolName,
1911
- arguments: tc.args,
1912
- result: tr?.result
1913
- });
1914
- }
1915
- }
1916
- }
1917
- }
1918
1778
  const attributes = {
1919
- "gen_ai.system": "Mastra",
1920
- "gen_ai.request.model": model,
1921
- "gen_ai.response.model": model,
1922
- "fallom.source": "mastra-agent",
1923
- "llm.request.type": "chat"
1779
+ "fallom.sdk_version": "2",
1780
+ "fallom.method": "agent.generate",
1781
+ "fallom.agent_name": agent.name || "unknown"
1924
1782
  };
1925
- if (Array.isArray(messages)) {
1926
- messages.forEach((msg, i) => {
1927
- attributes[`gen_ai.prompt.${i}.role`] = msg.role || "user";
1928
- attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
1929
- });
1930
- }
1931
- if (result?.text) {
1932
- attributes["gen_ai.completion.0.role"] = "assistant";
1933
- attributes["gen_ai.completion.0.content"] = result.text;
1934
- attributes["gen_ai.completion.0.finish_reason"] = "stop";
1935
- }
1936
- if (toolCalls.length > 0) {
1937
- attributes["fallom.tool_calls"] = JSON.stringify(toolCalls);
1938
- toolCalls.forEach((tc, i) => {
1939
- attributes[`gen_ai.completion.0.tool_calls.${i}.name`] = tc.name;
1940
- attributes[`gen_ai.completion.0.tool_calls.${i}.type`] = "function";
1941
- attributes[`gen_ai.completion.0.tool_calls.${i}.arguments`] = JSON.stringify(tc.arguments);
1942
- });
1943
- }
1944
- if (result?.usage) {
1945
- attributes["gen_ai.usage.prompt_tokens"] = result.usage.promptTokens;
1946
- attributes["gen_ai.usage.completion_tokens"] = result.usage.completionTokens;
1947
- attributes["llm.usage.total_tokens"] = result.usage.totalTokens;
1783
+ if (captureContent2) {
1784
+ attributes["fallom.raw.request"] = JSON.stringify(input);
1785
+ attributes["fallom.raw.response"] = JSON.stringify(result);
1948
1786
  }
1949
- const traceData = {
1787
+ sendTrace({
1950
1788
  config_key: ctx.configKey,
1951
1789
  session_id: ctx.sessionId,
1952
1790
  customer_id: ctx.customerId,
1953
1791
  trace_id: traceId,
1954
1792
  span_id: spanId,
1955
- name: `mastra.${agentName}.generate`,
1956
- kind: "client",
1957
- model,
1793
+ parent_span_id: parentSpanId,
1794
+ name: `agent.${agent.name || "unknown"}.generate`,
1795
+ kind: "agent",
1958
1796
  start_time: new Date(startTime).toISOString(),
1959
1797
  end_time: new Date(endTime).toISOString(),
1960
1798
  duration_ms: endTime - startTime,
1961
1799
  status: "OK",
1962
- prompt_tokens: result?.usage?.promptTokens,
1963
- completion_tokens: result?.usage?.completionTokens,
1964
- total_tokens: result?.usage?.totalTokens,
1965
1800
  attributes
1966
- };
1967
- sendTrace(traceData).catch(() => {
1801
+ }).catch(() => {
1968
1802
  });
1969
1803
  return result;
1970
1804
  } catch (error) {
1971
1805
  const endTime = Date.now();
1972
- const traceData = {
1806
+ sendTrace({
1973
1807
  config_key: ctx.configKey,
1974
1808
  session_id: ctx.sessionId,
1975
1809
  customer_id: ctx.customerId,
1976
1810
  trace_id: traceId,
1977
1811
  span_id: spanId,
1978
- name: `mastra.${agentName}.generate`,
1979
- kind: "client",
1812
+ parent_span_id: parentSpanId,
1813
+ name: `agent.${agent.name || "unknown"}.generate`,
1814
+ kind: "agent",
1980
1815
  start_time: new Date(startTime).toISOString(),
1981
1816
  end_time: new Date(endTime).toISOString(),
1982
1817
  duration_ms: endTime - startTime,
1983
1818
  status: "ERROR",
1984
- error_message: error instanceof Error ? error.message : String(error)
1985
- };
1986
- sendTrace(traceData).catch(() => {
1819
+ error_message: error?.message,
1820
+ attributes: {
1821
+ "fallom.sdk_version": "2",
1822
+ "fallom.method": "agent.generate",
1823
+ "fallom.agent_name": agent.name || "unknown"
1824
+ }
1825
+ }).catch(() => {
1987
1826
  });
1988
1827
  throw error;
1989
1828
  }
@@ -2023,6 +1862,9 @@ var FallomSession = class {
  /**
  * Wrap a Vercel AI SDK model to trace all calls (PostHog style).
  * Returns the same model type with tracing injected.
+ *
+ * Note: This only captures tokens/timing, not prompt/completion content.
+ * Use wrapAISDK for full content tracing.
  */
  traceModel(model) {
  const ctx = this.ctx;
@@ -2048,17 +1890,18 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "generateText",
+ name: "doGenerate",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "OK",
- prompt_tokens: usage?.promptTokens,
- completion_tokens: usage?.completionTokens,
- total_tokens: usage?.totalTokens,
- attributes: shouldCaptureContent() && usage ? { "fallom.raw.usage": JSON.stringify(usage) } : void 0
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doGenerate",
+ ...usage ? { "fallom.raw.usage": JSON.stringify(usage) } : {}
+ }
  }).catch(() => {
  });
  return result;
@@ -2071,14 +1914,15 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "generateText",
+ name: "doGenerate",
  kind: "llm",
  model: model.modelId || "unknown",
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(endTime).toISOString(),
  duration_ms: endTime - startTime,
  status: "ERROR",
- error_message: error instanceof Error ? error.message : String(error)
+ error_message: error instanceof Error ? error.message : String(error),
+ attributes: { "fallom.sdk_version": "2", "fallom.method": "traceModel.doGenerate" }
  }).catch(() => {
  });
  throw error;
@@ -2103,14 +1947,19 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "streamText",
+ name: "doStream",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
  end_time: new Date(Date.now()).toISOString(),
  duration_ms: Date.now() - startTime,
  status: "OK",
- is_streaming: true
+ is_streaming: true,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doStream",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  return result;
@@ -2122,7 +1971,7 @@ var FallomSession = class {
  trace_id: traceId,
  span_id: spanId,
  parent_span_id: traceCtx?.parentSpanId,
- name: "streamText",
+ name: "doStream",
  kind: "llm",
  model: modelId,
  start_time: new Date(startTime).toISOString(),
@@ -2130,7 +1979,12 @@ var FallomSession = class {
  duration_ms: Date.now() - startTime,
  status: "ERROR",
  error_message: error instanceof Error ? error.message : String(error),
- is_streaming: true
+ is_streaming: true,
+ attributes: {
+ "fallom.sdk_version": "2",
+ "fallom.method": "traceModel.doStream",
+ "fallom.is_streaming": true
+ }
  }).catch(() => {
  });
  throw error;
@@ -2186,7 +2040,7 @@ var promptCache = /* @__PURE__ */ new Map();
  var promptABCache = /* @__PURE__ */ new Map();
  var promptContext = null;
  var SYNC_TIMEOUT2 = 2e3;
- function log5(msg) {
+ function log4(msg) {
  if (debugMode3) {
  console.log(`[Fallom Prompts] ${msg}`);
  }
@@ -2289,10 +2143,10 @@ async function get2(promptKey, options = {}) {
  const { variables, version, debug = false } = options;
  debugMode3 = debug;
  ensureInit2();
- log5(`get() called: promptKey=${promptKey}`);
+ log4(`get() called: promptKey=${promptKey}`);
  let promptData = promptCache.get(promptKey);
  if (!promptData) {
- log5("Not in cache, fetching...");
+ log4("Not in cache, fetching...");
  await fetchPrompts(SYNC_TIMEOUT2);
  promptData = promptCache.get(promptKey);
  }
@@ -2314,7 +2168,7 @@ async function get2(promptKey, options = {}) {
  promptKey,
  promptVersion: targetVersion
  });
- log5(`\u2705 Got prompt: ${promptKey} v${targetVersion}`);
+ log4(`\u2705 Got prompt: ${promptKey} v${targetVersion}`);
  return {
  key: promptKey,
  version: targetVersion,
@@ -2326,10 +2180,10 @@ async function getAB(abTestKey, sessionId, options = {}) {
  const { variables, debug = false } = options;
  debugMode3 = debug;
  ensureInit2();
- log5(`getAB() called: abTestKey=${abTestKey}, sessionId=${sessionId}`);
+ log4(`getAB() called: abTestKey=${abTestKey}, sessionId=${sessionId}`);
  let abData = promptABCache.get(abTestKey);
  if (!abData) {
- log5("Not in cache, fetching...");
+ log4("Not in cache, fetching...");
  await fetchPromptABTests(SYNC_TIMEOUT2);
  abData = promptABCache.get(abTestKey);
  }
@@ -2344,8 +2198,8 @@ async function getAB(abTestKey, sessionId, options = {}) {
  throw new Error(`Prompt A/B test '${abTestKey}' has no current version.`);
  }
  const { variants } = versionData;
- log5(`A/B test '${abTestKey}' has ${variants?.length ?? 0} variants`);
- log5(`Version data: ${JSON.stringify(versionData, null, 2)}`);
+ log4(`A/B test '${abTestKey}' has ${variants?.length ?? 0} variants`);
+ log4(`Version data: ${JSON.stringify(versionData, null, 2)}`);
  if (!variants || variants.length === 0) {
  throw new Error(
  `Prompt A/B test '${abTestKey}' has no variants configured.`
@@ -2391,7 +2245,7 @@ async function getAB(abTestKey, sessionId, options = {}) {
  abTestKey,
  variantIndex: selectedIndex
  });
- log5(
+ log4(
  `\u2705 Got prompt from A/B: ${promptKey} v${targetVersion} (variant ${selectedIndex})`
  );
  return {