@fallom/trace 0.1.5 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -36,7 +36,7 @@ function log(msg) {
 }
 function init(options = {}) {
   apiKey = options.apiKey || process.env.FALLOM_API_KEY || null;
-  baseUrl = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  baseUrl = options.baseUrl || process.env.FALLOM_PROMPTS_URL || process.env.FALLOM_BASE_URL || "https://prompts.fallom.com";
   initialized = true;
   if (!apiKey) {
     return;
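Note: 0.1.10 splits the old single spans.fallom.com endpoint into per-service hosts. Each subsystem now resolves its base URL with a service-specific environment variable taking precedence over the legacy FALLOM_BASE_URL. A minimal sketch of the prompts resolution order (the helper name is illustrative, not part of the package):

```js
// Illustrative helper mirroring the new init() precedence:
// explicit option > FALLOM_PROMPTS_URL > legacy FALLOM_BASE_URL > default host.
function resolvePromptsUrl(options = {}) {
  return (
    options.baseUrl ||
    process.env.FALLOM_PROMPTS_URL ||
    process.env.FALLOM_BASE_URL ||
    "https://prompts.fallom.com"
  );
}
```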
@@ -187,6 +187,13 @@ async function getAB(abTestKey, sessionId, options = {}) {
     throw new Error(`Prompt A/B test '${abTestKey}' has no current version.`);
   }
   const { variants } = versionData;
+  log(`A/B test '${abTestKey}' has ${variants?.length ?? 0} variants`);
+  log(`Version data: ${JSON.stringify(versionData, null, 2)}`);
+  if (!variants || variants.length === 0) {
+    throw new Error(
+      `Prompt A/B test '${abTestKey}' has no variants configured.`
+    );
+  }
   const hashBytes = (0, import_crypto.createHash)("md5").update(sessionId).digest();
   const hashVal = hashBytes.readUInt32BE(0) % 1e6;
   let cumulative = 0;
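The hashing that follows the new empty-variants guard is deterministic: the session ID is hashed into one of 1,000,000 buckets, so the same session always receives the same variant. A self-contained sketch of the same bucketing:

```js
const { createHash } = require("crypto");

// Same arithmetic as getAB(): md5 the session ID, read the first 4 bytes
// as a big-endian uint32, and reduce modulo 1e6.
function sessionBucket(sessionId) {
  const hashBytes = createHash("md5").update(sessionId).digest();
  return hashBytes.readUInt32BE(0) % 1e6;
}

console.log(sessionBucket("session-123")); // stable bucket for this session ID
```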
@@ -248,7 +255,7 @@ var init_prompts = __esm({
     "use strict";
     import_crypto = require("crypto");
     apiKey = null;
-    baseUrl = "https://spans.fallom.com";
+    baseUrl = "https://prompts.fallom.com";
     initialized = false;
     syncInterval = null;
     debugMode = false;
@@ -280,6 +287,7 @@ __export(trace_exports, {
   setSession: () => setSession,
   shutdown: () => shutdown,
   span: () => span,
+  wrapAISDK: () => wrapAISDK,
   wrapAnthropic: () => wrapAnthropic,
   wrapGoogleAI: () => wrapGoogleAI,
   wrapOpenAI: () => wrapOpenAI
@@ -915,7 +923,7 @@ var Resource = (
 var sessionStorage = new import_async_hooks.AsyncLocalStorage();
 var fallbackSession = null;
 var apiKey2 = null;
-var baseUrl2 = "https://spans.fallom.com";
+var baseUrl2 = "https://traces.fallom.com";
 var initialized2 = false;
 var captureContent = true;
 var debugMode2 = false;
@@ -958,7 +966,7 @@ async function init2(options = {}) {
   debugMode2 = options.debug ?? false;
   log2("\u{1F680} Initializing Fallom tracing...");
   apiKey2 = options.apiKey || process.env.FALLOM_API_KEY || null;
-  baseUrl2 = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  baseUrl2 = options.baseUrl || process.env.FALLOM_TRACES_URL || process.env.FALLOM_BASE_URL || "https://traces.fallom.com";
   const envCapture = process.env.FALLOM_CAPTURE_CONTENT?.toLowerCase();
   if (envCapture === "false" || envCapture === "0" || envCapture === "no") {
     captureContent = false;
@@ -1118,11 +1126,47 @@ async function shutdown() {
     initialized2 = false;
   }
 }
+function messagesToOtelAttributes(messages, completion, model, responseId) {
+  const attrs = {};
+  if (model) {
+    attrs["gen_ai.request.model"] = model;
+    attrs["gen_ai.response.model"] = model;
+  }
+  if (responseId) {
+    attrs["gen_ai.response.id"] = responseId;
+  }
+  if (messages) {
+    messages.forEach((msg, i) => {
+      attrs[`gen_ai.prompt.${i}.role`] = msg.role;
+      attrs[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
+    });
+  }
+  if (completion) {
+    attrs["gen_ai.completion.0.role"] = completion.role;
+    attrs["gen_ai.completion.0.content"] = typeof completion.content === "string" ? completion.content : JSON.stringify(completion.content);
+    if (completion.tool_calls) {
+      attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
+        completion.tool_calls
+      );
+    }
+  }
+  return attrs;
+}
+function generateHexId(length) {
+  const bytes = new Uint8Array(length / 2);
+  crypto.getRandomValues(bytes);
+  return Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("");
+}
+var traceContextStorage = new import_async_hooks.AsyncLocalStorage();
+var fallbackTraceContext = null;
 async function sendTrace(trace) {
+  const url = `${baseUrl2}/v1/traces`;
+  log2("\u{1F4E4} Sending trace to:", url);
+  log2(" Session:", trace.session_id, "Config:", trace.config_key);
   try {
     const controller = new AbortController();
     const timeoutId = setTimeout(() => controller.abort(), 5e3);
-    await fetch(`${baseUrl2}/v1/traces`, {
+    const response = await fetch(url, {
       method: "POST",
       headers: {
         Authorization: `Bearer ${apiKey2}`,
@@ -1132,8 +1176,14 @@ async function sendTrace(trace) {
       signal: controller.signal
     });
     clearTimeout(timeoutId);
-    log2("\u{1F4E4} Trace sent:", trace.name, trace.model);
-  } catch {
+    if (!response.ok) {
+      const text = await response.text();
+      log2("\u274C Trace send failed:", response.status, text);
+    } else {
+      log2("\u2705 Trace sent:", trace.name, trace.model);
+    }
+  } catch (err) {
+    log2("\u274C Trace send error:", err instanceof Error ? err.message : err);
   }
 }
 function wrapOpenAI(client) {
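The messagesToOtelAttributes helper added above replaces the old flat input/output strings with OpenTelemetry GenAI-style attributes, one indexed key pair per message. For a single-turn call the result would look roughly like this (values illustrative):

```js
// Approximate result of messagesToOtelAttributes(
//   [{ role: "user", content: "Hi" }],
//   { role: "assistant", content: "Hello!" },
//   "gpt-4o-mini",
//   "chatcmpl-123"  // hypothetical response id
// ):
const attrs = {
  "gen_ai.request.model": "gpt-4o-mini",
  "gen_ai.response.model": "gpt-4o-mini",
  "gen_ai.response.id": "chatcmpl-123",
  "gen_ai.prompt.0.role": "user",
  "gen_ai.prompt.0.content": "Hi",
  "gen_ai.completion.0.role": "assistant",
  "gen_ai.completion.0.content": "Hello!"
};
```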
@@ -1151,16 +1201,30 @@ function wrapOpenAI(client) {
       promptCtx = getPromptContext2();
     } catch {
     }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
     const params = args[0] || {};
     const startTime = Date.now();
     try {
       const response = await originalCreate(...args);
       const endTime = Date.now();
+      const attributes = captureContent ? messagesToOtelAttributes(
+        params?.messages,
+        response?.choices?.[0]?.message,
+        response?.model || params?.model,
+        response?.id
+      ) : void 0;
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "chat.completions.create",
+        kind: "llm",
         model: response?.model || params?.model,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
@@ -1169,8 +1233,7 @@ function wrapOpenAI(client) {
         prompt_tokens: response?.usage?.prompt_tokens,
         completion_tokens: response?.usage?.completion_tokens,
         total_tokens: response?.usage?.total_tokens,
-        input: captureContent ? JSON.stringify(params?.messages) : void 0,
-        output: captureContent ? response?.choices?.[0]?.message?.content : void 0,
+        attributes,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1180,17 +1243,31 @@ function wrapOpenAI(client) {
       return response;
     } catch (error) {
       const endTime = Date.now();
+      const attributes = captureContent ? messagesToOtelAttributes(
+        params?.messages,
+        void 0,
+        params?.model,
+        void 0
+      ) : void 0;
+      if (attributes) {
+        attributes["error.message"] = error?.message;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "chat.completions.create",
+        kind: "llm",
         model: params?.model,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "ERROR",
         error_message: error?.message,
+        attributes,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
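Usage of the wrapper is unchanged; only the emitted payload differs. A sketch, assuming the package's top-level init export and an active Fallom session (the model name and prompt are placeholders):

```js
const OpenAI = require("openai");
const fallom = require("@fallom/trace");

async function main() {
  await fallom.init({ apiKey: process.env.FALLOM_API_KEY });
  const openai = fallom.wrapOpenAI(new OpenAI());

  // Within a session, this call now emits a span carrying trace_id/span_id,
  // kind: "llm", and gen_ai.* attributes instead of flat input/output strings.
  const res = await openai.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "Hello" }],
  });
  console.log(res.choices[0].message.content);
}
main();
```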
@@ -1215,16 +1292,33 @@ function wrapAnthropic(client) {
       promptCtx = getPromptContext2();
     } catch {
     }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
     const params = args[0] || {};
     const startTime = Date.now();
     try {
       const response = await originalCreate(...args);
       const endTime = Date.now();
+      const attributes = captureContent ? messagesToOtelAttributes(
+        params?.messages,
+        { role: "assistant", content: response?.content?.[0]?.text || "" },
+        response?.model || params?.model,
+        response?.id
+      ) : void 0;
+      if (attributes && params?.system) {
+        attributes["gen_ai.system_prompt"] = params.system;
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "messages.create",
+        kind: "llm",
         model: response?.model || params?.model,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
@@ -1233,8 +1327,7 @@ function wrapAnthropic(client) {
         prompt_tokens: response?.usage?.input_tokens,
         completion_tokens: response?.usage?.output_tokens,
         total_tokens: (response?.usage?.input_tokens || 0) + (response?.usage?.output_tokens || 0),
-        input: captureContent ? JSON.stringify(params?.messages) : void 0,
-        output: captureContent ? response?.content?.[0]?.text : void 0,
+        attributes,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1244,17 +1337,34 @@ function wrapAnthropic(client) {
       return response;
     } catch (error) {
       const endTime = Date.now();
+      const attributes = captureContent ? messagesToOtelAttributes(
+        params?.messages,
+        void 0,
+        params?.model,
+        void 0
+      ) : void 0;
+      if (attributes) {
+        attributes["error.message"] = error?.message;
+        if (params?.system) {
+          attributes["gen_ai.system_prompt"] = params.system;
+        }
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "messages.create",
+        kind: "llm",
         model: params?.model,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "ERROR",
         error_message: error?.message,
+        attributes,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1279,18 +1389,47 @@ function wrapGoogleAI(model) {
       promptCtx = getPromptContext2();
     } catch {
     }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
     const startTime = Date.now();
     try {
       const response = await originalGenerate(...args);
       const endTime = Date.now();
       const result = response?.response;
       const usage = result?.usageMetadata;
+      const modelName = model?.model || "gemini";
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelName;
+        attributes["gen_ai.response.model"] = modelName;
+        const input = args[0];
+        if (typeof input === "string") {
+          attributes["gen_ai.prompt.0.role"] = "user";
+          attributes["gen_ai.prompt.0.content"] = input;
+        } else if (input?.contents) {
+          input.contents.forEach((content, i) => {
+            attributes[`gen_ai.prompt.${i}.role`] = content.role || "user";
+            attributes[`gen_ai.prompt.${i}.content`] = content.parts?.[0]?.text || JSON.stringify(content.parts);
+          });
+        }
+        const outputText = result?.text?.();
+        if (outputText) {
+          attributes["gen_ai.completion.0.role"] = "assistant";
+          attributes["gen_ai.completion.0.content"] = outputText;
+        }
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "generateContent",
-        model: model?.model || "gemini",
+        kind: "llm",
+        model: modelName,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
@@ -1298,8 +1437,7 @@ function wrapGoogleAI(model) {
         prompt_tokens: usage?.promptTokenCount,
         completion_tokens: usage?.candidatesTokenCount,
         total_tokens: usage?.totalTokenCount,
-        input: captureContent ? JSON.stringify(args[0]) : void 0,
-        output: captureContent ? result?.text?.() : void 0,
+        attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
@@ -1309,17 +1447,33 @@ function wrapGoogleAI(model) {
       return response;
     } catch (error) {
       const endTime = Date.now();
+      const modelName = model?.model || "gemini";
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelName;
+        attributes["error.message"] = error?.message;
+        const input = args[0];
+        if (typeof input === "string") {
+          attributes["gen_ai.prompt.0.role"] = "user";
+          attributes["gen_ai.prompt.0.content"] = input;
+        }
+      }
       sendTrace({
         config_key: ctx.configKey,
         session_id: ctx.sessionId,
         customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
         name: "generateContent",
-        model: model?.model || "gemini",
+        kind: "llm",
+        model: modelName,
         start_time: new Date(startTime).toISOString(),
         end_time: new Date(endTime).toISOString(),
         duration_ms: endTime - startTime,
         status: "ERROR",
         error_message: error?.message,
+        attributes: captureContent ? attributes : void 0,
         prompt_key: promptCtx?.promptKey,
         prompt_version: promptCtx?.promptVersion,
         prompt_ab_test_key: promptCtx?.abTestKey,
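All three wrappers now stamp explicit span identity onto every trace, with W3C trace-context-sized IDs. The shape of the added fields (values illustrative):

```js
// Fields added to every trace payload in 0.1.10:
const spanIdentity = {
  trace_id: "4bf92f3577b34da6a3ce929d0e0e4736", // 32 hex chars, shared by all spans in a trace
  span_id: "00f067aa0ba902b7",                  // 16 hex chars, fresh per call
  parent_span_id: undefined,                    // set when an ambient trace context exists
  kind: "llm",
};
```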
@@ -1331,6 +1485,414 @@ function wrapGoogleAI(model) {
   };
   return model;
 }
+function wrapAISDK(ai) {
+  const aiModule = ai;
+  return {
+    generateText: createGenerateTextWrapper(aiModule),
+    streamText: createStreamTextWrapper(aiModule),
+    generateObject: aiModule.generateObject ? createGenerateObjectWrapper(aiModule) : void 0,
+    streamObject: aiModule.streamObject ? createStreamObjectWrapper(aiModule) : void 0
+  };
+}
+function createGenerateTextWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    if (!ctx || !initialized2) {
+      return aiModule.generateText(...args);
+    }
+    let promptCtx = null;
+    try {
+      const { getPromptContext: getPromptContext2 } = await Promise.resolve().then(() => (init_prompts(), prompts_exports));
+      promptCtx = getPromptContext2();
+    } catch {
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    try {
+      const result = await aiModule.generateText(...args);
+      const endTime = Date.now();
+      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelId;
+        attributes["gen_ai.response.model"] = modelId;
+        if (params?.prompt) {
+          attributes["gen_ai.prompt.0.role"] = "user";
+          attributes["gen_ai.prompt.0.content"] = params.prompt;
+        }
+        if (params?.messages) {
+          params.messages.forEach((msg, i) => {
+            attributes[`gen_ai.prompt.${i}.role`] = msg.role;
+            attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
+          });
+        }
+        if (result?.text) {
+          attributes["gen_ai.completion.0.role"] = "assistant";
+          attributes["gen_ai.completion.0.content"] = result.text;
+        }
+        if (result?.response?.id) {
+          attributes["gen_ai.response.id"] = result.response.id;
+        }
+      }
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateText",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "OK",
+        prompt_tokens: result?.usage?.promptTokens,
+        completion_tokens: result?.usage?.completionTokens,
+        total_tokens: result?.usage?.totalTokens,
+        attributes: captureContent ? attributes : void 0,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      return result;
+    } catch (error) {
+      const endTime = Date.now();
+      const modelId = params?.model?.modelId || String(params?.model || "unknown");
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateText",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "ERROR",
+        error_message: error?.message,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      throw error;
+    }
+  };
+}
+function createStreamTextWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    const result = await aiModule.streamText(...args);
+    if (!ctx || !initialized2) {
+      return result;
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    let firstTokenTime = null;
+    const modelId = params?.model?.modelId || String(params?.model || "unknown");
+    let promptCtx = null;
+    try {
+      const { getPromptContext: getPromptContext2 } = await Promise.resolve().then(() => (init_prompts(), prompts_exports));
+      promptCtx = getPromptContext2();
+    } catch {
+    }
+    if (result?.usage) {
+      result.usage.then((usage) => {
+        const endTime = Date.now();
+        log2("\u{1F4CA} streamText usage:", JSON.stringify(usage, null, 2));
+        const attributes = {};
+        if (captureContent) {
+          attributes["gen_ai.request.model"] = modelId;
+          if (params?.prompt) {
+            attributes["gen_ai.prompt.0.role"] = "user";
+            attributes["gen_ai.prompt.0.content"] = params.prompt;
+          }
+        }
+        if (firstTokenTime) {
+          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+        }
+        const tracePayload = {
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamText",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "OK",
+          prompt_tokens: usage?.promptTokens,
+          completion_tokens: usage?.completionTokens,
+          total_tokens: usage?.totalTokens,
+          time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
+          attributes: captureContent ? attributes : void 0,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        };
+        sendTrace(tracePayload).catch(() => {
+        });
+      }).catch((error) => {
+        const endTime = Date.now();
+        log2("\u274C streamText error:", error?.message);
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamText",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "ERROR",
+          error_message: error?.message,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      });
+    }
+    if (result?.textStream) {
+      const originalTextStream = result.textStream;
+      const wrappedTextStream = (async function* () {
+        for await (const chunk of originalTextStream) {
+          if (!firstTokenTime) {
+            firstTokenTime = Date.now();
+            log2("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
+          }
+          yield chunk;
+        }
+      })();
+      return new Proxy(result, {
+        get(target, prop) {
+          if (prop === "textStream") {
+            return wrappedTextStream;
+          }
+          return target[prop];
+        }
+      });
+    }
+    return result;
+  };
+}
+function createGenerateObjectWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    if (!ctx || !initialized2) {
+      return aiModule.generateObject(...args);
+    }
+    let promptCtx = null;
+    try {
+      const { getPromptContext: getPromptContext2 } = await Promise.resolve().then(() => (init_prompts(), prompts_exports));
+      promptCtx = getPromptContext2();
+    } catch {
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    try {
+      const result = await aiModule.generateObject(...args);
+      const endTime = Date.now();
+      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelId;
+        attributes["gen_ai.response.model"] = modelId;
+        if (result?.object) {
+          attributes["gen_ai.completion.0.role"] = "assistant";
+          attributes["gen_ai.completion.0.content"] = JSON.stringify(
+            result.object
+          );
+        }
+      }
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateObject",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "OK",
+        prompt_tokens: result?.usage?.promptTokens,
+        completion_tokens: result?.usage?.completionTokens,
+        total_tokens: result?.usage?.totalTokens,
+        attributes: captureContent ? attributes : void 0,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      return result;
+    } catch (error) {
+      const endTime = Date.now();
+      const modelId = params?.model?.modelId || String(params?.model || "unknown");
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateObject",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "ERROR",
+        error_message: error?.message,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      throw error;
+    }
+  };
+}
+function createStreamObjectWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    const result = await aiModule.streamObject(...args);
+    log2("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
+    if (!ctx || !initialized2) {
+      return result;
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    let firstTokenTime = null;
+    const modelId = params?.model?.modelId || String(params?.model || "unknown");
+    let promptCtx = null;
+    try {
+      const { getPromptContext: getPromptContext2 } = await Promise.resolve().then(() => (init_prompts(), prompts_exports));
+      promptCtx = getPromptContext2();
+    } catch {
+    }
+    if (result?.usage) {
+      result.usage.then((usage) => {
+        const endTime = Date.now();
+        log2("\u{1F4CA} streamObject usage:", JSON.stringify(usage, null, 2));
+        const attributes = {};
+        if (captureContent) {
+          attributes["gen_ai.request.model"] = modelId;
+        }
+        if (firstTokenTime) {
+          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+        }
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamObject",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "OK",
+          prompt_tokens: usage?.promptTokens,
+          completion_tokens: usage?.completionTokens,
+          total_tokens: usage?.totalTokens,
+          attributes: captureContent ? attributes : void 0,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      }).catch((error) => {
+        const endTime = Date.now();
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamObject",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "ERROR",
+          error_message: error?.message,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      });
+    }
+    if (result?.partialObjectStream) {
+      const originalStream = result.partialObjectStream;
+      const wrappedStream = (async function* () {
+        for await (const chunk of originalStream) {
+          if (!firstTokenTime) {
+            firstTokenTime = Date.now();
+            log2("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
+          }
+          yield chunk;
+        }
+      })();
+      return new Proxy(result, {
+        get(target, prop) {
+          if (prop === "partialObjectStream") {
+            return wrappedStream;
+          }
+          return target[prop];
+        }
+      });
+    }
+    return result;
+  };
+}
 
 // src/models.ts
 var models_exports = {};
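This release's largest addition: wrapAISDK instruments the Vercel AI SDK's module-level functions rather than a client instance, returning wrapped generateText/streamText (and generateObject/streamObject when the installed SDK exposes them). A usage sketch; the ai and @ai-sdk/openai imports and model name are assumptions, not part of this package:

```js
const ai = require("ai");
const { openai } = require("@ai-sdk/openai"); // assumed provider package
const fallom = require("@fallom/trace");

const { generateText, streamText } = fallom.wrapAISDK(ai);

async function main() {
  // Traced as name: "generateText", kind: "llm"; promptTokens/completionTokens
  // map onto prompt_tokens/completion_tokens in the trace payload.
  const { text } = await generateText({
    model: openai("gpt-4o-mini"),
    prompt: "Write a haiku about tracing.",
  });
  console.log(text);

  // streamText sends its trace when the SDK's usage promise resolves; iterating
  // textStream goes through a wrapping Proxy that records time to first token.
  const result = await streamText({
    model: openai("gpt-4o-mini"),
    prompt: "Stream a short reply.",
  });
  for await (const chunk of result.textStream) process.stdout.write(chunk);
}
main();
```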
@@ -1340,7 +1902,7 @@ __export(models_exports, {
 });
 var import_crypto2 = require("crypto");
 var apiKey3 = null;
-var baseUrl3 = "https://spans.fallom.com";
+var baseUrl3 = "https://configs.fallom.com";
 var initialized3 = false;
 var syncInterval2 = null;
 var debugMode3 = false;
@@ -1354,7 +1916,7 @@ function log3(msg) {
 }
 function init3(options = {}) {
   apiKey3 = options.apiKey || process.env.FALLOM_API_KEY || null;
-  baseUrl3 = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  baseUrl3 = options.baseUrl || process.env.FALLOM_CONFIGS_URL || process.env.FALLOM_BASE_URL || "https://configs.fallom.com";
   initialized3 = true;
   if (!apiKey3) {
     return;
@@ -1443,20 +2005,28 @@ async function get2(configKey, sessionId, options = {}) {
   const { version, fallback, debug = false } = options;
   debugMode3 = debug;
   ensureInit2();
-  log3(`get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`);
+  log3(
+    `get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
+  );
   try {
     let configData = configCache.get(configKey);
-    log3(`Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`);
+    log3(
+      `Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
+    );
     if (!configData) {
       log3("Not in cache, fetching...");
       await fetchConfigs(SYNC_TIMEOUT2);
       configData = configCache.get(configKey);
-      log3(`After fetch, cache lookup: ${configData ? "found" : "still not found"}`);
+      log3(
+        `After fetch, cache lookup: ${configData ? "found" : "still not found"}`
+      );
     }
     if (!configData) {
       log3(`Config not found, using fallback: ${fallback}`);
       if (fallback) {
-        console.warn(`[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`);
+        console.warn(
+          `[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
+        );
         return returnWithTrace(configKey, sessionId, fallback, 0);
       }
       throw new Error(
@@ -1472,7 +2042,9 @@ async function get2(configKey, sessionId, options = {}) {
     }
     if (!config) {
       if (fallback) {
-        console.warn(`[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`);
+        console.warn(
+          `[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`
+        );
         return returnWithTrace(configKey, sessionId, fallback, 0);
       }
       throw new Error(`Config '${configKey}' version ${version} not found.`);
@@ -1483,7 +2055,9 @@ async function get2(configKey, sessionId, options = {}) {
       config = configData.versions.get(targetVersion);
       if (!config) {
         if (fallback) {
-          console.warn(`[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`);
+          console.warn(
+            `[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`
+          );
           return returnWithTrace(configKey, sessionId, fallback, 0);
         }
         throw new Error(`Config '${configKey}' has no cached version.`);
@@ -1492,7 +2066,11 @@ async function get2(configKey, sessionId, options = {}) {
     const variantsRaw = config.variants;
     const configVersion = config.version || targetVersion;
     const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
-    log3(`Config found! Version: ${configVersion}, Variants: ${JSON.stringify(variants)}`);
+    log3(
+      `Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
+        variants
+      )}`
+    );
     const hashBytes = (0, import_crypto2.createHash)("md5").update(sessionId).digest();
     const hashVal = hashBytes.readUInt32BE(0) % 1e6;
     log3(`Session hash: ${hashVal} (out of 1,000,000)`);
@@ -1501,7 +2079,9 @@ async function get2(configKey, sessionId, options = {}) {
     for (const v of variants) {
       const oldCumulative = cumulative;
       cumulative += v.weight * 1e4;
-      log3(`Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`);
+      log3(
+        `Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
+      );
       if (hashVal < cumulative) {
         assignedModel = v.model;
         break;
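Variant weights are percentages scaled by 1e4 into the same 0-1,000,000 space as the session hash; the first variant whose cumulative upper bound exceeds the hash wins. A worked example with a 30/70 split (model names and hash value illustrative):

```js
// weight=30 covers [0, 300000); weight=70 covers [300000, 1000000).
const variants = [
  { model: "model-a", weight: 30 },
  { model: "model-b", weight: 70 },
];
const hashVal = 412345; // example session hash; falls in the second range
let cumulative = 0;
let assignedModel;
for (const v of variants) {
  cumulative += v.weight * 1e4;
  if (hashVal < cumulative) {
    assignedModel = v.model;
    break;
  }
}
console.log(assignedModel); // "model-b" -- stable, since the hash depends only on the session ID
```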
@@ -1514,7 +2094,9 @@ async function get2(configKey, sessionId, options = {}) {
       throw e;
     }
     if (fallback) {
-      console.warn(`[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`);
+      console.warn(
+        `[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`
+      );
       return returnWithTrace(configKey, sessionId, fallback, 0);
     }
     throw e;
@@ -1561,20 +2143,22 @@ init_prompts();
 // src/init.ts
 init_prompts();
 async function init4(options = {}) {
-  const baseUrl4 = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  const tracesUrl = options.tracesUrl || process.env.FALLOM_TRACES_URL || "https://traces.fallom.com";
+  const configsUrl = options.configsUrl || process.env.FALLOM_CONFIGS_URL || "https://configs.fallom.com";
+  const promptsUrl = options.promptsUrl || process.env.FALLOM_PROMPTS_URL || "https://prompts.fallom.com";
   await init2({
     apiKey: options.apiKey,
-    baseUrl: baseUrl4,
+    baseUrl: tracesUrl,
     captureContent: options.captureContent,
     debug: options.debug
   });
   init3({
     apiKey: options.apiKey,
-    baseUrl: baseUrl4
+    baseUrl: configsUrl
   });
   init({
     apiKey: options.apiKey,
-    baseUrl: baseUrl4
+    baseUrl: promptsUrl
   });
 }
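The top-level init no longer takes a single baseUrl: each service endpoint can be overridden independently via options (tracesUrl/configsUrl/promptsUrl) or the matching environment variables, and each defaults to its own host. A sketch, assuming the package exports this initializer as init; the override URLs are placeholders:

```js
const fallom = require("@fallom/trace");

async function main() {
  await fallom.init({
    apiKey: process.env.FALLOM_API_KEY,
    // Omit any of these to get the new defaults:
    // traces.fallom.com / configs.fallom.com / prompts.fallom.com.
    tracesUrl: "https://traces.example.internal",   // or FALLOM_TRACES_URL
    configsUrl: "https://configs.example.internal", // or FALLOM_CONFIGS_URL
    promptsUrl: "https://prompts.example.internal", // or FALLOM_PROMPTS_URL
  });
}
main();
```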