@fallom/trace 0.1.6 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
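The headline change in 0.1.10 is a new `wrapAISDK` export in src/trace.ts that instruments the Vercel AI SDK (`generateText`, `streamText`, `generateObject`, `streamObject`); the release also splits the single spans.fallom.com endpoint into separate traces/configs/prompts endpoints and makes trace-send failures observable. A minimal usage sketch of the new wrapper follows — the entry-point shape (`trace` namespace) and the `setSession` argument are assumptions inferred from the exports and span payload visible in this diff, not confirmed documentation:

```ts
// Sketch only: tracing Vercel AI SDK calls with the new wrapAISDK (0.1.10).
import * as ai from "ai"; // Vercel AI SDK, peer dependency
import { init, trace } from "@fallom/trace"; // export shape assumed

await init({ apiKey: process.env.FALLOM_API_KEY });
// A session context is required, or the wrappers pass calls through untraced.
trace.setSession({ configKey: "chat", sessionId: "session-123" }); // shape assumed

const { generateText } = trace.wrapAISDK(ai);

declare const someModel: any; // placeholder for an AI SDK model instance
const { text } = await generateText({ model: someModel, prompt: "Hello!" });
// The result is returned unchanged; a span (model, tokens, latency) is sent
// to Fallom in the background, and send failures never reach your code.
```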
package/dist/index.mjs CHANGED
@@ -2,7 +2,7 @@ import {
   __export,
   init,
   prompts_exports
-} from "./chunk-IGJD7GBO.mjs";
+} from "./chunk-6MSTRIK4.mjs";
 
 // src/trace.ts
 var trace_exports = {};
@@ -14,6 +14,7 @@ __export(trace_exports, {
   setSession: () => setSession,
   shutdown: () => shutdown,
   span: () => span,
+  wrapAISDK: () => wrapAISDK,
   wrapAnthropic: () => wrapAnthropic,
   wrapGoogleAI: () => wrapGoogleAI,
   wrapOpenAI: () => wrapOpenAI
@@ -649,7 +650,7 @@ var Resource = (
 var sessionStorage = new AsyncLocalStorage();
 var fallbackSession = null;
 var apiKey = null;
-var baseUrl = "https://spans.fallom.com";
+var baseUrl = "https://traces.fallom.com";
 var initialized = false;
 var captureContent = true;
 var debugMode = false;
@@ -692,7 +693,7 @@ async function init2(options = {}) {
   debugMode = options.debug ?? false;
   log("\u{1F680} Initializing Fallom tracing...");
   apiKey = options.apiKey || process.env.FALLOM_API_KEY || null;
-  baseUrl = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  baseUrl = options.baseUrl || process.env.FALLOM_TRACES_URL || process.env.FALLOM_BASE_URL || "https://traces.fallom.com";
   const envCapture = process.env.FALLOM_CAPTURE_CONTENT?.toLowerCase();
   if (envCapture === "false" || envCapture === "0" || envCapture === "no") {
     captureContent = false;
@@ -864,12 +865,12 @@ function messagesToOtelAttributes(messages, completion, model, responseId) {
   if (messages) {
     messages.forEach((msg, i) => {
       attrs[`gen_ai.prompt.${i}.role`] = msg.role;
-      attrs[`gen_ai.prompt.${i}.content`] = msg.content;
+      attrs[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
     });
   }
   if (completion) {
     attrs["gen_ai.completion.0.role"] = completion.role;
-    attrs["gen_ai.completion.0.content"] = completion.content;
+    attrs["gen_ai.completion.0.content"] = typeof completion.content === "string" ? completion.content : JSON.stringify(completion.content);
     if (completion.tool_calls) {
       attrs["gen_ai.completion.0.tool_calls"] = JSON.stringify(
         completion.tool_calls
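Context for the two replacements above: chat-message `content` can be an array of parts (for example text plus image segments) rather than a plain string, and 0.1.6 wrote that value into the span attribute as-is. A small illustration of the new behavior:

```ts
// Illustration: non-string content is now JSON-stringified before being
// stored in gen_ai.prompt.N.content / gen_ai.completion.0.content.
const msg = {
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    { type: "image_url", image_url: { url: "https://example.com/cat.png" } },
  ],
};
const attrValue =
  typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
// attrValue is a JSON string, so the attribute stays a valid string value.
```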
@@ -886,10 +887,13 @@ function generateHexId(length) {
 var traceContextStorage = new AsyncLocalStorage();
 var fallbackTraceContext = null;
 async function sendTrace(trace) {
+  const url = `${baseUrl}/v1/traces`;
+  log("\u{1F4E4} Sending trace to:", url);
+  log(" Session:", trace.session_id, "Config:", trace.config_key);
   try {
     const controller = new AbortController();
     const timeoutId = setTimeout(() => controller.abort(), 5e3);
-    await fetch(`${baseUrl}/v1/traces`, {
+    const response = await fetch(url, {
       method: "POST",
       headers: {
         Authorization: `Bearer ${apiKey}`,
@@ -899,8 +903,14 @@ async function sendTrace(trace) {
       signal: controller.signal
     });
     clearTimeout(timeoutId);
-    log("\u{1F4E4} Trace sent:", trace.name, trace.model);
-  } catch {
+    if (!response.ok) {
+      const text = await response.text();
+      log("\u274C Trace send failed:", response.status, text);
+    } else {
+      log("\u2705 Trace sent:", trace.name, trace.model);
+    }
+  } catch (err) {
+    log("\u274C Trace send error:", err instanceof Error ? err.message : err);
   }
 }
 function wrapOpenAI(client) {
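Behavioral note on the sendTrace changes: 0.1.6 swallowed every failure in an empty catch, while 0.1.10 logs the target URL up front and reports non-2xx responses and network errors. With `debug: true` the log lines below (taken from this diff, emoji escapes decoded; the model name is illustrative) become visible:

```ts
import { init } from "@fallom/trace"; // export shape assumed

await init({ apiKey: process.env.FALLOM_API_KEY, debug: true });
// 📤 Sending trace to: https://traces.fallom.com/v1/traces
// ✅ Trace sent: generateText gpt-4o          (success)
// ❌ Trace send failed: 401 <response body>   (HTTP error)
// ❌ Trace send error: <message>              (network error or 5s timeout)
```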
@@ -914,7 +924,7 @@ function wrapOpenAI(client) {
   }
   let promptCtx = null;
   try {
-    const { getPromptContext } = await import("./prompts-67DJ33I4.mjs");
+    const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
     promptCtx = getPromptContext();
   } catch {
   }
@@ -1005,7 +1015,7 @@ function wrapAnthropic(client) {
   }
   let promptCtx = null;
   try {
-    const { getPromptContext } = await import("./prompts-67DJ33I4.mjs");
+    const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
     promptCtx = getPromptContext();
   } catch {
   }
@@ -1102,7 +1112,7 @@ function wrapGoogleAI(model) {
   }
   let promptCtx = null;
   try {
-    const { getPromptContext } = await import("./prompts-67DJ33I4.mjs");
+    const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
     promptCtx = getPromptContext();
   } catch {
   }
@@ -1202,6 +1212,414 @@ function wrapGoogleAI(model) {
   };
   return model;
 }
+function wrapAISDK(ai) {
+  const aiModule = ai;
+  return {
+    generateText: createGenerateTextWrapper(aiModule),
+    streamText: createStreamTextWrapper(aiModule),
+    generateObject: aiModule.generateObject ? createGenerateObjectWrapper(aiModule) : void 0,
+    streamObject: aiModule.streamObject ? createStreamObjectWrapper(aiModule) : void 0
+  };
+}
+function createGenerateTextWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    if (!ctx || !initialized) {
+      return aiModule.generateText(...args);
+    }
+    let promptCtx = null;
+    try {
+      const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
+      promptCtx = getPromptContext();
+    } catch {
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    try {
+      const result = await aiModule.generateText(...args);
+      const endTime = Date.now();
+      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelId;
+        attributes["gen_ai.response.model"] = modelId;
+        if (params?.prompt) {
+          attributes["gen_ai.prompt.0.role"] = "user";
+          attributes["gen_ai.prompt.0.content"] = params.prompt;
+        }
+        if (params?.messages) {
+          params.messages.forEach((msg, i) => {
+            attributes[`gen_ai.prompt.${i}.role`] = msg.role;
+            attributes[`gen_ai.prompt.${i}.content`] = typeof msg.content === "string" ? msg.content : JSON.stringify(msg.content);
+          });
+        }
+        if (result?.text) {
+          attributes["gen_ai.completion.0.role"] = "assistant";
+          attributes["gen_ai.completion.0.content"] = result.text;
+        }
+        if (result?.response?.id) {
+          attributes["gen_ai.response.id"] = result.response.id;
+        }
+      }
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateText",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "OK",
+        prompt_tokens: result?.usage?.promptTokens,
+        completion_tokens: result?.usage?.completionTokens,
+        total_tokens: result?.usage?.totalTokens,
+        attributes: captureContent ? attributes : void 0,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      return result;
+    } catch (error) {
+      const endTime = Date.now();
+      const modelId = params?.model?.modelId || String(params?.model || "unknown");
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateText",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "ERROR",
+        error_message: error?.message,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      throw error;
+    }
+  };
+}
+function createStreamTextWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    const result = await aiModule.streamText(...args);
+    if (!ctx || !initialized) {
+      return result;
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    let firstTokenTime = null;
+    const modelId = params?.model?.modelId || String(params?.model || "unknown");
+    let promptCtx = null;
+    try {
+      const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
+      promptCtx = getPromptContext();
+    } catch {
+    }
+    if (result?.usage) {
+      result.usage.then((usage) => {
+        const endTime = Date.now();
+        log("\u{1F4CA} streamText usage:", JSON.stringify(usage, null, 2));
+        const attributes = {};
+        if (captureContent) {
+          attributes["gen_ai.request.model"] = modelId;
+          if (params?.prompt) {
+            attributes["gen_ai.prompt.0.role"] = "user";
+            attributes["gen_ai.prompt.0.content"] = params.prompt;
+          }
+        }
+        if (firstTokenTime) {
+          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+        }
+        const tracePayload = {
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamText",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "OK",
+          prompt_tokens: usage?.promptTokens,
+          completion_tokens: usage?.completionTokens,
+          total_tokens: usage?.totalTokens,
+          time_to_first_token_ms: firstTokenTime ? firstTokenTime - startTime : void 0,
+          attributes: captureContent ? attributes : void 0,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        };
+        sendTrace(tracePayload).catch(() => {
+        });
+      }).catch((error) => {
+        const endTime = Date.now();
+        log("\u274C streamText error:", error?.message);
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamText",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "ERROR",
+          error_message: error?.message,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      });
+    }
+    if (result?.textStream) {
+      const originalTextStream = result.textStream;
+      const wrappedTextStream = (async function* () {
+        for await (const chunk of originalTextStream) {
+          if (!firstTokenTime) {
+            firstTokenTime = Date.now();
+            log("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
+          }
+          yield chunk;
+        }
+      })();
+      return new Proxy(result, {
+        get(target, prop) {
+          if (prop === "textStream") {
+            return wrappedTextStream;
+          }
+          return target[prop];
+        }
+      });
+    }
+    return result;
+  };
+}
+function createGenerateObjectWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    if (!ctx || !initialized) {
+      return aiModule.generateObject(...args);
+    }
+    let promptCtx = null;
+    try {
+      const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
+      promptCtx = getPromptContext();
+    } catch {
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    try {
+      const result = await aiModule.generateObject(...args);
+      const endTime = Date.now();
+      const modelId = result?.response?.modelId || params?.model?.modelId || String(params?.model || "unknown");
+      const attributes = {};
+      if (captureContent) {
+        attributes["gen_ai.request.model"] = modelId;
+        attributes["gen_ai.response.model"] = modelId;
+        if (result?.object) {
+          attributes["gen_ai.completion.0.role"] = "assistant";
+          attributes["gen_ai.completion.0.content"] = JSON.stringify(
+            result.object
+          );
+        }
+      }
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateObject",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "OK",
+        prompt_tokens: result?.usage?.promptTokens,
+        completion_tokens: result?.usage?.completionTokens,
+        total_tokens: result?.usage?.totalTokens,
+        attributes: captureContent ? attributes : void 0,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      return result;
+    } catch (error) {
+      const endTime = Date.now();
+      const modelId = params?.model?.modelId || String(params?.model || "unknown");
+      sendTrace({
+        config_key: ctx.configKey,
+        session_id: ctx.sessionId,
+        customer_id: ctx.customerId,
+        trace_id: traceId,
+        span_id: spanId,
+        parent_span_id: parentSpanId,
+        name: "generateObject",
+        kind: "llm",
+        model: modelId,
+        start_time: new Date(startTime).toISOString(),
+        end_time: new Date(endTime).toISOString(),
+        duration_ms: endTime - startTime,
+        status: "ERROR",
+        error_message: error?.message,
+        prompt_key: promptCtx?.promptKey,
+        prompt_version: promptCtx?.promptVersion,
+        prompt_ab_test_key: promptCtx?.abTestKey,
+        prompt_variant_index: promptCtx?.variantIndex
+      }).catch(() => {
+      });
+      throw error;
+    }
+  };
+}
+function createStreamObjectWrapper(aiModule) {
+  return async (...args) => {
+    const ctx = sessionStorage.getStore() || fallbackSession;
+    const params = args[0] || {};
+    const startTime = Date.now();
+    const result = await aiModule.streamObject(...args);
+    log("\u{1F50D} streamObject result keys:", Object.keys(result || {}));
+    if (!ctx || !initialized) {
+      return result;
+    }
+    const traceCtx = traceContextStorage.getStore() || fallbackTraceContext;
+    const traceId = traceCtx?.traceId || generateHexId(32);
+    const spanId = generateHexId(16);
+    const parentSpanId = traceCtx?.parentSpanId;
+    let firstTokenTime = null;
+    const modelId = params?.model?.modelId || String(params?.model || "unknown");
+    let promptCtx = null;
+    try {
+      const { getPromptContext } = await import("./prompts-VAN5E3L4.mjs");
+      promptCtx = getPromptContext();
+    } catch {
+    }
+    if (result?.usage) {
+      result.usage.then((usage) => {
+        const endTime = Date.now();
+        log("\u{1F4CA} streamObject usage:", JSON.stringify(usage, null, 2));
+        const attributes = {};
+        if (captureContent) {
+          attributes["gen_ai.request.model"] = modelId;
+        }
+        if (firstTokenTime) {
+          attributes["gen_ai.time_to_first_token_ms"] = firstTokenTime - startTime;
+        }
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamObject",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "OK",
+          prompt_tokens: usage?.promptTokens,
+          completion_tokens: usage?.completionTokens,
+          total_tokens: usage?.totalTokens,
+          attributes: captureContent ? attributes : void 0,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      }).catch((error) => {
+        const endTime = Date.now();
+        sendTrace({
+          config_key: ctx.configKey,
+          session_id: ctx.sessionId,
+          customer_id: ctx.customerId,
+          trace_id: traceId,
+          span_id: spanId,
+          parent_span_id: parentSpanId,
+          name: "streamObject",
+          kind: "llm",
+          model: modelId,
+          start_time: new Date(startTime).toISOString(),
+          end_time: new Date(endTime).toISOString(),
+          duration_ms: endTime - startTime,
+          status: "ERROR",
+          error_message: error?.message,
+          prompt_key: promptCtx?.promptKey,
+          prompt_version: promptCtx?.promptVersion,
+          prompt_ab_test_key: promptCtx?.abTestKey,
+          prompt_variant_index: promptCtx?.variantIndex
+        }).catch(() => {
+        });
+      });
+    }
+    if (result?.partialObjectStream) {
+      const originalStream = result.partialObjectStream;
+      const wrappedStream = (async function* () {
+        for await (const chunk of originalStream) {
+          if (!firstTokenTime) {
+            firstTokenTime = Date.now();
+            log("\u23F1\uFE0F Time to first token:", firstTokenTime - startTime, "ms");
+          }
+          yield chunk;
+        }
+      })();
+      return new Proxy(result, {
+        get(target, prop) {
+          if (prop === "partialObjectStream") {
+            return wrappedStream;
+          }
+          return target[prop];
+        }
+      });
+    }
+    return result;
+  };
+}
 
 // src/models.ts
 var models_exports = {};
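A note on the stream wrappers just added: they never buffer output. The original `streamText`/`streamObject` result is returned behind a Proxy whose `textStream`/`partialObjectStream` re-yields each chunk, stamping time-to-first-token on the first one, and the span is only sent once the SDK's `usage` promise settles. Consuming code is unchanged — a sketch, with the same assumed imports as the one at the top:

```ts
import * as ai from "ai";
import { trace } from "@fallom/trace"; // export shape assumed

const { streamText } = trace.wrapAISDK(ai);

declare const someModel: any; // placeholder for an AI SDK model instance
const result = await streamText({ model: someModel, prompt: "Tell me a story" });

for await (const chunk of result.textStream) {
  process.stdout.write(chunk); // first chunk records time_to_first_token_ms
}
// When result.usage resolves, one "streamText" span with token counts and
// TTFT is sent in the background; send errors are logged, never thrown.
```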
@@ -1211,7 +1629,7 @@ __export(models_exports, {
 });
 import { createHash } from "crypto";
 var apiKey2 = null;
-var baseUrl2 = "https://spans.fallom.com";
+var baseUrl2 = "https://configs.fallom.com";
 var initialized2 = false;
 var syncInterval = null;
 var debugMode2 = false;
@@ -1225,7 +1643,7 @@ function log2(msg) {
 }
 function init3(options = {}) {
   apiKey2 = options.apiKey || process.env.FALLOM_API_KEY || null;
-  baseUrl2 = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  baseUrl2 = options.baseUrl || process.env.FALLOM_CONFIGS_URL || process.env.FALLOM_BASE_URL || "https://configs.fallom.com";
   initialized2 = true;
   if (!apiKey2) {
     return;
@@ -1314,20 +1732,28 @@ async function get(configKey, sessionId, options = {}) {
   const { version, fallback, debug = false } = options;
   debugMode2 = debug;
   ensureInit();
-  log2(`get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`);
+  log2(
+    `get() called: configKey=${configKey}, sessionId=${sessionId}, fallback=${fallback}`
+  );
   try {
     let configData = configCache.get(configKey);
-    log2(`Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`);
+    log2(
+      `Cache lookup for '${configKey}': ${configData ? "found" : "not found"}`
+    );
     if (!configData) {
       log2("Not in cache, fetching...");
       await fetchConfigs(SYNC_TIMEOUT);
       configData = configCache.get(configKey);
-      log2(`After fetch, cache lookup: ${configData ? "found" : "still not found"}`);
+      log2(
+        `After fetch, cache lookup: ${configData ? "found" : "still not found"}`
+      );
     }
     if (!configData) {
       log2(`Config not found, using fallback: ${fallback}`);
       if (fallback) {
-        console.warn(`[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`);
+        console.warn(
+          `[Fallom WARNING] Config '${configKey}' not found, using fallback model: ${fallback}`
+        );
         return returnWithTrace(configKey, sessionId, fallback, 0);
       }
       throw new Error(
@@ -1343,7 +1769,9 @@ async function get(configKey, sessionId, options = {}) {
     }
     if (!config) {
       if (fallback) {
-        console.warn(`[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`);
+        console.warn(
+          `[Fallom WARNING] Config '${configKey}' version ${version} not found, using fallback: ${fallback}`
+        );
         return returnWithTrace(configKey, sessionId, fallback, 0);
       }
       throw new Error(`Config '${configKey}' version ${version} not found.`);
@@ -1354,7 +1782,9 @@ async function get(configKey, sessionId, options = {}) {
     config = configData.versions.get(targetVersion);
     if (!config) {
       if (fallback) {
-        console.warn(`[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`);
+        console.warn(
+          `[Fallom WARNING] Config '${configKey}' has no cached version, using fallback: ${fallback}`
+        );
         return returnWithTrace(configKey, sessionId, fallback, 0);
       }
       throw new Error(`Config '${configKey}' has no cached version.`);
@@ -1363,7 +1793,11 @@ async function get(configKey, sessionId, options = {}) {
     const variantsRaw = config.variants;
     const configVersion = config.version || targetVersion;
     const variants = Array.isArray(variantsRaw) ? variantsRaw : Object.values(variantsRaw);
-    log2(`Config found! Version: ${configVersion}, Variants: ${JSON.stringify(variants)}`);
+    log2(
+      `Config found! Version: ${configVersion}, Variants: ${JSON.stringify(
+        variants
+      )}`
+    );
     const hashBytes = createHash("md5").update(sessionId).digest();
     const hashVal = hashBytes.readUInt32BE(0) % 1e6;
     log2(`Session hash: ${hashVal} (out of 1,000,000)`);
@@ -1372,7 +1806,9 @@ async function get(configKey, sessionId, options = {}) {
     for (const v of variants) {
       const oldCumulative = cumulative;
       cumulative += v.weight * 1e4;
-      log2(`Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`);
+      log2(
+        `Variant ${v.model}: weight=${v.weight}%, range=${oldCumulative}-${cumulative}, hash=${hashVal}, match=${hashVal < cumulative}`
+      );
       if (hashVal < cumulative) {
         assignedModel = v.model;
         break;
@@ -1385,7 +1821,9 @@ async function get(configKey, sessionId, options = {}) {
       throw e;
     }
     if (fallback) {
-      console.warn(`[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`);
+      console.warn(
+        `[Fallom WARNING] Error getting model for '${configKey}': ${e}. Using fallback: ${fallback}`
+      );
       return returnWithTrace(configKey, sessionId, fallback, 0);
     }
     throw e;
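For reference, the warning changes above sit inside the deterministic A/B assignment visible in this hunk's context lines: the session id is MD5-hashed, folded into a bucket in [0, 1e6), and matched against cumulative variant weights (each percentage point owns 10,000 buckets), so the same session always resolves to the same model. A standalone reconstruction:

```ts
import { createHash } from "crypto";

// Reconstructed from the context lines of this hunk: deterministic,
// weight-proportional variant assignment keyed on the session id.
function assignVariant(
  sessionId: string,
  variants: { model: string; weight: number }[] // weight is a percentage
): string | undefined {
  const hashVal =
    createHash("md5").update(sessionId).digest().readUInt32BE(0) % 1e6;
  let cumulative = 0;
  for (const v of variants) {
    cumulative += v.weight * 1e4; // e.g. 50% → 500,000 buckets
    if (hashVal < cumulative) return v.model;
  }
  return undefined; // unreachable when weights sum to 100
}
```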
@@ -1428,20 +1866,22 @@ async function recordSession(configKey, version, sessionId, model) {
 
 // src/init.ts
 async function init4(options = {}) {
-  const baseUrl3 = options.baseUrl || process.env.FALLOM_BASE_URL || "https://spans.fallom.com";
+  const tracesUrl = options.tracesUrl || process.env.FALLOM_TRACES_URL || "https://traces.fallom.com";
+  const configsUrl = options.configsUrl || process.env.FALLOM_CONFIGS_URL || "https://configs.fallom.com";
+  const promptsUrl = options.promptsUrl || process.env.FALLOM_PROMPTS_URL || "https://prompts.fallom.com";
   await init2({
     apiKey: options.apiKey,
-    baseUrl: baseUrl3,
+    baseUrl: tracesUrl,
     captureContent: options.captureContent,
     debug: options.debug
   });
   init3({
     apiKey: options.apiKey,
-    baseUrl: baseUrl3
+    baseUrl: configsUrl
   });
   init({
     apiKey: options.apiKey,
-    baseUrl: baseUrl3
+    baseUrl: promptsUrl
   });
 }
 
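Net effect of the init changes: the single FALLOM_BASE_URL / spans.fallom.com endpoint is split per subsystem, each with its own option and environment variable (FALLOM_TRACES_URL, FALLOM_CONFIGS_URL, FALLOM_PROMPTS_URL); FALLOM_BASE_URL survives only as a lower-priority fallback inside the trace and config modules. A sketch pinning all three explicitly — option names are taken from this diff, the root export shape is assumed:

```ts
import { init } from "@fallom/trace"; // export shape assumed

await init({
  apiKey: process.env.FALLOM_API_KEY,
  tracesUrl: "https://traces.fallom.com",   // or env FALLOM_TRACES_URL
  configsUrl: "https://configs.fallom.com", // or env FALLOM_CONFIGS_URL
  promptsUrl: "https://prompts.fallom.com", // or env FALLOM_PROMPTS_URL
});
```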
@@ -0,0 +1,14 @@
+import {
+  clearPromptContext,
+  get,
+  getAB,
+  getPromptContext,
+  init
+} from "./chunk-6MSTRIK4.mjs";
+export {
+  clearPromptContext,
+  get,
+  getAB,
+  getPromptContext,
+  init
+};
@@ -0,0 +1,14 @@
+import {
+  clearPromptContext,
+  get,
+  getAB,
+  getPromptContext,
+  init
+} from "./chunk-H2EACSBT.mjs";
+export {
+  clearPromptContext,
+  get,
+  getAB,
+  getPromptContext,
+  init
+};
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@fallom/trace",
-  "version": "0.1.6",
+  "version": "0.1.10",
   "description": "Model A/B testing and tracing for LLM applications. Zero latency, production-ready.",
   "main": "./dist/index.js",
   "module": "./dist/index.mjs",