@sentrial/sdk 0.3.3 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs CHANGED
@@ -15,12 +15,13 @@ var __copyProps = (to, from, except, desc) => {
15
15
  }
16
16
  return to;
17
17
  };
18
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
18
+ var __toCommonJS = (mod2) => __copyProps(__defProp({}, "__esModule", { value: true }), mod2);
19
19
 
20
20
  // src/index.ts
21
21
  var index_exports = {};
22
22
  __export(index_exports, {
23
23
  ApiError: () => ApiError,
24
+ EventBatcher: () => EventBatcher,
24
25
  EventType: () => EventType,
25
26
  Experiment: () => Experiment,
26
27
  ExperimentRunTracker: () => ExperimentRunTracker,
@@ -40,6 +41,7 @@ __export(index_exports, {
40
41
  clearSessionContext: () => clearSessionContext,
41
42
  configure: () => configure,
42
43
  configureVercel: () => configureVercel,
44
+ createContextVar: () => createContextVar,
43
45
  getCurrentInteraction: () => getCurrentInteraction,
44
46
  getCurrentSessionId: () => getCurrentSessionId,
45
47
  getExperimentContext: () => getExperimentContext,
@@ -62,6 +64,7 @@ __export(index_exports, {
62
64
  withTool: () => withTool,
63
65
  wrapAISDK: () => wrapAISDK,
64
66
  wrapAnthropic: () => wrapAnthropic,
67
+ wrapClaudeAgent: () => wrapClaudeAgent,
65
68
  wrapGoogle: () => wrapGoogle,
66
69
  wrapLLM: () => wrapLLM,
67
70
  wrapOpenAI: () => wrapOpenAI
@@ -138,7 +141,22 @@ var ValidationError = class extends SentrialError {
138
141
  };
139
142
 
140
143
// src/redact.ts
// Node's createHash is resolved through an eval'd require so bundlers that
// target browser/edge runtimes never statically include the "crypto" module.
var _createHash;
try {
  const nodeCrypto = eval("require")("crypto");
  if (nodeCrypto?.createHash) {
    _createHash = nodeCrypto.createHash;
  }
} catch {
}
/**
 * Return Node's `createHash`, throwing a descriptive error when the crypto
 * module could not be loaded (browser/edge environments).
 */
function getCreateHash() {
  if (_createHash) {
    return _createHash;
  }
  throw new Error(
    'Sentrial PII hash mode requires Node.js crypto module. Use mode "label" or "remove" in browser/edge environments.'
  );
}
142
160
  var DEFAULT_FIELDS = [
143
161
  "userInput",
144
162
  "assistantOutput",
@@ -165,7 +183,7 @@ var BUILTIN_PATTERNS = {
165
183
  ipAddresses: { pattern: IP_ADDRESS_PATTERN, label: "IP_ADDRESS" }
166
184
  };
167
185
// Short, stable fingerprint of a redacted value: first 6 hex chars of SHA-256.
function hashValue(value) {
  const sha256 = getCreateHash()("sha256");
  return sha256.update(value).digest("hex").slice(0, 6);
}
170
188
  function replaceMatch(match, label, mode) {
171
189
  switch (mode) {
@@ -227,6 +245,59 @@ function redactPayload(payload, config) {
227
245
  return result;
228
246
  }
229
247
 
248
// src/async-context.ts
// AsyncLocalStorage is probed via an eval'd require so browser/edge bundlers
// do not statically pull in node:async_hooks.
var _AsyncLocalStorage = null;
try {
  const hooks = eval("require")("node:async_hooks");
  if (hooks?.AsyncLocalStorage) {
    _AsyncLocalStorage = hooks.AsyncLocalStorage;
  }
} catch {
}
/**
 * Context variable backed by AsyncLocalStorage: values are isolated per
 * async execution context, so concurrent requests do not clobber each other.
 */
var NodeContextVar = class {
  _storage;
  _defaultValue;
  constructor(defaultValue) {
    this._defaultValue = defaultValue;
    this._storage = new _AsyncLocalStorage();
  }
  get() {
    const current = this._storage.getStore();
    if (current === void 0) {
      return this._defaultValue;
    }
    return current;
  }
  set(value) {
    // Capture the value visible before the write so reset() can restore it.
    const token = { _previous: this.get() };
    this._storage.enterWith(value);
    return token;
  }
  reset(token) {
    this._storage.enterWith(token._previous);
  }
};
/**
 * Fallback context variable for runtimes without AsyncLocalStorage: a single
 * module-global slot (no per-async-context isolation).
 */
var SimpleContextVar = class {
  _value;
  constructor(defaultValue) {
    this._value = defaultValue;
  }
  get() {
    return this._value;
  }
  set(value) {
    const token = { _previous: this._value };
    this._value = value;
    return token;
  }
  reset(token) {
    this._value = token._previous;
  }
};
/** Create the best context variable the current runtime supports. */
function createContextVar(defaultValue) {
  return _AsyncLocalStorage ? new NodeContextVar(defaultValue) : new SimpleContextVar(defaultValue);
}
300
+
230
301
  // src/cost.ts
231
302
  var OPENAI_PRICING = {
232
303
  "gpt-5.2": { input: 5, output: 15 },
@@ -278,7 +349,8 @@ var GOOGLE_PRICING = {
278
349
  "gemini-1.0-pro": { input: 0.5, output: 1.5 }
279
350
  };
280
351
  function findModelKey(model, pricing) {
281
- for (const key of Object.keys(pricing)) {
352
+ const keys = Object.keys(pricing).sort((a, b) => b.length - a.length);
353
+ for (const key of keys) {
282
354
  if (model.startsWith(key)) {
283
355
  return key;
284
356
  }
@@ -306,179 +378,783 @@ function calculateGoogleCost(params) {
306
378
  return calculateCost(inputTokens, outputTokens, GOOGLE_PRICING[modelKey]);
307
379
  }
308
380
 
309
- // src/types.ts
310
- var EventType = /* @__PURE__ */ ((EventType2) => {
311
- EventType2["TOOL_CALL"] = "tool_call";
312
- EventType2["LLM_DECISION"] = "llm_decision";
313
- EventType2["STATE_CHANGE"] = "state_change";
314
- EventType2["ERROR"] = "error";
315
- return EventType2;
316
- })(EventType || {});
317
-
318
- // src/client.ts
319
- var DEFAULT_API_URL = "https://api.sentrial.com";
320
- var MAX_RETRIES = 3;
321
- var INITIAL_BACKOFF_MS = 500;
322
- var MAX_BACKOFF_MS = 8e3;
323
- var BACKOFF_MULTIPLIER = 2;
324
- var RETRYABLE_STATUS_CODES = /* @__PURE__ */ new Set([408, 429, 500, 502, 503, 504]);
325
- var REQUEST_TIMEOUT_MS = 1e4;
326
- var SentrialClient = class {
327
- apiUrl;
328
- apiKey;
329
- failSilently;
330
- piiConfig;
331
- piiConfigNeedsHydration = false;
332
- piiHydrationPromise;
333
- currentState = {};
334
- constructor(config = {}) {
335
- this.apiUrl = (config.apiUrl ?? (typeof process !== "undefined" ? process.env?.SENTRIAL_API_URL : void 0) ?? DEFAULT_API_URL).replace(/\/$/, "");
336
- this.apiKey = config.apiKey ?? (typeof process !== "undefined" ? process.env?.SENTRIAL_API_KEY : void 0);
337
- this.failSilently = config.failSilently ?? true;
338
- if (config.pii === true) {
339
- this.piiConfig = { enabled: true };
340
- this.piiConfigNeedsHydration = true;
341
- } else if (config.pii && typeof config.pii === "object") {
342
- this.piiConfig = config.pii;
343
- this.piiConfigNeedsHydration = false;
344
- }
381
// src/wrappers.ts
// Ambient tracking context used by the auto-wrappers: which session id and
// which client to report to. Backed by createContextVar, so on Node these are
// per-async-context; in fallback environments they are module-global.
var _currentSessionId = createContextVar(null);
var _currentClient = createContextVar(null);
var _defaultClient = null;
/** Bind a session id (and optionally a client) to the current context. */
function setSessionContext(sessionId, client) {
  _currentSessionId.set(sessionId);
  if (client) {
    _currentClient.set(client);
  }
}
/** Clear both session id and client for the current context. */
function clearSessionContext() {
  _currentSessionId.set(null);
  _currentClient.set(null);
}
/** Session id bound to the current context, or null when none is active. */
function getSessionContext() {
  return _currentSessionId.get();
}
/** Register the fallback client used when no context-local client is set. */
function setDefaultClient(client) {
  _defaultClient = client;
}
// Internal: like setSessionContext, but returns reset tokens so callers can
// restore the exact previous context afterwards.
function _setSessionContextWithTokens(sessionId, client) {
  const _sessionToken = _currentSessionId.set(sessionId);
  // When no client is supplied, re-set the current value so a restore token
  // still exists for the client slot.
  const _clientToken = client ? _currentClient.set(client) : _currentClient.set(_currentClient.get());
  return { _sessionToken, _clientToken };
}
// Internal: undo a _setSessionContextWithTokens call.
function _restoreSessionContext(tokens) {
  _currentSessionId.reset(tokens._sessionToken);
  _currentClient.reset(tokens._clientToken);
}
/** Client the wrappers should report to: context-local first, then default. */
function getTrackingClient() {
  return _currentClient.get() ?? _defaultClient;
}
413
/**
 * Patch an OpenAI SDK client in place so chat.completions.create is
 * automatically tracked (tokens, cost, duration, output).
 *
 * Returns the same client instance. Tracking failures never surface to the
 * caller; the original response/error path is preserved exactly.
 * options.trackWithoutSession — also record calls made with no active session.
 */
function wrapOpenAI(client, options = {}) {
  const { trackWithoutSession = false } = options;
  const chat = client.chat;
  if (!chat?.completions?.create) {
    console.warn("Sentrial: OpenAI client does not have chat.completions.create");
    return client;
  }
  const originalCreate = chat.completions.create.bind(chat.completions);
  chat.completions.create = async function(...args) {
    const startTime = Date.now();
    const params = args[0] ?? {};
    const messages = params.messages ?? [];
    const model = params.model ?? "unknown";
    const isStreaming = params.stream === true;
    // Streaming responses only include token usage when include_usage is set,
    // so opt in on the caller's behalf (non-mutating copy of params).
    if (isStreaming && !params.stream_options?.include_usage) {
      args[0] = { ...params, stream_options: { ...params.stream_options, include_usage: true } };
    }
    try {
      const response = await originalCreate(...args);
      if (isStreaming) {
        // Defer tracking until the stream has been fully consumed.
        return wrapOpenAIStream(response, { startTime, messages, model, trackWithoutSession });
      }
      const durationMs = Date.now() - startTime;
      const promptTokens = response.usage?.prompt_tokens ?? 0;
      const completionTokens = response.usage?.completion_tokens ?? 0;
      const totalTokens = response.usage?.total_tokens ?? 0;
      let outputContent = "";
      if (response.choices?.[0]?.message?.content) {
        outputContent = response.choices[0].message.content;
      }
      const cost = calculateOpenAICost({ model, inputTokens: promptTokens, outputTokens: completionTokens });
      // Fire-and-forget: trackLLMCall handles its own errors internally.
      trackLLMCall({
        provider: "openai",
        model,
        messages,
        output: outputContent,
        promptTokens,
        completionTokens,
        totalTokens,
        cost,
        durationMs,
        trackWithoutSession
      });
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      trackLLMError({
        provider: "openai",
        model,
        messages,
        error,
        durationMs,
        trackWithoutSession
      });
      // Re-throw so the caller's error handling is unchanged.
      throw error;
    }
  };
  return client;
}
472
/**
 * Patch an Anthropic SDK client in place so messages.create is automatically
 * tracked (tokens, cost, duration, concatenated text output).
 *
 * Returns the same client instance; response/error behavior is unchanged.
 * options.trackWithoutSession — also record calls made with no active session.
 */
function wrapAnthropic(client, options = {}) {
  const { trackWithoutSession = false } = options;
  const messages = client.messages;
  if (!messages?.create) {
    console.warn("Sentrial: Anthropic client does not have messages.create");
    return client;
  }
  const originalCreate = messages.create.bind(messages);
  messages.create = async function(...args) {
    const startTime = Date.now();
    const params = args[0] ?? {};
    const inputMessages = params.messages ?? [];
    const model = params.model ?? "unknown";
    // Anthropic passes the system prompt separately from the messages array.
    const system = params.system ?? "";
    const isStreaming = params.stream === true;
    try {
      const response = await originalCreate(...args);
      if (isStreaming) {
        // Defer tracking until the stream has been fully consumed.
        return wrapAnthropicStream(response, {
          startTime,
          messages: inputMessages,
          model,
          system,
          trackWithoutSession
        });
      }
      const durationMs = Date.now() - startTime;
      const promptTokens = response.usage?.input_tokens ?? 0;
      const completionTokens = response.usage?.output_tokens ?? 0;
      const totalTokens = promptTokens + completionTokens;
      // Concatenate only text blocks; tool_use and other block types are skipped.
      let outputContent = "";
      if (response.content) {
        for (const block of response.content) {
          if (block.type === "text") {
            outputContent += block.text;
          }
        }
      }
      const cost = calculateAnthropicCost({ model, inputTokens: promptTokens, outputTokens: completionTokens });
      // Fold the system prompt back in as a leading system message for tracking.
      const fullMessages = system ? [{ role: "system", content: system }, ...inputMessages] : inputMessages;
      trackLLMCall({
        provider: "anthropic",
        model,
        messages: fullMessages,
        output: outputContent,
        promptTokens,
        completionTokens,
        totalTokens,
        cost,
        durationMs,
        trackWithoutSession
      });
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      trackLLMError({
        provider: "anthropic",
        model,
        messages: inputMessages,
        error,
        durationMs,
        trackWithoutSession
      });
      throw error;
    }
  };
  return client;
}
540
/**
 * Patch a Google generative model instance in place so generateContent is
 * automatically tracked (tokens, cost, duration, text output).
 *
 * Returns the same model instance; response/error behavior is unchanged.
 * options.trackWithoutSession — also record calls made with no active session.
 */
function wrapGoogle(model, options = {}) {
  const { trackWithoutSession = false } = options;
  const originalGenerate = model.generateContent;
  if (!originalGenerate) {
    console.warn("Sentrial: Google model does not have generateContent");
    return model;
  }
  model.generateContent = async function(...args) {
    const startTime = Date.now();
    const contents = args[0];
    const modelName = model.model ?? "gemini-unknown";
    const messages = googleContentsToMessages(contents);
    try {
      // apply() keeps `this` bound to the model for the original method.
      const response = await originalGenerate.apply(model, args);
      const durationMs = Date.now() - startTime;
      let promptTokens = 0;
      let completionTokens = 0;
      // Usage metadata location differs across SDK versions — check both shapes.
      const usageMeta = response.response?.usageMetadata ?? response.usageMetadata;
      if (usageMeta) {
        promptTokens = usageMeta.promptTokenCount ?? 0;
        completionTokens = usageMeta.candidatesTokenCount ?? 0;
      }
      const totalTokens = promptTokens + completionTokens;
      let outputContent = "";
      try {
        // text() can throw (e.g. blocked responses); treat that as empty output.
        outputContent = response.response?.text?.() ?? response.text?.() ?? "";
      } catch {
      }
      const cost = calculateGoogleCost({ model: modelName, inputTokens: promptTokens, outputTokens: completionTokens });
      trackLLMCall({
        provider: "google",
        model: modelName,
        messages,
        output: outputContent,
        promptTokens,
        completionTokens,
        totalTokens,
        cost,
        durationMs,
        trackWithoutSession
      });
      return response;
    } catch (error) {
      const durationMs = Date.now() - startTime;
      trackLLMError({
        provider: "google",
        model: modelName,
        messages,
        error,
        durationMs,
        trackWithoutSession
      });
      throw error;
    }
  };
  return model;
}
597
/**
 * Normalize Google generateContent input (string, array, or anything else)
 * into OpenAI-style { role, content } message objects for tracking.
 */
function googleContentsToMessages(contents) {
  const asUserMessage = (text) => ({ role: "user", content: text });
  if (typeof contents === "string") {
    return [asUserMessage(contents)];
  }
  if (!Array.isArray(contents)) {
    return [asUserMessage(String(contents))];
  }
  return contents.map((entry) => {
    if (typeof entry === "string") {
      return asUserMessage(entry);
    }
    if (entry && typeof entry === "object") {
      // Objects may carry their own role; fall back to stringifying the
      // whole entry when it has no content field.
      return { role: entry.role ?? "user", content: String(entry.content ?? entry) };
    }
    return asUserMessage(String(entry));
  });
}
614
/**
 * Detect the LLM client type — by explicit provider name or by duck-typing
 * its API surface — and apply the matching wrapper. Unknown clients are
 * returned untouched with a warning.
 */
function wrapLLM(client, provider) {
  if (provider === "openai" || client.chat?.completions?.create) {
    return wrapOpenAI(client);
  }
  if (provider === "anthropic" || client.messages?.create) {
    return wrapAnthropic(client);
  }
  if (provider === "google" || client.generateContent) {
    return wrapGoogle(client);
  }
  console.warn("Sentrial: Unknown LLM client type. No auto-tracking applied.");
  return client;
}
627
/**
 * Wrap an OpenAI streaming response in a Proxy that accumulates content and
 * usage as chunks are consumed, then records exactly one tracking event when
 * the stream finishes (or is returned early by the consumer).
 */
function wrapOpenAIStream(stream, ctx) {
  let fullContent = "";
  let usage = null;
  let tracked = false;
  const originalIterator = stream[Symbol.asyncIterator]?.bind(stream);
  // Not async-iterable — nothing to observe; hand the stream back unwrapped.
  if (!originalIterator) return stream;
  const trackResult = () => {
    // Guard against double-tracking (next() completion AND return()).
    if (tracked) return;
    tracked = true;
    const durationMs = Date.now() - ctx.startTime;
    const promptTokens = usage?.prompt_tokens ?? 0;
    const completionTokens = usage?.completion_tokens ?? 0;
    const totalTokens = usage?.total_tokens ?? promptTokens + completionTokens;
    const cost = calculateOpenAICost({ model: ctx.model, inputTokens: promptTokens, outputTokens: completionTokens });
    trackLLMCall({
      provider: "openai",
      model: ctx.model,
      messages: ctx.messages,
      output: fullContent,
      promptTokens,
      completionTokens,
      totalTokens,
      cost,
      durationMs,
      trackWithoutSession: ctx.trackWithoutSession
    });
  };
  // Proxy so every other property/method of the SDK stream keeps working;
  // only async iteration is intercepted.
  return new Proxy(stream, {
    get(target, prop, receiver) {
      if (prop === Symbol.asyncIterator) {
        return function() {
          const iter = originalIterator();
          return {
            async next() {
              const result = await iter.next();
              if (!result.done) {
                const chunk = result.value;
                const delta = chunk.choices?.[0]?.delta?.content;
                if (delta) fullContent += delta;
                // Usage arrives on the final chunk when include_usage is set.
                if (chunk.usage) usage = chunk.usage;
              } else {
                trackResult();
              }
              return result;
            },
            async return(value) {
              // Consumer stopped early (break/return): track what we saw so far.
              trackResult();
              return iter.return?.(value) ?? { done: true, value: void 0 };
            },
            async throw(error) {
              return iter.throw?.(error) ?? { done: true, value: void 0 };
            }
          };
        };
      }
      return Reflect.get(target, prop, receiver);
    }
  });
}
686
/**
 * Wrap an Anthropic streaming response in a Proxy that accumulates text and
 * token counts from stream events, then records exactly one tracking event
 * when the stream finishes (or is returned early by the consumer).
 */
function wrapAnthropicStream(stream, ctx) {
  let fullContent = "";
  let inputTokens = 0;
  let outputTokens = 0;
  let tracked = false;
  const originalIterator = stream[Symbol.asyncIterator]?.bind(stream);
  // Not async-iterable — nothing to observe; hand the stream back unwrapped.
  if (!originalIterator) return stream;
  const trackResult = () => {
    // Guard against double-tracking (next() completion AND return()).
    if (tracked) return;
    tracked = true;
    const durationMs = Date.now() - ctx.startTime;
    const totalTokens = inputTokens + outputTokens;
    const cost = calculateAnthropicCost({ model: ctx.model, inputTokens, outputTokens });
    // Fold the system prompt back in as a leading system message for tracking.
    const fullMessages = ctx.system ? [{ role: "system", content: ctx.system }, ...ctx.messages] : ctx.messages;
    trackLLMCall({
      provider: "anthropic",
      model: ctx.model,
      messages: fullMessages,
      output: fullContent,
      promptTokens: inputTokens,
      completionTokens: outputTokens,
      totalTokens,
      cost,
      durationMs,
      trackWithoutSession: ctx.trackWithoutSession
    });
  };
  // Proxy so every other property/method of the SDK stream keeps working;
  // only async iteration is intercepted.
  return new Proxy(stream, {
    get(target, prop, receiver) {
      if (prop === Symbol.asyncIterator) {
        return function() {
          const iter = originalIterator();
          return {
            async next() {
              const result = await iter.next();
              if (!result.done) {
                const event = result.value;
                if (event.type === "content_block_delta" && event.delta?.text) {
                  fullContent += event.delta.text;
                }
                // input_tokens come once at message_start; output_tokens are
                // cumulative on message_delta events.
                if (event.type === "message_start" && event.message?.usage) {
                  inputTokens = event.message.usage.input_tokens ?? 0;
                }
                if (event.type === "message_delta" && event.usage) {
                  outputTokens = event.usage.output_tokens ?? 0;
                }
              } else {
                trackResult();
              }
              return result;
            },
            async return(value) {
              // Consumer stopped early (break/return): track what we saw so far.
              trackResult();
              return iter.return?.(value) ?? { done: true, value: void 0 };
            },
            async throw(error) {
              return iter.throw?.(error) ?? { done: true, value: void 0 };
            }
          };
        };
      }
      return Reflect.get(target, prop, receiver);
    }
  });
}
751
/**
 * Record a completed LLM call as a tool-call event.
 *
 * With an active session, the event is attached to it. Without one, and only
 * when params.trackWithoutSession is set, a throwaway session is created,
 * the event recorded, and the session completed. All paths are fire-and-forget:
 * failures are logged, never thrown to the caller.
 */
function trackLLMCall(params) {
  const client = getTrackingClient();
  if (!client) return;
  const sessionId = _currentSessionId.get();
  if (!sessionId && !params.trackWithoutSession) {
    return;
  }
  if (sessionId) {
    client.trackToolCall({
      sessionId,
      toolName: `llm:${params.provider}:${params.model}`,
      toolInput: {
        messages: params.messages,
        model: params.model,
        provider: params.provider
      },
      toolOutput: {
        content: params.output,
        tokens: {
          prompt: params.promptTokens,
          completion: params.completionTokens,
          total: params.totalTokens
        },
        cost_usd: params.cost
      },
      reasoning: `LLM call to ${params.provider} ${params.model}`,
      estimatedCost: params.cost,
      tokenCount: params.totalTokens,
      metadata: {
        provider: params.provider,
        model: params.model,
        duration_ms: params.durationMs,
        prompt_tokens: params.promptTokens,
        completion_tokens: params.completionTokens
      }
    }).catch((err) => {
      console.warn("Sentrial: Failed to track LLM call:", err.message);
    });
  } else if (params.trackWithoutSession) {
    // Standalone path: create a one-off session, record, then complete it.
    client.createSession({
      name: `LLM: ${params.provider}/${params.model}`,
      agentName: `${params.provider}-wrapper`,
      userId: "anonymous"
    }).then((sid) => {
      // createSession may resolve null when failSilently swallows an API error.
      if (!sid) return;
      return client.trackToolCall({
        sessionId: sid,
        toolName: `llm:${params.provider}:${params.model}`,
        toolInput: {
          messages: params.messages,
          model: params.model,
          provider: params.provider
        },
        toolOutput: {
          content: params.output,
          tokens: {
            prompt: params.promptTokens,
            completion: params.completionTokens,
            total: params.totalTokens
          },
          cost_usd: params.cost
        },
        estimatedCost: params.cost,
        tokenCount: params.totalTokens,
        metadata: {
          provider: params.provider,
          model: params.model,
          duration_ms: params.durationMs
        }
      }).then(() => {
        return client.completeSession({
          sessionId: sid,
          success: true,
          estimatedCost: params.cost,
          promptTokens: params.promptTokens,
          completionTokens: params.completionTokens,
          totalTokens: params.totalTokens,
          durationMs: params.durationMs
        });
      });
    }).catch((err) => {
      console.warn("Sentrial: Failed to track standalone LLM call:", err.message);
    });
  }
}
836
/**
 * Report a failed LLM call as an error event on the current session.
 *
 * Unlike trackLLMCall, errors are never recorded without an existing session
 * (no standalone session is created), so the previous
 * `!sessionId && !params.trackWithoutSession` guard was dead code — the
 * subsequent `if (!sessionId) return` subsumed it — and has been folded away.
 * Behavior is unchanged. Fire-and-forget: failures are logged, never thrown.
 */
function trackLLMError(params) {
  const client = getTrackingClient();
  if (!client) return;
  const sessionId = _currentSessionId.get();
  // Errors only attach to an active session, regardless of trackWithoutSession.
  if (!sessionId) return;
  client.trackError({
    sessionId,
    errorMessage: params.error.message,
    errorType: params.error.name,
    toolName: `llm:${params.provider}:${params.model}`,
    metadata: {
      provider: params.provider,
      model: params.model,
      duration_ms: params.durationMs
    }
  }).catch((err) => {
    console.warn("Sentrial: Failed to track LLM error:", err.message);
  });
}
858
+
859
// src/batcher.ts
/**
 * Buffers API events and delivers them via the injected send function.
 * Delivery is triggered on a timer, when a size threshold is reached, or at
 * shutdown. When the queue is full, the oldest event is dropped.
 */
var EventBatcher = class {
  queue = [];
  flushIntervalMs;
  flushThreshold;
  maxQueueSize;
  timer = null;
  sendFn;
  flushing = false;
  shutdownCalled = false;
  exitHandler;
  constructor(sendFn, config = {}) {
    this.sendFn = sendFn;
    this.flushIntervalMs = config.flushIntervalMs ?? 1e3;
    this.flushThreshold = config.flushThreshold ?? 10;
    this.maxQueueSize = config.maxQueueSize ?? 1e3;
    this.timer = setInterval(() => {
      void this.flush();
    }, this.flushIntervalMs);
    // unref (Node-only) so the interval never keeps the process alive.
    if (this.timer && typeof this.timer === "object" && "unref" in this.timer) {
      this.timer.unref();
    }
    this.exitHandler = () => {
      void this.shutdown();
    };
    if (typeof process !== "undefined" && process.on) {
      process.on("beforeExit", this.exitHandler);
    }
  }
  /**
   * Enqueue an event for batched delivery.
   *
   * Hitting `flushThreshold` triggers an automatic flush; a full queue
   * (`maxQueueSize`) drops the oldest event first. No-op after shutdown.
   */
  enqueue(method, url, body) {
    if (this.shutdownCalled) return;
    const atCapacity = this.queue.length >= this.maxQueueSize;
    if (atCapacity) {
      this.queue.shift();
      if (typeof console !== "undefined") {
        console.warn(
          `Sentrial: Event queue full (${this.maxQueueSize}), dropping oldest event`
        );
      }
    }
    this.queue.push({ method, url, body });
    if (this.queue.length >= this.flushThreshold) {
      void this.flush();
    }
  }
  /**
   * Flush all queued events to the API.
   *
   * The queue is drained synchronously and all requests fire in parallel;
   * per-event failures are logged, never rethrown. Safe to call concurrently:
   * the `flushing` guard ensures only one flush runs at a time.
   */
  async flush() {
    if (this.flushing) return;
    if (this.queue.length === 0) return;
    this.flushing = true;
    const pending = this.queue.splice(0);
    try {
      const deliveries = pending.map(
        (event) => this.sendFn(event.method, event.url, event.body).catch((err) => {
          if (typeof console !== "undefined") {
            console.warn("Sentrial: Batched event failed:", err);
          }
        })
      );
      await Promise.all(deliveries);
    } finally {
      this.flushing = false;
    }
  }
  /**
   * Stop the batcher: cancel the timer, detach the exit handler, and drain
   * any remaining events. Idempotent.
   */
  async shutdown() {
    if (this.shutdownCalled) return;
    this.shutdownCalled = true;
    if (this.timer !== null) {
      clearInterval(this.timer);
      this.timer = null;
    }
    if (typeof process !== "undefined" && process.removeListener) {
      process.removeListener("beforeExit", this.exitHandler);
    }
    // Force-reset the guard so the final drain always runs.
    this.flushing = false;
    await this.flush();
  }
  /** Number of events currently queued. */
  get size() {
    return this.queue.length;
  }
};
954
+
955
// src/types.ts
// String enum of trackable event categories (esbuild enum-IIFE output shape).
var EventType = /* @__PURE__ */ ((values) => {
  values["TOOL_CALL"] = "tool_call";
  values["LLM_DECISION"] = "llm_decision";
  values["STATE_CHANGE"] = "state_change";
  values["ERROR"] = "error";
  return values;
})(EventType || {});
963
+
964
+ // src/client.ts
965
+ var DEFAULT_API_URL = "https://api.sentrial.com";
966
+ var MAX_RETRIES = 3;
967
+ var INITIAL_BACKOFF_MS = 500;
968
+ var MAX_BACKOFF_MS = 8e3;
969
+ var BACKOFF_MULTIPLIER = 2;
970
+ var RETRYABLE_STATUS_CODES = /* @__PURE__ */ new Set([408, 429, 500, 502, 503, 504]);
971
+ var REQUEST_TIMEOUT_MS = 1e4;
972
+ var SentrialClient = class {
973
+ apiUrl;
974
+ apiKey;
975
+ failSilently;
976
+ piiConfig;
977
+ piiConfigNeedsHydration = false;
978
+ piiHydrationPromise;
979
+ _stateVar = createContextVar({});
980
+ batcher;
981
+ /** Per-session cost/token accumulator — populated by trackToolCall/trackDecision */
982
+ sessionAccumulators = /* @__PURE__ */ new Map();
983
+ get currentState() {
984
+ return this._stateVar.get();
985
+ }
986
+ set currentState(value) {
987
+ this._stateVar.set(value);
988
+ }
989
+ constructor(config = {}) {
990
+ this.apiUrl = (config.apiUrl ?? (typeof process !== "undefined" ? process.env?.SENTRIAL_API_URL : void 0) ?? DEFAULT_API_URL).replace(/\/$/, "");
991
+ this.apiKey = config.apiKey ?? (typeof process !== "undefined" ? process.env?.SENTRIAL_API_KEY : void 0);
992
+ this.failSilently = config.failSilently ?? true;
993
+ if (config.pii === true) {
994
+ this.piiConfig = { enabled: true };
995
+ this.piiConfigNeedsHydration = true;
996
+ } else if (config.pii && typeof config.pii === "object") {
997
+ this.piiConfig = config.pii;
998
+ this.piiConfigNeedsHydration = false;
999
+ }
1000
+ if (config.batching?.enabled) {
1001
+ this.batcher = new EventBatcher(
1002
+ (method, url, body) => this.safeRequest(method, url, body),
1003
+ config.batching
1004
+ );
1005
+ }
1006
+ }
1007
+ /**
1008
+ * Fetch the organization's PII config from the server.
1009
+ *
1010
+ * Called lazily on the first request when `pii: true` was passed to the constructor.
1011
+ * Uses a single shared promise so concurrent requests don't trigger duplicate fetches.
1012
+ */
1013
+ async hydratePiiConfig() {
1014
+ if (!this.piiConfigNeedsHydration) return;
1015
+ if (this.piiHydrationPromise) {
1016
+ await this.piiHydrationPromise;
1017
+ return;
1018
+ }
1019
+ this.piiHydrationPromise = (async () => {
1020
+ try {
1021
+ const headers = {};
1022
+ if (this.apiKey) {
1023
+ headers["Authorization"] = `Bearer ${this.apiKey}`;
1024
+ }
1025
+ const controller = new AbortController();
1026
+ const timeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
1027
+ let response;
1028
+ try {
1029
+ response = await fetch(`${this.apiUrl}/api/sdk/pii-config`, {
1030
+ method: "GET",
1031
+ headers,
1032
+ signal: controller.signal
1033
+ });
1034
+ } finally {
1035
+ clearTimeout(timeoutId);
1036
+ }
1037
+ if (response.ok) {
1038
+ const data = await response.json();
1039
+ if (data.config) {
1040
+ this.piiConfig = {
1041
+ enabled: data.config.enabled,
1042
+ mode: data.config.mode,
1043
+ fields: data.config.fields,
1044
+ builtinPatterns: data.config.builtinPatterns,
1045
+ customPatterns: (data.config.customPatterns || []).map(
1046
+ (cp) => ({
1047
+ pattern: new RegExp(cp.pattern, "g"),
1048
+ label: cp.label
1049
+ })
1050
+ ),
1051
+ enhancedDetection: data.config.enhancedDetection
1052
+ };
1053
+ }
1054
+ }
1055
+ } catch {
1056
+ }
1057
+ this.piiConfigNeedsHydration = false;
1058
+ })();
1059
+ await this.piiHydrationPromise;
1060
+ }
1061
+ /**
1062
+ * Make an HTTP request with retry logic and exponential backoff.
1063
+ *
1064
+ * Retries on transient failures (network errors, timeouts, 429/5xx).
1065
+ * Up to MAX_RETRIES attempts with exponential backoff.
1066
+ */
1067
+ async safeRequest(method, url, body) {
1068
+ if (this.piiConfigNeedsHydration) {
1069
+ await this.hydratePiiConfig();
1070
+ }
1071
+ let lastError;
1072
+ let backoff = INITIAL_BACKOFF_MS;
1073
+ for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) {
1074
+ try {
1075
+ const headers = {
1076
+ "Content-Type": "application/json"
1077
+ };
1078
+ if (this.apiKey) {
1079
+ headers["Authorization"] = `Bearer ${this.apiKey}`;
1080
+ }
1081
+ const finalBody = this.piiConfig && body && typeof body === "object" ? redactPayload(body, this.piiConfig) : body;
1082
+ const controller = new AbortController();
1083
+ const timeoutId = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
1084
+ let response;
1085
+ try {
1086
+ response = await fetch(url, {
1087
+ method,
1088
+ headers,
1089
+ body: finalBody ? JSON.stringify(finalBody) : void 0,
1090
+ signal: controller.signal
1091
+ });
1092
+ } finally {
1093
+ clearTimeout(timeoutId);
1094
+ }
1095
+ if (RETRYABLE_STATUS_CODES.has(response.status) && attempt < MAX_RETRIES) {
1096
+ await this.sleep(backoff);
1097
+ backoff = Math.min(backoff * BACKOFF_MULTIPLIER, MAX_BACKOFF_MS);
1098
+ continue;
1099
+ }
1100
+ if (!response.ok) {
1101
+ const errorBody = await response.text();
1102
+ let errorData = {};
1103
+ try {
1104
+ errorData = JSON.parse(errorBody);
1105
+ } catch {
1106
+ }
1107
+ const error = new ApiError(
1108
+ errorData.error?.message || `HTTP ${response.status}: ${response.statusText}`,
1109
+ response.status,
1110
+ errorData.error?.code
1111
+ );
1112
+ if (this.failSilently) {
1113
+ console.warn(`Sentrial: Request failed (${method} ${url}):`, error.message);
1114
+ return null;
1115
+ }
1116
+ throw error;
1117
+ }
1118
+ return await response.json();
1119
+ } catch (error) {
1120
+ if (error instanceof ApiError) {
1121
+ throw error;
1122
+ }
1123
+ lastError = error instanceof Error ? error : new Error(String(error));
1124
+ if (attempt < MAX_RETRIES) {
1125
+ await this.sleep(backoff);
1126
+ backoff = Math.min(backoff * BACKOFF_MULTIPLIER, MAX_BACKOFF_MS);
1127
+ continue;
1128
+ }
1129
+ }
1130
+ }
1131
+ const networkError = new NetworkError(
1132
+ lastError?.message ?? "Unknown network error",
1133
+ lastError
1134
+ );
1135
+ if (this.failSilently) {
1136
+ console.warn(`Sentrial: Request failed after ${MAX_RETRIES + 1} attempts (${method} ${url}):`, networkError.message);
1137
+ return null;
1138
+ }
1139
+ throw networkError;
1140
+ }
1141
+ sleep(ms) {
1142
+ return new Promise((resolve) => setTimeout(resolve, ms));
1143
+ }
1144
+ accumulate(sessionId, cost, tokenCount, toolOutput) {
1145
+ let acc = this.sessionAccumulators.get(sessionId);
1146
+ if (!acc) {
1147
+ acc = { cost: 0, promptTokens: 0, completionTokens: 0, totalTokens: 0 };
1148
+ this.sessionAccumulators.set(sessionId, acc);
1149
+ }
1150
+ if (cost != null) acc.cost += cost;
1151
+ if (tokenCount != null) acc.totalTokens += tokenCount;
1152
+ const rawTokens = toolOutput?.tokens;
1153
+ if (rawTokens && typeof rawTokens === "object" && !Array.isArray(rawTokens)) {
1154
+ const tokens = rawTokens;
1155
+ if (typeof tokens.prompt === "number") acc.promptTokens += tokens.prompt;
1156
+ if (typeof tokens.completion === "number") acc.completionTokens += tokens.completion;
1157
+ }
482
1158
  }
483
1159
  /**
484
1160
  * Create a new session
@@ -514,6 +1190,7 @@ var SentrialClient = class {
514
1190
  * @returns Event data
515
1191
  */
516
1192
  async trackToolCall(params) {
1193
+ this.accumulate(params.sessionId, params.estimatedCost, params.tokenCount, params.toolOutput);
517
1194
  const stateBefore = { ...this.currentState };
518
1195
  this.currentState[`${params.toolName}_result`] = params.toolOutput;
519
1196
  const payload = {
@@ -532,6 +1209,10 @@ var SentrialClient = class {
532
1209
  if (params.traceId !== void 0) payload.traceId = params.traceId;
533
1210
  if (params.spanId !== void 0) payload.spanId = params.spanId;
534
1211
  if (params.metadata !== void 0) payload.metadata = params.metadata;
1212
+ if (this.batcher) {
1213
+ this.batcher.enqueue("POST", `${this.apiUrl}/api/sdk/events`, payload);
1214
+ return null;
1215
+ }
535
1216
  return this.safeRequest("POST", `${this.apiUrl}/api/sdk/events`, payload);
536
1217
  }
537
1218
  /**
@@ -541,6 +1222,7 @@ var SentrialClient = class {
541
1222
  * @returns Event data
542
1223
  */
543
1224
  async trackDecision(params) {
1225
+ this.accumulate(params.sessionId, params.estimatedCost, params.tokenCount);
544
1226
  const stateBefore = { ...this.currentState };
545
1227
  const payload = {
546
1228
  sessionId: params.sessionId,
@@ -556,6 +1238,10 @@ var SentrialClient = class {
556
1238
  if (params.traceId !== void 0) payload.traceId = params.traceId;
557
1239
  if (params.spanId !== void 0) payload.spanId = params.spanId;
558
1240
  if (params.metadata !== void 0) payload.metadata = params.metadata;
1241
+ if (this.batcher) {
1242
+ this.batcher.enqueue("POST", `${this.apiUrl}/api/sdk/events`, payload);
1243
+ return null;
1244
+ }
559
1245
  return this.safeRequest("POST", `${this.apiUrl}/api/sdk/events`, payload);
560
1246
  }
561
1247
  /**
@@ -582,6 +1268,10 @@ var SentrialClient = class {
582
1268
  if (params.traceId !== void 0) payload.traceId = params.traceId;
583
1269
  if (params.spanId !== void 0) payload.spanId = params.spanId;
584
1270
  if (params.metadata !== void 0) payload.metadata = params.metadata;
1271
+ if (this.batcher) {
1272
+ this.batcher.enqueue("POST", `${this.apiUrl}/api/sdk/events`, payload);
1273
+ return null;
1274
+ }
585
1275
  return this.safeRequest("POST", `${this.apiUrl}/api/sdk/events`, payload);
586
1276
  }
587
1277
  /**
@@ -627,6 +1317,10 @@ var SentrialClient = class {
627
1317
  if (params.metadata) {
628
1318
  payload.metadata = params.metadata;
629
1319
  }
1320
+ if (this.batcher) {
1321
+ this.batcher.enqueue("POST", `${this.apiUrl}/api/sdk/events`, payload);
1322
+ return null;
1323
+ }
630
1324
  return this.safeRequest("POST", `${this.apiUrl}/api/sdk/events`, payload);
631
1325
  }
632
1326
  /**
@@ -656,6 +1350,17 @@ var SentrialClient = class {
656
1350
  * ```
657
1351
  */
658
1352
  async completeSession(params) {
1353
+ if (this.batcher) {
1354
+ await this.batcher.flush();
1355
+ }
1356
+ const acc = this.sessionAccumulators.get(params.sessionId);
1357
+ if (acc) {
1358
+ if (params.estimatedCost === void 0 && acc.cost > 0) params = { ...params, estimatedCost: acc.cost };
1359
+ if (params.promptTokens === void 0 && acc.promptTokens > 0) params = { ...params, promptTokens: acc.promptTokens };
1360
+ if (params.completionTokens === void 0 && acc.completionTokens > 0) params = { ...params, completionTokens: acc.completionTokens };
1361
+ if (params.totalTokens === void 0 && acc.totalTokens > 0) params = { ...params, totalTokens: acc.totalTokens };
1362
+ this.sessionAccumulators.delete(params.sessionId);
1363
+ }
659
1364
  const payload = {
660
1365
  status: params.success !== false ? "completed" : "failed",
661
1366
  success: params.success ?? true
@@ -676,6 +1381,27 @@ var SentrialClient = class {
676
1381
  payload
677
1382
  );
678
1383
  }
1384
+ /**
1385
+ * Flush any queued events immediately.
1386
+ *
1387
+ * No-op if batching is not enabled.
1388
+ */
1389
+ async flush() {
1390
+ if (this.batcher) {
1391
+ await this.batcher.flush();
1392
+ }
1393
+ }
1394
+ /**
1395
+ * Shut down the event batcher, flushing remaining events.
1396
+ *
1397
+ * Call this before your process exits for a clean shutdown.
1398
+ * No-op if batching is not enabled.
1399
+ */
1400
+ async shutdown() {
1401
+ if (this.batcher) {
1402
+ await this.batcher.shutdown();
1403
+ }
1404
+ }
679
1405
  /**
680
1406
  * Begin tracking an interaction (simplified API)
681
1407
  *
@@ -712,13 +1438,18 @@ var SentrialClient = class {
712
1438
  if (params.input) {
713
1439
  this.currentState.input = params.input;
714
1440
  }
1441
+ let sessionTokens;
1442
+ if (sessionId) {
1443
+ sessionTokens = _setSessionContextWithTokens(sessionId, this);
1444
+ }
715
1445
  return new Interaction({
716
1446
  client: this,
717
1447
  sessionId,
718
1448
  eventId,
719
1449
  userId: params.userId,
720
1450
  event: params.event,
721
- userInput: params.input
1451
+ userInput: params.input,
1452
+ sessionTokens
722
1453
  });
723
1454
  }
724
1455
  // Cost calculation static methods for convenience
@@ -735,12 +1466,15 @@ var Interaction = class {
735
1466
  userId;
736
1467
  /** Event name for this interaction */
737
1468
  event;
1469
+ startTime = Date.now();
738
1470
  finished = false;
739
1471
  success = true;
740
1472
  failureReason;
741
1473
  output;
742
1474
  userInput;
743
1475
  degraded;
1476
+ /** Context tokens for restoring previous session context on finish() */
1477
+ sessionTokens;
744
1478
  constructor(config) {
745
1479
  this.client = config.client;
746
1480
  this.sessionId = config.sessionId;
@@ -749,6 +1483,7 @@ var Interaction = class {
749
1483
  this.event = config.event;
750
1484
  this.userInput = config.userInput;
751
1485
  this.degraded = config.sessionId === null;
1486
+ this.sessionTokens = config.sessionTokens;
752
1487
  }
753
1488
  /**
754
1489
  * Set the output for this interaction
@@ -784,18 +1519,24 @@ var Interaction = class {
784
1519
  }
785
1520
  this.finished = true;
786
1521
  const finalOutput = params.output ?? this.output;
787
- return this.client.completeSession({
1522
+ const result = await this.client.completeSession({
788
1523
  sessionId: this.sessionId,
789
1524
  success: params.success ?? this.success,
790
1525
  failureReason: params.failureReason ?? this.failureReason,
791
1526
  estimatedCost: params.estimatedCost,
792
1527
  customMetrics: params.customMetrics,
1528
+ durationMs: params.durationMs ?? Date.now() - this.startTime,
793
1529
  promptTokens: params.promptTokens,
794
1530
  completionTokens: params.completionTokens,
795
1531
  totalTokens: params.totalTokens,
796
1532
  userInput: this.userInput,
797
1533
  assistantOutput: finalOutput
798
1534
  });
1535
+ if (this.sessionTokens) {
1536
+ _restoreSessionContext(this.sessionTokens);
1537
+ this.sessionTokens = void 0;
1538
+ }
1539
+ return result;
799
1540
  }
800
1541
  /**
801
1542
  * Track a tool call within this interaction
@@ -855,16 +1596,24 @@ function configure(config) {
855
1596
  function begin(params) {
856
1597
  return getClient().begin(params);
857
1598
  }
1599
+ async function flush() {
1600
+ if (defaultClient) await defaultClient.flush();
1601
+ }
1602
+ async function shutdown() {
1603
+ if (defaultClient) await defaultClient.shutdown();
1604
+ }
858
1605
  var sentrial = {
859
1606
  configure,
860
- begin
1607
+ begin,
1608
+ flush,
1609
+ shutdown
861
1610
  };
862
1611
 
863
1612
  // src/vercel.ts
864
- var _defaultClient = null;
1613
+ var _defaultClient2 = null;
865
1614
  var _globalConfig = {};
866
1615
  function configureVercel(config) {
867
- _defaultClient = new SentrialClient({
1616
+ _defaultClient2 = new SentrialClient({
868
1617
  apiKey: config.apiKey,
869
1618
  apiUrl: config.apiUrl,
870
1619
  failSilently: config.failSilently ?? true
@@ -876,10 +1625,10 @@ function configureVercel(config) {
876
1625
  };
877
1626
  }
878
1627
  function getClient2() {
879
- if (!_defaultClient) {
880
- _defaultClient = new SentrialClient();
1628
+ if (!_defaultClient2) {
1629
+ _defaultClient2 = new SentrialClient();
881
1630
  }
882
- return _defaultClient;
1631
+ return _defaultClient2;
883
1632
  }
884
1633
  function extractModelInfo(model) {
885
1634
  const modelId = model.modelId || model.id || "unknown";
@@ -888,7 +1637,7 @@ function extractModelInfo(model) {
888
1637
  }
889
1638
  function guessProvider(modelId) {
890
1639
  const id = modelId.toLowerCase();
891
- if (id.includes("gpt") || id.includes("o1") || id.includes("o3") || id.includes("o4") || id.startsWith("chatgpt")) return "openai";
1640
+ if (id.includes("gpt") || id.startsWith("o1") || id.startsWith("o3") || id.startsWith("o4") || id.startsWith("chatgpt")) return "openai";
892
1641
  if (id.includes("claude")) return "anthropic";
893
1642
  if (id.includes("gemini")) return "google";
894
1643
  if (id.includes("mistral") || id.includes("mixtral") || id.includes("codestral") || id.includes("pixtral")) return "mistral";
@@ -914,7 +1663,7 @@ function calculateCostForCall(provider, modelId, promptTokens, completionTokens)
914
1663
  case "mistral":
915
1664
  return promptTokens / 1e6 * 2 + completionTokens / 1e6 * 6;
916
1665
  default:
917
- return promptTokens * 3e-6 + completionTokens * 6e-6;
1666
+ return 0;
918
1667
  }
919
1668
  }
920
1669
  function extractInput(params) {
@@ -1047,15 +1796,14 @@ function wrapGenerateText(originalFn, client, config) {
1047
1796
  const result = await originalFn(wrappedParams);
1048
1797
  const durationMs = Date.now() - startTime;
1049
1798
  const resolvedModelId = result.response?.modelId || modelId;
1050
- const promptTokens = result.usage?.promptTokens || 0;
1051
- const completionTokens = result.usage?.completionTokens || 0;
1052
- const totalTokens = result.usage?.totalTokens || promptTokens + completionTokens;
1799
+ const promptTokens = result.usage?.promptTokens ?? 0;
1800
+ const completionTokens = result.usage?.completionTokens ?? 0;
1801
+ const totalTokens = result.usage?.totalTokens ?? promptTokens + completionTokens;
1053
1802
  const cost = calculateCostForCall(provider, resolvedModelId, promptTokens, completionTokens);
1054
1803
  const steps = result.steps;
1055
1804
  if (steps && steps.length >= 1) {
1056
- for (let i = 0; i < steps.length; i++) {
1057
- const step = steps[i];
1058
- await client.trackEvent({
1805
+ const stepPromises = steps.map(
1806
+ (step, i) => client.trackEvent({
1059
1807
  sessionId,
1060
1808
  eventType: "llm_call",
1061
1809
  eventData: {
@@ -1063,14 +1811,16 @@ function wrapGenerateText(originalFn, client, config) {
1063
1811
  provider,
1064
1812
  step: i + 1,
1065
1813
  total_steps: steps.length,
1066
- prompt_tokens: step.usage?.promptTokens || 0,
1067
- completion_tokens: step.usage?.completionTokens || 0,
1068
- total_tokens: step.usage?.totalTokens || 0,
1814
+ prompt_tokens: step.usage?.promptTokens ?? 0,
1815
+ completion_tokens: step.usage?.completionTokens ?? 0,
1816
+ total_tokens: step.usage?.totalTokens ?? 0,
1069
1817
  finish_reason: step.finishReason,
1070
1818
  tool_calls: step.toolCalls?.map((tc) => tc.toolName)
1071
1819
  }
1072
- });
1073
- }
1820
+ }).catch(() => {
1821
+ })
1822
+ );
1823
+ await Promise.all(stepPromises);
1074
1824
  } else {
1075
1825
  await client.trackEvent({
1076
1826
  sessionId,
@@ -1079,164 +1829,17 @@ function wrapGenerateText(originalFn, client, config) {
1079
1829
  model: resolvedModelId,
1080
1830
  provider,
1081
1831
  prompt_tokens: promptTokens,
1082
- completion_tokens: completionTokens,
1083
- total_tokens: totalTokens,
1084
- finish_reason: result.finishReason,
1085
- tool_calls: result.toolCalls?.map((tc) => tc.toolName)
1086
- }
1087
- });
1088
- }
1089
- await client.completeSession({
1090
- sessionId,
1091
- success: true,
1092
- output: result.text,
1093
- durationMs,
1094
- estimatedCost: cost,
1095
- promptTokens,
1096
- completionTokens,
1097
- totalTokens
1098
- });
1099
- return result;
1100
- } catch (error) {
1101
- const durationMs = Date.now() - startTime;
1102
- await client.trackError({
1103
- sessionId,
1104
- errorType: error instanceof Error ? error.name : "Error",
1105
- errorMessage: error instanceof Error ? error.message : "Unknown error"
1106
- });
1107
- await client.completeSession({
1108
- sessionId,
1109
- success: false,
1110
- failureReason: error instanceof Error ? error.message : "Unknown error",
1111
- durationMs
1112
- });
1113
- throw error;
1114
- }
1115
- };
1116
- }
1117
- function wrapStreamText(originalFn, client, config) {
1118
- return (params) => {
1119
- const startTime = Date.now();
1120
- const { modelId, provider } = extractModelInfo(params.model);
1121
- const input = extractInput(params);
1122
- let sessionId = null;
1123
- const sessionPromise = (async () => {
1124
- try {
1125
- const id = await client.createSession({
1126
- name: `streamText: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
1127
- agentName: config.defaultAgent ?? "vercel-ai-sdk",
1128
- userId: config.userId ?? "anonymous",
1129
- convoId: config.convoId,
1130
- metadata: {
1131
- model: modelId,
1132
- provider,
1133
- function: "streamText"
1134
- }
1135
- });
1136
- sessionId = id;
1137
- if (id) {
1138
- client.setInput(id, input).catch(() => {
1139
- });
1140
- }
1141
- return id;
1142
- } catch {
1143
- return null;
1144
- }
1145
- })();
1146
- let tracked = false;
1147
- async function trackCompletion(fullText, usageInfo, error) {
1148
- if (tracked) return;
1149
- tracked = true;
1150
- const durationMs = Date.now() - startTime;
1151
- const sid = sessionId || await sessionPromise;
1152
- if (!sid) return;
1153
- if (error) {
1154
- await client.trackError({
1155
- sessionId: sid,
1156
- errorType: error.name || "Error",
1157
- errorMessage: error.message || "Unknown error"
1158
- });
1159
- await client.completeSession({
1160
- sessionId: sid,
1161
- success: false,
1162
- failureReason: error.message || "Unknown error",
1163
- durationMs
1164
- });
1165
- return;
1166
- }
1167
- const promptTokens = usageInfo?.promptTokens || 0;
1168
- const completionTokens = usageInfo?.completionTokens || 0;
1169
- const totalTokens = usageInfo?.totalTokens || promptTokens + completionTokens;
1170
- const cost = calculateCostForCall(provider, modelId, promptTokens, completionTokens);
1171
- await client.completeSession({
1172
- sessionId: sid,
1173
- success: true,
1174
- output: fullText,
1175
- durationMs,
1176
- estimatedCost: cost,
1177
- promptTokens,
1178
- completionTokens,
1179
- totalTokens
1180
- });
1181
- }
1182
- const userOnFinish = params.onFinish;
1183
- const wrappedParams = {
1184
- ...params,
1185
- tools: params.tools ? wrapToolsAsync(params.tools, sessionPromise, client) : void 0,
1186
- onFinish: async (event) => {
1187
- try {
1188
- if (userOnFinish) await userOnFinish(event);
1189
- } catch {
1190
- }
1191
- await trackCompletion(event.text, event.usage);
1192
- }
1193
- };
1194
- const result = originalFn(wrappedParams);
1195
- const textProp = result.text;
1196
- if (textProp != null && typeof textProp.then === "function") {
1197
- textProp.then((text) => {
1198
- trackCompletion(text).catch(() => {
1199
- });
1200
- }).catch((err) => {
1201
- trackCompletion("", void 0, err instanceof Error ? err : new Error(String(err))).catch(() => {
1202
- });
1203
- });
1204
- }
1205
- return result;
1206
- };
1207
- }
1208
- function wrapGenerateObject(originalFn, client, config) {
1209
- return async (params) => {
1210
- const startTime = Date.now();
1211
- const { modelId, provider } = extractModelInfo(params.model);
1212
- const input = extractInput(params);
1213
- const sessionId = await client.createSession({
1214
- name: `generateObject: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
1215
- agentName: config.defaultAgent ?? "vercel-ai-sdk",
1216
- userId: config.userId ?? "anonymous",
1217
- convoId: config.convoId,
1218
- metadata: {
1219
- model: modelId,
1220
- provider,
1221
- function: "generateObject"
1222
- }
1223
- });
1224
- if (!sessionId) {
1225
- return originalFn(params);
1226
- }
1227
- await client.setInput(sessionId, input);
1228
- try {
1229
- const result = await originalFn(params);
1230
- const durationMs = Date.now() - startTime;
1231
- const resolvedModelId = result.response?.modelId || modelId;
1232
- const promptTokens = result.usage?.promptTokens || 0;
1233
- const completionTokens = result.usage?.completionTokens || 0;
1234
- const totalTokens = result.usage?.totalTokens || promptTokens + completionTokens;
1235
- const cost = calculateCostForCall(provider, resolvedModelId, promptTokens, completionTokens);
1832
+ completion_tokens: completionTokens,
1833
+ total_tokens: totalTokens,
1834
+ finish_reason: result.finishReason,
1835
+ tool_calls: result.toolCalls?.map((tc) => tc.toolName)
1836
+ }
1837
+ });
1838
+ }
1236
1839
  await client.completeSession({
1237
1840
  sessionId,
1238
1841
  success: true,
1239
- output: JSON.stringify(result.object),
1842
+ output: result.text,
1240
1843
  durationMs,
1241
1844
  estimatedCost: cost,
1242
1845
  promptTokens,
@@ -1261,24 +1864,26 @@ function wrapGenerateObject(originalFn, client, config) {
1261
1864
  }
1262
1865
  };
1263
1866
  }
1264
- function wrapStreamObject(originalFn, client, config) {
1867
+ function wrapStreamText(originalFn, client, config) {
1265
1868
  return (params) => {
1266
1869
  const startTime = Date.now();
1267
1870
  const { modelId, provider } = extractModelInfo(params.model);
1268
1871
  const input = extractInput(params);
1872
+ let sessionId = null;
1269
1873
  const sessionPromise = (async () => {
1270
1874
  try {
1271
1875
  const id = await client.createSession({
1272
- name: `streamObject: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
1876
+ name: `streamText: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
1273
1877
  agentName: config.defaultAgent ?? "vercel-ai-sdk",
1274
1878
  userId: config.userId ?? "anonymous",
1275
1879
  convoId: config.convoId,
1276
1880
  metadata: {
1277
1881
  model: modelId,
1278
1882
  provider,
1279
- function: "streamObject"
1883
+ function: "streamText"
1280
1884
  }
1281
1885
  });
1886
+ sessionId = id;
1282
1887
  if (id) {
1283
1888
  client.setInput(id, input).catch(() => {
1284
1889
  });
@@ -1288,364 +1893,481 @@ function wrapStreamObject(originalFn, client, config) {
1288
1893
  return null;
1289
1894
  }
1290
1895
  })();
1291
- let tracked = false;
1292
- const userOnFinish = params.onFinish;
1293
1896
  const wrappedParams = {
1294
1897
  ...params,
1295
- onFinish: async (event) => {
1296
- try {
1297
- if (userOnFinish) await userOnFinish(event);
1298
- } catch {
1299
- }
1300
- if (tracked) return;
1301
- tracked = true;
1302
- const durationMs = Date.now() - startTime;
1303
- const sid = await sessionPromise;
1304
- if (!sid) return;
1305
- if (event.error) {
1306
- const err = event.error instanceof Error ? event.error : new Error(String(event.error));
1307
- await client.trackError({
1308
- sessionId: sid,
1309
- errorType: err.name || "Error",
1310
- errorMessage: err.message || "Unknown error"
1311
- });
1312
- await client.completeSession({
1898
+ tools: params.tools ? wrapToolsAsync(params.tools, sessionPromise, client) : void 0
1899
+ };
1900
+ const result = originalFn(wrappedParams);
1901
+ const originalTextStream = result.textStream;
1902
+ let fullText = "";
1903
+ let tracked = false;
1904
+ async function trackCompletion(text, error) {
1905
+ if (tracked) return;
1906
+ tracked = true;
1907
+ const durationMs = Date.now() - startTime;
1908
+ const sid = sessionId || await sessionPromise;
1909
+ if (!sid) return;
1910
+ if (error) {
1911
+ await client.trackError({
1912
+ sessionId: sid,
1913
+ errorType: error.name || "Error",
1914
+ errorMessage: error.message || "Unknown error"
1915
+ }).catch(() => {
1916
+ });
1917
+ await client.completeSession({
1918
+ sessionId: sid,
1919
+ success: false,
1920
+ failureReason: error.message || "Unknown error",
1921
+ durationMs
1922
+ }).catch(() => {
1923
+ });
1924
+ return;
1925
+ }
1926
+ let resolvedModelId = modelId;
1927
+ try {
1928
+ const resp = result.response ? await result.response : void 0;
1929
+ if (resp?.modelId) resolvedModelId = resp.modelId;
1930
+ } catch {
1931
+ }
1932
+ let usage;
1933
+ try {
1934
+ usage = result.usage ? await result.usage : void 0;
1935
+ } catch {
1936
+ }
1937
+ let steps;
1938
+ try {
1939
+ steps = result.steps ? await result.steps : void 0;
1940
+ } catch {
1941
+ }
1942
+ if (steps && steps.length >= 1) {
1943
+ let totalPrompt = 0, totalCompletion = 0;
1944
+ const stepPromises = steps.map((step, i) => {
1945
+ const sp = step.usage?.promptTokens ?? 0;
1946
+ const sc = step.usage?.completionTokens ?? 0;
1947
+ totalPrompt += sp;
1948
+ totalCompletion += sc;
1949
+ return client.trackEvent({
1313
1950
  sessionId: sid,
1314
- success: false,
1315
- failureReason: err.message || "Unknown error",
1316
- durationMs
1951
+ eventType: "llm_call",
1952
+ eventData: {
1953
+ model: resolvedModelId,
1954
+ provider,
1955
+ step: i + 1,
1956
+ total_steps: steps.length,
1957
+ prompt_tokens: sp,
1958
+ completion_tokens: sc,
1959
+ total_tokens: step.usage?.totalTokens ?? 0,
1960
+ finish_reason: step.finishReason,
1961
+ tool_calls: step.toolCalls?.map((tc) => tc.toolName)
1962
+ }
1963
+ }).catch(() => {
1317
1964
  });
1318
- return;
1319
- }
1320
- const promptTokens = event.usage?.promptTokens || 0;
1321
- const completionTokens = event.usage?.completionTokens || 0;
1322
- const totalTokens = event.usage?.totalTokens || promptTokens + completionTokens;
1323
- const cost = calculateCostForCall(provider, modelId, promptTokens, completionTokens);
1965
+ });
1966
+ await Promise.all(stepPromises);
1967
+ const promptTokens = usage?.promptTokens ?? totalPrompt;
1968
+ const completionTokens = usage?.completionTokens ?? totalCompletion;
1969
+ const totalTokens = usage?.totalTokens ?? promptTokens + completionTokens;
1970
+ const cost = calculateCostForCall(provider, resolvedModelId, promptTokens, completionTokens);
1971
+ await client.completeSession({
1972
+ sessionId: sid,
1973
+ success: true,
1974
+ output: text,
1975
+ durationMs,
1976
+ estimatedCost: cost,
1977
+ promptTokens,
1978
+ completionTokens,
1979
+ totalTokens
1980
+ }).catch(() => {
1981
+ });
1982
+ } else {
1983
+ const promptTokens = usage?.promptTokens ?? 0;
1984
+ const completionTokens = usage?.completionTokens ?? 0;
1985
+ const totalTokens = usage?.totalTokens ?? promptTokens + completionTokens;
1986
+ const cost = calculateCostForCall(provider, resolvedModelId, promptTokens, completionTokens);
1987
+ await client.trackEvent({
1988
+ sessionId: sid,
1989
+ eventType: "llm_call",
1990
+ eventData: {
1991
+ model: resolvedModelId,
1992
+ provider,
1993
+ prompt_tokens: promptTokens,
1994
+ completion_tokens: completionTokens,
1995
+ total_tokens: totalTokens
1996
+ }
1997
+ }).catch(() => {
1998
+ });
1324
1999
  await client.completeSession({
1325
2000
  sessionId: sid,
1326
2001
  success: true,
1327
- output: event.object != null ? JSON.stringify(event.object) : "",
2002
+ output: text,
1328
2003
  durationMs,
1329
2004
  estimatedCost: cost,
1330
2005
  promptTokens,
1331
2006
  completionTokens,
1332
2007
  totalTokens
2008
+ }).catch(() => {
1333
2009
  });
1334
2010
  }
1335
- };
1336
- return originalFn(wrappedParams);
1337
- };
1338
- }
1339
- function wrapAISDK(ai, options) {
1340
- const client = options?.client ?? getClient2();
1341
- const config = {
1342
- defaultAgent: options?.defaultAgent ?? _globalConfig.defaultAgent,
1343
- userId: options?.userId ?? _globalConfig.userId,
1344
- convoId: options?.convoId ?? _globalConfig.convoId
1345
- };
1346
- return {
1347
- generateText: ai.generateText ? wrapGenerateText(ai.generateText, client, config) : wrapGenerateText(
1348
- () => Promise.reject(new Error("generateText not available")),
1349
- client,
1350
- config
1351
- ),
1352
- streamText: ai.streamText ? wrapStreamText(ai.streamText, client, config) : wrapStreamText(() => ({ textStream: (async function* () {
1353
- })() }), client, config),
1354
- generateObject: ai.generateObject ? wrapGenerateObject(ai.generateObject, client, config) : wrapGenerateObject(
1355
- () => Promise.reject(new Error("generateObject not available")),
1356
- client,
1357
- config
1358
- ),
1359
- streamObject: ai.streamObject ? wrapStreamObject(ai.streamObject, client, config) : wrapStreamObject(() => ({}), client, config)
2011
+ }
2012
+ result.textStream = (async function* () {
2013
+ try {
2014
+ for await (const chunk of originalTextStream) {
2015
+ fullText += chunk;
2016
+ yield chunk;
2017
+ }
2018
+ await trackCompletion(fullText);
2019
+ } catch (error) {
2020
+ await trackCompletion(
2021
+ fullText,
2022
+ error instanceof Error ? error : new Error(String(error))
2023
+ );
2024
+ throw error;
2025
+ }
2026
+ })();
2027
+ return result;
1360
2028
  };
1361
2029
  }
1362
-
1363
- // src/wrappers.ts
1364
- var _currentSessionId = null;
1365
- var _currentClient = null;
1366
- var _defaultClient2 = null;
1367
- function setSessionContext(sessionId, client) {
1368
- _currentSessionId = sessionId;
1369
- if (client) {
1370
- _currentClient = client;
1371
- }
1372
- }
1373
- function clearSessionContext() {
1374
- _currentSessionId = null;
1375
- _currentClient = null;
1376
- }
1377
- function getSessionContext() {
1378
- return _currentSessionId;
1379
- }
1380
- function setDefaultClient(client) {
1381
- _defaultClient2 = client;
1382
- }
1383
- function getTrackingClient() {
1384
- return _currentClient ?? _defaultClient2;
1385
- }
1386
- function wrapOpenAI(client, options = {}) {
1387
- const { trackWithoutSession = false } = options;
1388
- const chat = client.chat;
1389
- if (!chat?.completions?.create) {
1390
- console.warn("Sentrial: OpenAI client does not have chat.completions.create");
1391
- return client;
1392
- }
1393
- const originalCreate = chat.completions.create.bind(chat.completions);
1394
- chat.completions.create = async function(...args) {
2030
+ function wrapGenerateObject(originalFn, client, config) {
2031
+ return async (params) => {
1395
2032
  const startTime = Date.now();
1396
- const params = args[0] ?? {};
1397
- const messages = params.messages ?? [];
1398
- const model = params.model ?? "unknown";
1399
- try {
1400
- const response = await originalCreate(...args);
1401
- const durationMs = Date.now() - startTime;
1402
- const promptTokens = response.usage?.prompt_tokens ?? 0;
1403
- const completionTokens = response.usage?.completion_tokens ?? 0;
1404
- const totalTokens = response.usage?.total_tokens ?? 0;
1405
- let outputContent = "";
1406
- if (response.choices?.[0]?.message?.content) {
1407
- outputContent = response.choices[0].message.content;
2033
+ const { modelId, provider } = extractModelInfo(params.model);
2034
+ const input = extractInput(params);
2035
+ const sessionId = await client.createSession({
2036
+ name: `generateObject: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
2037
+ agentName: config.defaultAgent ?? "vercel-ai-sdk",
2038
+ userId: config.userId ?? "anonymous",
2039
+ convoId: config.convoId,
2040
+ metadata: {
2041
+ model: modelId,
2042
+ provider,
2043
+ function: "generateObject"
1408
2044
  }
1409
- const cost = calculateOpenAICost({ model, inputTokens: promptTokens, outputTokens: completionTokens });
1410
- trackLLMCall({
1411
- provider: "openai",
1412
- model,
1413
- messages,
1414
- output: outputContent,
1415
- promptTokens,
1416
- completionTokens,
1417
- totalTokens,
1418
- cost,
1419
- durationMs,
1420
- trackWithoutSession
1421
- });
1422
- return response;
1423
- } catch (error) {
1424
- const durationMs = Date.now() - startTime;
1425
- trackLLMError({
1426
- provider: "openai",
1427
- model,
1428
- messages,
1429
- error,
1430
- durationMs,
1431
- trackWithoutSession
1432
- });
1433
- throw error;
2045
+ });
2046
+ if (!sessionId) {
2047
+ return originalFn(params);
1434
2048
  }
1435
- };
1436
- return client;
1437
- }
1438
- function wrapAnthropic(client, options = {}) {
1439
- const { trackWithoutSession = false } = options;
1440
- const messages = client.messages;
1441
- if (!messages?.create) {
1442
- console.warn("Sentrial: Anthropic client does not have messages.create");
1443
- return client;
1444
- }
1445
- const originalCreate = messages.create.bind(messages);
1446
- messages.create = async function(...args) {
1447
- const startTime = Date.now();
1448
- const params = args[0] ?? {};
1449
- const inputMessages = params.messages ?? [];
1450
- const model = params.model ?? "unknown";
1451
- const system = params.system ?? "";
2049
+ await client.setInput(sessionId, input);
1452
2050
  try {
1453
- const response = await originalCreate(...args);
2051
+ const result = await originalFn(params);
1454
2052
  const durationMs = Date.now() - startTime;
1455
- const promptTokens = response.usage?.input_tokens ?? 0;
1456
- const completionTokens = response.usage?.output_tokens ?? 0;
1457
- const totalTokens = promptTokens + completionTokens;
1458
- let outputContent = "";
1459
- if (response.content) {
1460
- for (const block of response.content) {
1461
- if (block.type === "text") {
1462
- outputContent += block.text;
1463
- }
2053
+ const resolvedModelId = result.response?.modelId || modelId;
2054
+ const promptTokens = result.usage?.promptTokens ?? 0;
2055
+ const completionTokens = result.usage?.completionTokens ?? 0;
2056
+ const totalTokens = result.usage?.totalTokens ?? promptTokens + completionTokens;
2057
+ const cost = calculateCostForCall(provider, resolvedModelId, promptTokens, completionTokens);
2058
+ await client.trackEvent({
2059
+ sessionId,
2060
+ eventType: "llm_call",
2061
+ eventData: {
2062
+ model: resolvedModelId,
2063
+ provider,
2064
+ prompt_tokens: promptTokens,
2065
+ completion_tokens: completionTokens,
2066
+ total_tokens: totalTokens
1464
2067
  }
1465
- }
1466
- const cost = calculateAnthropicCost({ model, inputTokens: promptTokens, outputTokens: completionTokens });
1467
- const fullMessages = system ? [{ role: "system", content: system }, ...inputMessages] : inputMessages;
1468
- trackLLMCall({
1469
- provider: "anthropic",
1470
- model,
1471
- messages: fullMessages,
1472
- output: outputContent,
2068
+ }).catch(() => {
2069
+ });
2070
+ await client.completeSession({
2071
+ sessionId,
2072
+ success: true,
2073
+ output: JSON.stringify(result.object),
2074
+ durationMs,
2075
+ estimatedCost: cost,
1473
2076
  promptTokens,
1474
2077
  completionTokens,
1475
- totalTokens,
1476
- cost,
1477
- durationMs,
1478
- trackWithoutSession
2078
+ totalTokens
2079
+ });
2080
+ return result;
2081
+ } catch (error) {
2082
+ const durationMs = Date.now() - startTime;
2083
+ await client.trackError({
2084
+ sessionId,
2085
+ errorType: error instanceof Error ? error.name : "Error",
2086
+ errorMessage: error instanceof Error ? error.message : "Unknown error"
1479
2087
  });
1480
- return response;
1481
- } catch (error) {
1482
- const durationMs = Date.now() - startTime;
1483
- trackLLMError({
1484
- provider: "anthropic",
1485
- model,
1486
- messages: inputMessages,
1487
- error,
1488
- durationMs,
1489
- trackWithoutSession
2088
+ await client.completeSession({
2089
+ sessionId,
2090
+ success: false,
2091
+ failureReason: error instanceof Error ? error.message : "Unknown error",
2092
+ durationMs
1490
2093
  });
1491
2094
  throw error;
1492
2095
  }
1493
2096
  };
1494
- return client;
1495
2097
  }
1496
- function wrapGoogle(model, options = {}) {
1497
- const { trackWithoutSession = false } = options;
1498
- const originalGenerate = model.generateContent;
1499
- if (!originalGenerate) {
1500
- console.warn("Sentrial: Google model does not have generateContent");
1501
- return model;
1502
- }
1503
- model.generateContent = async function(...args) {
2098
+ function wrapStreamObject(originalFn, client, config) {
2099
+ return (params) => {
1504
2100
  const startTime = Date.now();
1505
- const contents = args[0];
1506
- const modelName = model.model ?? "gemini-unknown";
1507
- const messages = googleContentsToMessages(contents);
1508
- try {
1509
- const response = await originalGenerate.apply(model, args);
2101
+ const { modelId, provider } = extractModelInfo(params.model);
2102
+ const input = extractInput(params);
2103
+ const sessionPromise = (async () => {
2104
+ try {
2105
+ const id = await client.createSession({
2106
+ name: `streamObject: ${input.slice(0, 50)}${input.length > 50 ? "..." : ""}`,
2107
+ agentName: config.defaultAgent ?? "vercel-ai-sdk",
2108
+ userId: config.userId ?? "anonymous",
2109
+ convoId: config.convoId,
2110
+ metadata: {
2111
+ model: modelId,
2112
+ provider,
2113
+ function: "streamObject"
2114
+ }
2115
+ });
2116
+ if (id) {
2117
+ client.setInput(id, input).catch(() => {
2118
+ });
2119
+ }
2120
+ return id;
2121
+ } catch {
2122
+ return null;
2123
+ }
2124
+ })();
2125
+ const result = originalFn(params);
2126
+ async function completeStreamObject(obj, error) {
1510
2127
  const durationMs = Date.now() - startTime;
1511
- let promptTokens = 0;
1512
- let completionTokens = 0;
1513
- if (response.usageMetadata) {
1514
- promptTokens = response.usageMetadata.promptTokenCount ?? 0;
1515
- completionTokens = response.usageMetadata.candidatesTokenCount ?? 0;
2128
+ const sid = await sessionPromise;
2129
+ if (!sid) return;
2130
+ if (error) {
2131
+ await client.trackError({
2132
+ sessionId: sid,
2133
+ errorType: error.name || "Error",
2134
+ errorMessage: error.message || "Unknown error"
2135
+ }).catch(() => {
2136
+ });
2137
+ await client.completeSession({
2138
+ sessionId: sid,
2139
+ success: false,
2140
+ failureReason: error.message || "Unknown error",
2141
+ durationMs
2142
+ }).catch(() => {
2143
+ });
2144
+ return;
1516
2145
  }
1517
- const totalTokens = promptTokens + completionTokens;
1518
- let outputContent = "";
2146
+ let usage;
1519
2147
  try {
1520
- outputContent = response.response?.text() ?? "";
2148
+ usage = result.usage ? await result.usage : void 0;
1521
2149
  } catch {
1522
2150
  }
1523
- const cost = calculateGoogleCost({ model: modelName, inputTokens: promptTokens, outputTokens: completionTokens });
1524
- trackLLMCall({
1525
- provider: "google",
1526
- model: modelName,
1527
- messages,
1528
- output: outputContent,
2151
+ const promptTokens = usage?.promptTokens ?? 0;
2152
+ const completionTokens = usage?.completionTokens ?? 0;
2153
+ const totalTokens = usage?.totalTokens ?? promptTokens + completionTokens;
2154
+ const cost = calculateCostForCall(provider, modelId, promptTokens, completionTokens);
2155
+ await client.trackEvent({
2156
+ sessionId: sid,
2157
+ eventType: "llm_call",
2158
+ eventData: {
2159
+ model: modelId,
2160
+ provider,
2161
+ prompt_tokens: promptTokens,
2162
+ completion_tokens: completionTokens,
2163
+ total_tokens: totalTokens
2164
+ }
2165
+ }).catch(() => {
2166
+ });
2167
+ await client.completeSession({
2168
+ sessionId: sid,
2169
+ success: true,
2170
+ output: JSON.stringify(obj),
2171
+ durationMs,
2172
+ estimatedCost: cost,
1529
2173
  promptTokens,
1530
2174
  completionTokens,
1531
- totalTokens,
1532
- cost,
1533
- durationMs,
1534
- trackWithoutSession
2175
+ totalTokens
2176
+ }).catch(() => {
1535
2177
  });
1536
- return response;
1537
- } catch (error) {
1538
- const durationMs = Date.now() - startTime;
1539
- trackLLMError({
1540
- provider: "google",
1541
- model: modelName,
1542
- messages,
1543
- error,
1544
- durationMs,
1545
- trackWithoutSession
2178
+ }
2179
+ if (result.object) {
2180
+ const originalObjectPromise = result.object;
2181
+ result.object = originalObjectPromise.then(async (obj) => {
2182
+ await completeStreamObject(obj);
2183
+ return obj;
2184
+ }).catch(async (error) => {
2185
+ await completeStreamObject(void 0, error instanceof Error ? error : new Error(String(error)));
2186
+ throw error;
2187
+ });
2188
+ } else if (result.usage) {
2189
+ result.usage.then(async () => {
2190
+ await completeStreamObject(void 0);
2191
+ }).catch(async (error) => {
2192
+ await completeStreamObject(void 0, error instanceof Error ? error : new Error(String(error)));
1546
2193
  });
1547
- throw error;
1548
2194
  }
2195
+ return result;
1549
2196
  };
1550
- return model;
1551
- }
1552
- function googleContentsToMessages(contents) {
1553
- if (typeof contents === "string") {
1554
- return [{ role: "user", content: contents }];
1555
- }
1556
- if (Array.isArray(contents)) {
1557
- return contents.map((item) => {
1558
- if (typeof item === "string") {
1559
- return { role: "user", content: item };
1560
- }
1561
- if (item && typeof item === "object") {
1562
- return { role: item.role ?? "user", content: String(item.content ?? item) };
1563
- }
1564
- return { role: "user", content: String(item) };
1565
- });
1566
- }
1567
- return [{ role: "user", content: String(contents) }];
1568
2197
  }
1569
- function wrapLLM(client, provider) {
1570
- if (provider === "openai" || client.chat?.completions?.create) {
1571
- return wrapOpenAI(client);
1572
- }
1573
- if (provider === "anthropic" || client.messages?.create) {
1574
- return wrapAnthropic(client);
1575
- }
1576
- if (provider === "google" || client.generateContent) {
1577
- return wrapGoogle(client);
1578
- }
1579
- console.warn("Sentrial: Unknown LLM client type. No auto-tracking applied.");
1580
- return client;
2198
+ function wrapAISDK(ai, options) {
2199
+ const client = options?.client ?? getClient2();
2200
+ const config = {
2201
+ defaultAgent: options?.defaultAgent ?? _globalConfig.defaultAgent,
2202
+ userId: options?.userId ?? _globalConfig.userId,
2203
+ convoId: options?.convoId ?? _globalConfig.convoId
2204
+ };
2205
+ return {
2206
+ generateText: ai.generateText ? wrapGenerateText(ai.generateText, client, config) : wrapGenerateText(
2207
+ () => Promise.reject(new Error("generateText not available")),
2208
+ client,
2209
+ config
2210
+ ),
2211
+ streamText: ai.streamText ? wrapStreamText(ai.streamText, client, config) : wrapStreamText(() => ({ textStream: (async function* () {
2212
+ })() }), client, config),
2213
+ generateObject: ai.generateObject ? wrapGenerateObject(ai.generateObject, client, config) : wrapGenerateObject(
2214
+ () => Promise.reject(new Error("generateObject not available")),
2215
+ client,
2216
+ config
2217
+ ),
2218
+ streamObject: ai.streamObject ? wrapStreamObject(ai.streamObject, client, config) : wrapStreamObject(() => ({}), client, config)
2219
+ };
1581
2220
  }
1582
- function trackLLMCall(params) {
1583
- const client = getTrackingClient();
1584
- if (!client) return;
1585
- const sessionId = _currentSessionId;
1586
- if (!sessionId && !params.trackWithoutSession) {
1587
- return;
1588
- }
1589
- if (sessionId) {
1590
- client.trackToolCall({
1591
- sessionId,
1592
- toolName: `llm:${params.provider}:${params.model}`,
1593
- toolInput: {
1594
- messages: params.messages,
1595
- model: params.model,
1596
- provider: params.provider
1597
- },
1598
- toolOutput: {
1599
- content: params.output,
1600
- tokens: {
1601
- prompt: params.promptTokens,
1602
- completion: params.completionTokens,
1603
- total: params.totalTokens
1604
- },
1605
- cost_usd: params.cost
1606
- },
1607
- reasoning: `LLM call to ${params.provider} ${params.model}`,
1608
- estimatedCost: params.cost,
1609
- tokenCount: params.totalTokens,
1610
- metadata: {
1611
- provider: params.provider,
1612
- model: params.model,
1613
- duration_ms: params.durationMs,
1614
- prompt_tokens: params.promptTokens,
1615
- completion_tokens: params.completionTokens
1616
- }
1617
- }).catch((err) => {
1618
- console.warn("Sentrial: Failed to track LLM call:", err.message);
2221
+
2222
+ // src/claude-code.ts
2223
+ function wrapClaudeAgent(queryFn, wrapOptions) {
2224
+ const {
2225
+ client,
2226
+ defaultAgent = "claude-agent",
2227
+ userId = "anonymous",
2228
+ convoId,
2229
+ extraMetadata
2230
+ } = wrapOptions;
2231
+ return function wrappedQuery(params) {
2232
+ const { prompt, options = {} } = params;
2233
+ const startTime = Date.now();
2234
+ let sessionId = null;
2235
+ let resolveSessionReady;
2236
+ const sessionReady = new Promise((resolve) => {
2237
+ resolveSessionReady = resolve;
1619
2238
  });
1620
- }
1621
- }
1622
- function trackLLMError(params) {
1623
- const client = getTrackingClient();
1624
- if (!client) return;
1625
- const sessionId = _currentSessionId;
1626
- if (!sessionId && !params.trackWithoutSession) {
1627
- return;
1628
- }
1629
- if (sessionId) {
1630
- client.trackError({
1631
- sessionId,
1632
- errorMessage: params.error.message,
1633
- errorType: params.error.name,
1634
- toolName: `llm:${params.provider}:${params.model}`,
1635
- metadata: {
1636
- provider: params.provider,
1637
- model: params.model,
1638
- duration_ms: params.durationMs
2239
+ const sessionName = typeof prompt === "string" ? `${defaultAgent}: ${prompt.slice(0, 100)}` : `${defaultAgent} session`;
2240
+ const pendingToolCalls = [];
2241
+ const sentrialToolHook = {
2242
+ hooks: [
2243
+ async (input, toolUseID, _opts) => {
2244
+ await sessionReady;
2245
+ if (!sessionId) return;
2246
+ const toolOutput = input?.tool_response && typeof input.tool_response === "object" ? input.tool_response : { response: input?.tool_response ?? null };
2247
+ const p = client.trackToolCall({
2248
+ sessionId,
2249
+ toolName: input?.tool_name ?? "unknown",
2250
+ toolInput: input?.tool_input ?? {},
2251
+ toolOutput,
2252
+ metadata: { tool_use_id: toolUseID }
2253
+ }).catch(() => {
2254
+ });
2255
+ pendingToolCalls.push(p);
2256
+ }
2257
+ ]
2258
+ };
2259
+ const sentrialToolFailureHook = {
2260
+ hooks: [
2261
+ async (input, toolUseID, _opts) => {
2262
+ await sessionReady;
2263
+ if (!sessionId) return;
2264
+ const p = client.trackToolCall({
2265
+ sessionId,
2266
+ toolName: input?.tool_name ?? "unknown",
2267
+ toolInput: input?.tool_input ?? {},
2268
+ toolOutput: {},
2269
+ toolError: { message: input?.error ?? "unknown error" },
2270
+ metadata: { tool_use_id: toolUseID }
2271
+ }).catch(() => {
2272
+ });
2273
+ pendingToolCalls.push(p);
2274
+ }
2275
+ ]
2276
+ };
2277
+ const mergedHooks = {
2278
+ ...options.hooks ?? {}
2279
+ };
2280
+ const existingPostToolUse = mergedHooks.PostToolUse ?? [];
2281
+ mergedHooks.PostToolUse = [...existingPostToolUse, sentrialToolHook];
2282
+ const existingPostToolUseFailure = mergedHooks.PostToolUseFailure ?? [];
2283
+ mergedHooks.PostToolUseFailure = [...existingPostToolUseFailure, sentrialToolFailureHook];
2284
+ const mergedOptions = {
2285
+ ...options,
2286
+ hooks: mergedHooks
2287
+ };
2288
+ const generator = queryFn({ prompt, options: mergedOptions });
2289
+ return (async function* () {
2290
+ try {
2291
+ for await (const message of generator) {
2292
+ if (message.type === "system" && message.subtype === "init") {
2293
+ const metadata = {
2294
+ model: message.model,
2295
+ tools: message.tools,
2296
+ cwd: message.cwd,
2297
+ mcp_servers: message.mcp_servers,
2298
+ sdk_session_id: message.session_id,
2299
+ ...extraMetadata ?? {}
2300
+ };
2301
+ try {
2302
+ sessionId = await client.createSession({
2303
+ name: sessionName,
2304
+ agentName: defaultAgent,
2305
+ userId,
2306
+ convoId,
2307
+ metadata
2308
+ });
2309
+ } catch {
2310
+ sessionId = null;
2311
+ }
2312
+ resolveSessionReady();
2313
+ }
2314
+ if (message.type === "result" && sessionId) {
2315
+ const isError = !!message.is_error;
2316
+ const inputTokens = message.usage?.input_tokens ?? 0;
2317
+ const outputTokens = message.usage?.output_tokens ?? 0;
2318
+ let failureReason;
2319
+ if (isError) {
2320
+ if (message.errors && message.errors.length > 0) {
2321
+ failureReason = message.errors.join("; ");
2322
+ } else {
2323
+ failureReason = message.subtype;
2324
+ }
2325
+ }
2326
+ await Promise.allSettled(pendingToolCalls);
2327
+ try {
2328
+ await client.completeSession({
2329
+ sessionId,
2330
+ success: !isError,
2331
+ failureReason,
2332
+ estimatedCost: message.total_cost_usd,
2333
+ promptTokens: inputTokens,
2334
+ completionTokens: outputTokens,
2335
+ totalTokens: inputTokens + outputTokens,
2336
+ durationMs: message.duration_ms ?? Date.now() - startTime,
2337
+ userInput: typeof prompt === "string" ? prompt : void 0,
2338
+ output: message.result,
2339
+ customMetrics: {
2340
+ num_turns: message.num_turns ?? 0,
2341
+ duration_api_ms: message.duration_api_ms ?? 0
2342
+ }
2343
+ });
2344
+ } catch {
2345
+ }
2346
+ }
2347
+ yield message;
2348
+ }
2349
+ } catch (error) {
2350
+ if (sessionId) {
2351
+ await Promise.allSettled(pendingToolCalls);
2352
+ try {
2353
+ await client.completeSession({
2354
+ sessionId,
2355
+ success: false,
2356
+ failureReason: error instanceof Error ? error.message : String(error),
2357
+ durationMs: Date.now() - startTime
2358
+ });
2359
+ } catch {
2360
+ }
2361
+ }
2362
+ throw error;
1639
2363
  }
1640
- }).catch((err) => {
1641
- console.warn("Sentrial: Failed to track LLM error:", err.message);
1642
- });
1643
- }
2364
+ })();
2365
+ };
1644
2366
  }
1645
2367
 
1646
2368
  // src/decorators.ts
1647
2369
  var _defaultClient3 = null;
1648
- var _currentInteraction = null;
2370
+ var _currentInteraction = createContextVar(null);
1649
2371
  function getClient3() {
1650
2372
  if (!_defaultClient3) {
1651
2373
  try {
@@ -1665,7 +2387,7 @@ function getCurrentSessionId() {
1665
2387
  return getSessionContext();
1666
2388
  }
1667
2389
  function getCurrentInteraction() {
1668
- return _currentInteraction;
2390
+ return _currentInteraction.get();
1669
2391
  }
1670
2392
  function withTool(name, fn) {
1671
2393
  const isAsync = fn.constructor.name === "AsyncFunction";
@@ -1766,10 +2488,11 @@ function withSession(agentName, fn, options = {}) {
1766
2488
  input: userInput
1767
2489
  });
1768
2490
  const sessionId = interaction.getSessionId();
2491
+ let sessionTokens;
1769
2492
  if (sessionId) {
1770
- setSessionContext(sessionId, client);
2493
+ sessionTokens = _setSessionContextWithTokens(sessionId, client);
1771
2494
  }
1772
- _currentInteraction = interaction;
2495
+ const interactionToken = _currentInteraction.set(interaction);
1773
2496
  try {
1774
2497
  const result = await fn(...args);
1775
2498
  let output;
@@ -1794,8 +2517,10 @@ function withSession(agentName, fn, options = {}) {
1794
2517
  });
1795
2518
  throw error;
1796
2519
  } finally {
1797
- clearSessionContext();
1798
- _currentInteraction = null;
2520
+ if (sessionTokens) {
2521
+ _restoreSessionContext(sessionTokens);
2522
+ }
2523
+ _currentInteraction.reset(interactionToken);
1799
2524
  }
1800
2525
  };
1801
2526
  }
@@ -1881,10 +2606,11 @@ function TrackSession(agentName, options) {
1881
2606
  input: userInput
1882
2607
  });
1883
2608
  const sessionId = interaction.getSessionId();
2609
+ let sessionTokens;
1884
2610
  if (sessionId) {
1885
- setSessionContext(sessionId, client);
2611
+ sessionTokens = _setSessionContextWithTokens(sessionId, client);
1886
2612
  }
1887
- _currentInteraction = interaction;
2613
+ const interactionToken = _currentInteraction.set(interaction);
1888
2614
  try {
1889
2615
  const result = await originalMethod.apply(this, args);
1890
2616
  let output;
@@ -1909,8 +2635,10 @@ function TrackSession(agentName, options) {
1909
2635
  });
1910
2636
  throw error;
1911
2637
  } finally {
1912
- clearSessionContext();
1913
- _currentInteraction = null;
2638
+ if (sessionTokens) {
2639
+ _restoreSessionContext(sessionTokens);
2640
+ }
2641
+ _currentInteraction.reset(interactionToken);
1914
2642
  }
1915
2643
  };
1916
2644
  return descriptor;
@@ -1923,6 +2651,8 @@ var SessionContext = class {
1923
2651
  client;
1924
2652
  interaction = null;
1925
2653
  output;
2654
+ sessionTokens;
2655
+ interactionToken;
1926
2656
  constructor(options) {
1927
2657
  this.userId = options.userId;
1928
2658
  this.agent = options.agent;
@@ -1941,9 +2671,9 @@ var SessionContext = class {
1941
2671
  });
1942
2672
  const sessionId = this.interaction.getSessionId();
1943
2673
  if (sessionId) {
1944
- setSessionContext(sessionId, this.client);
2674
+ this.sessionTokens = _setSessionContextWithTokens(sessionId, this.client);
1945
2675
  }
1946
- _currentInteraction = this.interaction;
2676
+ this.interactionToken = _currentInteraction.set(this.interaction);
1947
2677
  return this;
1948
2678
  }
1949
2679
  /**
@@ -1963,8 +2693,12 @@ var SessionContext = class {
1963
2693
  failureReason: options?.error
1964
2694
  });
1965
2695
  }
1966
- clearSessionContext();
1967
- _currentInteraction = null;
2696
+ if (this.sessionTokens) {
2697
+ _restoreSessionContext(this.sessionTokens);
2698
+ }
2699
+ if (this.interactionToken) {
2700
+ _currentInteraction.reset(this.interactionToken);
2701
+ }
1968
2702
  }
1969
2703
  /**
1970
2704
  * Get the session ID
@@ -2018,30 +2752,31 @@ function serializeOutput(value) {
2018
2752
  }
2019
2753
 
2020
2754
  // src/context.ts
2021
- var _experimentContext = null;
2755
+ var _experimentContext = createContextVar(null);
2022
2756
  function getSystemPrompt(defaultPrompt) {
2023
- if (_experimentContext?.systemPrompt) {
2024
- return _experimentContext.systemPrompt;
2757
+ const ctx = _experimentContext.get();
2758
+ if (ctx?.systemPrompt) {
2759
+ return ctx.systemPrompt;
2025
2760
  }
2026
2761
  return defaultPrompt ?? "";
2027
2762
  }
2028
2763
  function getExperimentContext() {
2029
- return _experimentContext;
2764
+ return _experimentContext.get();
2030
2765
  }
2031
2766
  function isExperimentMode() {
2032
- return _experimentContext !== null;
2767
+ return _experimentContext.get() !== null;
2033
2768
  }
2034
2769
  function getVariantName() {
2035
- return _experimentContext?.variantName ?? null;
2770
+ return _experimentContext.get()?.variantName ?? null;
2036
2771
  }
2037
2772
  function getExperimentId() {
2038
- return _experimentContext?.experimentId ?? null;
2773
+ return _experimentContext.get()?.experimentId ?? null;
2039
2774
  }
2040
2775
  function setExperimentContext(context) {
2041
- _experimentContext = context;
2776
+ _experimentContext.set(context);
2042
2777
  }
2043
2778
  function clearExperimentContext() {
2044
- _experimentContext = null;
2779
+ _experimentContext.set(null);
2045
2780
  }
2046
2781
 
2047
2782
  // src/experiment.ts
@@ -2375,6 +3110,7 @@ var Experiment = class {
2375
3110
  // Annotate the CommonJS export names for ESM import in node:
2376
3111
  0 && (module.exports = {
2377
3112
  ApiError,
3113
+ EventBatcher,
2378
3114
  EventType,
2379
3115
  Experiment,
2380
3116
  ExperimentRunTracker,
@@ -2394,6 +3130,7 @@ var Experiment = class {
2394
3130
  clearSessionContext,
2395
3131
  configure,
2396
3132
  configureVercel,
3133
+ createContextVar,
2397
3134
  getCurrentInteraction,
2398
3135
  getCurrentSessionId,
2399
3136
  getExperimentContext,
@@ -2416,6 +3153,7 @@ var Experiment = class {
2416
3153
  withTool,
2417
3154
  wrapAISDK,
2418
3155
  wrapAnthropic,
3156
+ wrapClaudeAgent,
2419
3157
  wrapGoogle,
2420
3158
  wrapLLM,
2421
3159
  wrapOpenAI