@sentrial/sdk 0.3.3 → 0.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,3 +1,194 @@
1
+ /**
2
+ * Async-safe context variables for the Sentrial SDK.
3
+ *
4
+ * In Node.js (>= 18): uses AsyncLocalStorage for true per-request isolation.
5
+ * In browsers: falls back to a simple variable (safe because browsers have no
6
+ * concurrent request handling within a single page).
7
+ *
8
+ * This is the TypeScript equivalent of Python's `contextvars.ContextVar`.
9
+ */
10
+ interface ContextToken<T> {
11
+ /** @internal */
12
+ readonly _previous: T;
13
+ }
14
+ interface ContextVar<T> {
15
+ /** Get the current value (returns defaultValue if unset in this context). */
16
+ get(): T;
17
+ /** Set a new value. Returns a token that can restore the previous value. */
18
+ set(value: T): ContextToken<T>;
19
+ /** Restore the value that was active before the given token's set() call. */
20
+ reset(token: ContextToken<T>): void;
21
+ }
22
+ /**
23
+ * Create an async-safe context variable.
24
+ *
25
+ * Uses AsyncLocalStorage in Node.js for per-request isolation,
26
+ * falls back to a simple variable in browsers.
27
+ *
28
+ * @param defaultValue - Value returned by get() when nothing has been set.
29
+ */
30
+ declare function createContextVar<T>(defaultValue: T): ContextVar<T>;
31
+
32
+ /**
33
+ * Sentrial LLM Wrappers - Auto-instrument LLM provider SDKs
34
+ *
35
+ * These wrappers automatically track all LLM calls with:
36
+ * - Input messages
37
+ * - Output responses
38
+ * - Token counts
39
+ * - Cost estimation
40
+ * - Latency
41
+ *
42
+ * @example OpenAI
43
+ * ```ts
44
+ * import OpenAI from 'openai';
45
+ * import { wrapOpenAI, configure } from '@sentrial/sdk';
46
+ *
47
+ * configure({ apiKey: 'sentrial_live_xxx' });
48
+ *
49
+ * const openai = wrapOpenAI(new OpenAI());
50
+ *
51
+ * // All calls are now automatically tracked!
52
+ * const response = await openai.chat.completions.create({
53
+ * model: 'gpt-4o',
54
+ * messages: [{ role: 'user', content: 'Hello!' }],
55
+ * });
56
+ * ```
57
+ *
58
+ * @example Anthropic
59
+ * ```ts
60
+ * import Anthropic from '@anthropic-ai/sdk';
61
+ * import { wrapAnthropic, configure } from '@sentrial/sdk';
62
+ *
63
+ * configure({ apiKey: 'sentrial_live_xxx' });
64
+ *
65
+ * const anthropic = wrapAnthropic(new Anthropic());
66
+ *
67
+ * const response = await anthropic.messages.create({
68
+ * model: 'claude-3-5-sonnet-20241022',
69
+ * max_tokens: 1024,
70
+ * messages: [{ role: 'user', content: 'Hello!' }],
71
+ * });
72
+ * ```
73
+ */
74
+
75
+ /** Tokens for restoring previous session context (used internally by decorators). */
76
+ interface SessionContextTokens {
77
+ _sessionToken: ContextToken<string | null>;
78
+ _clientToken: ContextToken<SentrialClient | null>;
79
+ }
80
+ /**
81
+ * Set the current session context for auto-tracking.
82
+ *
83
+ * Call this before making LLM calls to associate them with a session.
84
+ * This is automatically called when using withSession() or decorators.
85
+ *
86
+ * @param sessionId - The session ID to track LLM calls under
87
+ * @param client - Optional Sentrial client (uses default if not provided)
88
+ */
89
+ declare function setSessionContext(sessionId: string, client?: SentrialClient): void;
90
+ /**
91
+ * Clear the current session context.
92
+ */
93
+ declare function clearSessionContext(): void;
94
+ /**
95
+ * Get the current session ID.
96
+ */
97
+ declare function getSessionContext(): string | null;
98
+ /**
99
+ * Set the default client for wrappers.
100
+ */
101
+ declare function setDefaultClient(client: SentrialClient): void;
102
+ /**
103
+ * Wrap an OpenAI client to automatically track all LLM calls.
104
+ *
105
+ * @param client - OpenAI client instance
106
+ * @param options - Wrapper options
107
+ * @returns The same client, now with auto-tracking enabled
108
+ *
109
+ * @example
110
+ * ```ts
111
+ * import OpenAI from 'openai';
112
+ * import { wrapOpenAI, configure } from '@sentrial/sdk';
113
+ *
114
+ * configure({ apiKey: 'sentrial_live_xxx' });
115
+ * const openai = wrapOpenAI(new OpenAI());
116
+ *
117
+ * // Now use client normally - all calls are tracked!
118
+ * const response = await openai.chat.completions.create({
119
+ * model: 'gpt-4o',
120
+ * messages: [{ role: 'user', content: 'Hello' }],
121
+ * });
122
+ * ```
123
+ */
124
+ declare function wrapOpenAI<T extends object>(client: T, options?: {
125
+ trackWithoutSession?: boolean;
126
+ }): T;
127
+ /**
128
+ * Wrap an Anthropic client to automatically track all LLM calls.
129
+ *
130
+ * @param client - Anthropic client instance
131
+ * @param options - Wrapper options
132
+ * @returns The same client, now with auto-tracking enabled
133
+ *
134
+ * @example
135
+ * ```ts
136
+ * import Anthropic from '@anthropic-ai/sdk';
137
+ * import { wrapAnthropic, configure } from '@sentrial/sdk';
138
+ *
139
+ * configure({ apiKey: 'sentrial_live_xxx' });
140
+ * const anthropic = wrapAnthropic(new Anthropic());
141
+ *
142
+ * const response = await anthropic.messages.create({
143
+ * model: 'claude-3-5-sonnet-20241022',
144
+ * max_tokens: 1024,
145
+ * messages: [{ role: 'user', content: 'Hello!' }],
146
+ * });
147
+ * ```
148
+ */
149
+ declare function wrapAnthropic<T extends object>(client: T, options?: {
150
+ trackWithoutSession?: boolean;
151
+ }): T;
152
+ /**
153
+ * Wrap a Google GenerativeModel to automatically track all LLM calls.
154
+ *
155
+ * @param model - Google GenerativeModel instance
156
+ * @param options - Wrapper options
157
+ * @returns The same model, now with auto-tracking enabled
158
+ *
159
+ * @example
160
+ * ```ts
161
+ * import { GoogleGenerativeAI } from '@google/generative-ai';
162
+ * import { wrapGoogle, configure } from '@sentrial/sdk';
163
+ *
164
+ * configure({ apiKey: 'sentrial_live_xxx' });
165
+ *
166
+ * const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
167
+ * const model = wrapGoogle(genAI.getGenerativeModel({ model: 'gemini-2.0-flash' }));
168
+ *
169
+ * const response = await model.generateContent('Hello!');
170
+ * ```
171
+ */
172
+ declare function wrapGoogle<T extends object>(model: T, options?: {
173
+ trackWithoutSession?: boolean;
174
+ }): T;
175
+ /**
176
+ * Auto-detect and wrap any supported LLM client.
177
+ *
178
+ * @param client - Any supported LLM client (OpenAI, Anthropic, Google)
179
+ * @param provider - Optional provider hint
180
+ * @returns The wrapped client
181
+ *
182
+ * @example
183
+ * ```ts
184
+ * import OpenAI from 'openai';
185
+ * import { wrapLLM } from '@sentrial/sdk';
186
+ *
187
+ * const client = wrapLLM(new OpenAI()); // Auto-detected as OpenAI
188
+ * ```
189
+ */
190
+ declare function wrapLLM<T extends object>(client: T, provider?: 'openai' | 'anthropic' | 'google'): T;
191
+
1
192
  /**
2
193
  * PII Redaction Module
3
194
  *
@@ -71,6 +262,68 @@ declare function redactValue(value: unknown, mode: PiiMode, builtinPatterns: Req
71
262
  */
72
263
  declare function redactPayload(payload: Record<string, unknown>, config: PiiConfig): Record<string, unknown>;
73
264
 
265
+ /**
266
+ * EventBatcher — fire-and-forget event queue with periodic flushing.
267
+ *
268
+ * Buffers tracking events (tool calls, decisions, errors, generic events)
269
+ * and flushes them in batches to reduce HTTP overhead. Session lifecycle
270
+ * calls (createSession, completeSession) bypass the batcher entirely
271
+ * because they need synchronous responses.
272
+ *
273
+ * Flush triggers:
274
+ * 1. Timer — every `flushIntervalMs` (default: 1000 ms)
275
+ * 2. Queue size — when queue reaches `flushThreshold` (default 10)
276
+ * 3. Manual — `flush()` or `shutdown()`
277
+ * 4. Process exit — `beforeExit` handler
278
+ *
279
+ * Back-pressure: queue is capped at `maxQueueSize` (default: 1000).
280
+ * When full, oldest events are dropped and a warning is logged.
281
+ */
282
+ interface BatcherConfig {
283
+ /** Whether batching is enabled (default: false — all calls are immediate) */
284
+ enabled?: boolean;
285
+ /** Milliseconds between automatic flushes (default: 1000) */
286
+ flushIntervalMs?: number;
287
+ /** Flush when the queue reaches this many items (default: 10) */
288
+ flushThreshold?: number;
289
+ /** Maximum queue size before dropping oldest events (default: 1000) */
290
+ maxQueueSize?: number;
291
+ }
292
+ /** Signature for the function that actually sends an event to the API. */
293
+ type SendFn = (method: string, url: string, body: unknown) => Promise<unknown>;
294
+ declare class EventBatcher {
295
+ private readonly queue;
296
+ private readonly flushIntervalMs;
297
+ private readonly flushThreshold;
298
+ private readonly maxQueueSize;
299
+ private timer;
300
+ private sendFn;
301
+ private flushing;
302
+ private shutdownCalled;
303
+ private readonly exitHandler;
304
+ constructor(sendFn: SendFn, config?: BatcherConfig);
305
+ /**
306
+ * Enqueue an event for batched delivery.
307
+ *
308
+ * If the queue hits `flushThreshold`, an automatic flush is triggered.
309
+ * If the queue is full (`maxQueueSize`), the oldest event is dropped.
310
+ */
311
+ enqueue(method: string, url: string, body: unknown): void;
312
+ /**
313
+ * Flush all queued events to the API.
314
+ *
315
+ * Drains the queue and fires all requests in parallel. Safe to call
316
+ * concurrently — only one flush runs at a time.
317
+ */
318
+ flush(): Promise<void>;
319
+ /**
320
+ * Stop the batcher: clear the timer, flush remaining events, remove exit handler.
321
+ */
322
+ shutdown(): Promise<void>;
323
+ /** Number of events currently queued. */
324
+ get size(): number;
325
+ }
326
+
74
327
  /**
75
328
  * Type definitions for the Sentrial TypeScript SDK
76
329
  */
@@ -107,6 +360,14 @@ interface SentrialClientConfig {
107
360
  * - Pass `true` to fetch the org's PII config from the server automatically.
108
361
  */
109
362
  pii?: PiiConfig | boolean;
363
+ /**
364
+ * Event batching configuration.
365
+ * When enabled, fire-and-forget tracking calls (trackToolCall, trackDecision,
366
+ * trackError, trackEvent) are queued and flushed periodically instead of
367
+ * being sent immediately. Session lifecycle calls (createSession,
368
+ * completeSession) always bypass the batcher.
369
+ */
370
+ batching?: BatcherConfig;
110
371
  }
111
372
  /**
112
373
  * Parameters for creating a new session
@@ -318,6 +579,8 @@ interface FinishParams {
318
579
  completionTokens?: number;
319
580
  /** Total tokens used */
320
581
  totalTokens?: number;
582
+ /** Duration in milliseconds (auto-calculated from begin/finish if not provided) */
583
+ durationMs?: number;
321
584
  }
322
585
  /**
323
586
  * Parameters for cost calculation
@@ -454,7 +717,12 @@ declare class SentrialClient {
454
717
  private piiConfig?;
455
718
  private piiConfigNeedsHydration;
456
719
  private piiHydrationPromise?;
457
- private currentState;
720
+ private readonly _stateVar;
721
+ private readonly batcher?;
722
+ /** Per-session cost/token accumulator — populated by trackToolCall/trackDecision */
723
+ private sessionAccumulators;
724
+ private get currentState();
725
+ private set currentState(value);
458
726
  constructor(config?: SentrialClientConfig);
459
727
  /**
460
728
  * Fetch the organization's PII config from the server.
@@ -471,6 +739,7 @@ declare class SentrialClient {
471
739
  */
472
740
  private safeRequest;
473
741
  private sleep;
742
+ private accumulate;
474
743
  /**
475
744
  * Create a new session
476
745
  *
@@ -553,6 +822,19 @@ declare class SentrialClient {
553
822
  * ```
554
823
  */
555
824
  completeSession(params: CompleteSessionParams): Promise<Session | null>;
825
+ /**
826
+ * Flush any queued events immediately.
827
+ *
828
+ * No-op if batching is not enabled.
829
+ */
830
+ flush(): Promise<void>;
831
+ /**
832
+ * Shut down the event batcher, flushing remaining events.
833
+ *
834
+ * Call this before your process exits for a clean shutdown.
835
+ * No-op if batching is not enabled.
836
+ */
837
+ shutdown(): Promise<void>;
556
838
  /**
557
839
  * Begin tracking an interaction (simplified API)
558
840
  *
@@ -585,6 +867,7 @@ interface InteractionConfig {
585
867
  userId: string;
586
868
  event: string;
587
869
  userInput?: string;
870
+ sessionTokens?: SessionContextTokens;
588
871
  }
589
872
  /**
590
873
  * Represents an in-progress interaction that can be finished.
@@ -604,12 +887,15 @@ declare class Interaction {
604
887
  readonly userId: string;
605
888
  /** Event name for this interaction */
606
889
  readonly event: string;
890
+ private readonly startTime;
607
891
  private finished;
608
892
  private success;
609
893
  private failureReason?;
610
894
  private output?;
611
895
  private readonly userInput?;
612
896
  private readonly degraded;
897
+ /** Context tokens for restoring previous session context on finish() */
898
+ private sessionTokens?;
613
899
  constructor(config: InteractionConfig);
614
900
  /**
615
901
  * Set the output for this interaction
@@ -703,9 +989,13 @@ declare function begin(params: BeginParams): Promise<Interaction>;
703
989
  /**
704
990
  * Simple API namespace for module-level usage
705
991
  */
992
+ declare function flush(): Promise<void>;
993
+ declare function shutdown(): Promise<void>;
706
994
  declare const sentrial: {
707
995
  configure: typeof configure;
708
996
  begin: typeof begin;
997
+ flush: typeof flush;
998
+ shutdown: typeof shutdown;
709
999
  };
710
1000
 
711
1001
  /**
@@ -960,159 +1250,68 @@ declare function wrapAISDK(ai: AIModule, options?: {
960
1250
  };
961
1251
 
962
1252
  /**
963
- * Sentrial LLM Wrappers - Auto-instrument LLM provider SDKs
964
- *
965
- * These wrappers automatically track all LLM calls with:
966
- * - Input messages
967
- * - Output responses
968
- * - Token counts
969
- * - Cost estimation
970
- * - Latency
971
- *
972
- * @example OpenAI
973
- * ```ts
974
- * import OpenAI from 'openai';
975
- * import { wrapOpenAI, configure } from '@sentrial/sdk';
976
- *
977
- * configure({ apiKey: 'sentrial_live_xxx' });
978
- *
979
- * const openai = wrapOpenAI(new OpenAI());
980
- *
981
- * // All calls are now automatically tracked!
982
- * const response = await openai.chat.completions.create({
983
- * model: 'gpt-4o',
984
- * messages: [{ role: 'user', content: 'Hello!' }],
985
- * });
986
- * ```
987
- *
988
- * @example Anthropic
989
- * ```ts
990
- * import Anthropic from '@anthropic-ai/sdk';
991
- * import { wrapAnthropic, configure } from '@sentrial/sdk';
992
- *
993
- * configure({ apiKey: 'sentrial_live_xxx' });
994
- *
995
- * const anthropic = wrapAnthropic(new Anthropic());
996
- *
997
- * const response = await anthropic.messages.create({
998
- * model: 'claude-3-5-sonnet-20241022',
999
- * max_tokens: 1024,
1000
- * messages: [{ role: 'user', content: 'Hello!' }],
1001
- * });
1002
- * ```
1003
- */
1004
-
1005
- /**
1006
- * Set the current session context for auto-tracking.
1007
- *
1008
- * Call this before making LLM calls to associate them with a session.
1009
- * This is automatically called when using withSession() or decorators.
1010
- *
1011
- * @param sessionId - The session ID to track LLM calls under
1012
- * @param client - Optional Sentrial client (uses default if not provided)
1013
- */
1014
- declare function setSessionContext(sessionId: string, client?: SentrialClient): void;
1015
- /**
1016
- * Clear the current session context.
1017
- */
1018
- declare function clearSessionContext(): void;
1019
- /**
1020
- * Get the current session ID.
1021
- */
1022
- declare function getSessionContext(): string | null;
1023
- /**
1024
- * Set the default client for wrappers.
1025
- */
1026
- declare function setDefaultClient(client: SentrialClient): void;
1027
- /**
1028
- * Wrap an OpenAI client to automatically track all LLM calls.
1029
- *
1030
- * @param client - OpenAI client instance
1031
- * @param options - Wrapper options
1032
- * @returns The same client, now with auto-tracking enabled
1033
- *
1034
- * @example
1035
- * ```ts
1036
- * import OpenAI from 'openai';
1037
- * import { wrapOpenAI, configure } from '@sentrial/sdk';
1038
- *
1039
- * configure({ apiKey: 'sentrial_live_xxx' });
1040
- * const openai = wrapOpenAI(new OpenAI());
1041
- *
1042
- * // Now use client normally - all calls are tracked!
1043
- * const response = await openai.chat.completions.create({
1044
- * model: 'gpt-4o',
1045
- * messages: [{ role: 'user', content: 'Hello' }],
1046
- * });
1047
- * ```
1048
- */
1049
- declare function wrapOpenAI<T extends object>(client: T, options?: {
1050
- trackWithoutSession?: boolean;
1051
- }): T;
1052
- /**
1053
- * Wrap an Anthropic client to automatically track all LLM calls.
1253
+ * Sentrial wrapper for the Claude Agent SDK (Claude Code).
1054
1254
  *
1055
- * @param client - Anthropic client instance
1056
- * @param options - Wrapper options
1057
- * @returns The same client, now with auto-tracking enabled
1255
+ * Wraps the `query()` async generator to automatically track sessions,
1256
+ * tool calls, tokens, and costs to Sentrial.
1058
1257
  *
1059
1258
  * @example
1060
1259
  * ```ts
1061
- * import Anthropic from '@anthropic-ai/sdk';
1062
- * import { wrapAnthropic, configure } from '@sentrial/sdk';
1260
+ * import { query } from '@anthropic-ai/claude-agent-sdk';
1261
+ * import { SentrialClient, wrapClaudeAgent } from '@sentrial/sdk';
1063
1262
  *
1064
- * configure({ apiKey: 'sentrial_live_xxx' });
1065
- * const anthropic = wrapAnthropic(new Anthropic());
1263
+ * const client = new SentrialClient({ apiKey: process.env.SENTRIAL_API_KEY });
1066
1264
  *
1067
- * const response = await anthropic.messages.create({
1068
- * model: 'claude-3-5-sonnet-20241022',
1069
- * max_tokens: 1024,
1070
- * messages: [{ role: 'user', content: 'Hello!' }],
1265
+ * const trackedQuery = wrapClaudeAgent(query, {
1266
+ * client,
1267
+ * defaultAgent: 'my-agent',
1268
+ * userId: 'user-123',
1071
1269
  * });
1072
- * ```
1073
- */
1074
- declare function wrapAnthropic<T extends object>(client: T, options?: {
1075
- trackWithoutSession?: boolean;
1076
- }): T;
1077
- /**
1078
- * Wrap a Google GenerativeModel to automatically track all LLM calls.
1079
1270
  *
1080
- * @param model - Google GenerativeModel instance
1081
- * @param options - Wrapper options
1082
- * @returns The same model, now with auto-tracking enabled
1083
- *
1084
- * @example
1085
- * ```ts
1086
- * import { GoogleGenerativeAI } from '@google/generative-ai';
1087
- * import { wrapGoogle, configure } from '@sentrial/sdk';
1088
- *
1089
- * configure({ apiKey: 'sentrial_live_xxx' });
1090
- *
1091
- * const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
1092
- * const model = wrapGoogle(genAI.getGenerativeModel({ model: 'gemini-2.0-flash' }));
1093
- *
1094
- * const response = await model.generateContent('Hello!');
1271
+ * for await (const message of trackedQuery({ prompt: 'Fix the tests' })) {
1272
+ * // Messages pass through unchanged
1273
+ * console.log(message.type);
1274
+ * }
1095
1275
  * ```
1096
1276
  */
1097
- declare function wrapGoogle<T extends object>(model: T, options?: {
1098
- trackWithoutSession?: boolean;
1099
- }): T;
1277
+
1278
+ type HookCallback = (input: any, toolUseID: string | undefined, options: {
1279
+ signal: AbortSignal;
1280
+ }) => Promise<any>;
1281
+ interface HookCallbackMatcher {
1282
+ matcher?: string;
1283
+ hooks: HookCallback[];
1284
+ }
1285
+ interface QueryOptions {
1286
+ hooks?: Record<string, HookCallbackMatcher[]>;
1287
+ [key: string]: unknown;
1288
+ }
1289
+ interface QueryParams {
1290
+ prompt: string | AsyncIterable<any>;
1291
+ options?: QueryOptions;
1292
+ }
1293
+ type QueryFunction = (params: QueryParams) => AsyncGenerator<any, void>;
1294
+ interface WrapClaudeAgentOptions {
1295
+ /** Sentrial client instance for tracking */
1296
+ client: SentrialClient;
1297
+ /** Default agent name (used in session creation). Defaults to 'claude-agent'. */
1298
+ defaultAgent?: string;
1299
+ /** User ID to associate with sessions. Defaults to 'anonymous'. */
1300
+ userId?: string;
1301
+ /** Conversation ID to group related sessions into a thread. */
1302
+ convoId?: string;
1303
+ /** Extra metadata to include in every session. */
1304
+ extraMetadata?: Record<string, unknown>;
1305
+ }
1100
1306
  /**
1101
- * Auto-detect and wrap any supported LLM client.
1307
+ * Wrap the Claude Agent SDK's `query()` function to automatically track
1308
+ * sessions, tool calls, tokens, and costs to Sentrial.
1102
1309
  *
1103
- * @param client - Any supported LLM client (OpenAI, Anthropic, Google)
1104
- * @param provider - Optional provider hint
1105
- * @returns The wrapped client
1106
- *
1107
- * @example
1108
- * ```ts
1109
- * import OpenAI from 'openai';
1110
- * import { wrapLLM } from '@sentrial/sdk';
1111
- *
1112
- * const client = wrapLLM(new OpenAI()); // Auto-detected as OpenAI
1113
- * ```
1310
+ * @param queryFn - The original `query()` function from `@anthropic-ai/claude-agent-sdk`
1311
+ * @param wrapOptions - Configuration for the wrapper
1312
+ * @returns A new function with the same signature as `query()`
1114
1313
  */
1115
- declare function wrapLLM<T extends object>(client: T, provider?: 'openai' | 'anthropic' | 'google'): T;
1314
+ declare function wrapClaudeAgent(queryFn: QueryFunction, wrapOptions: WrapClaudeAgentOptions): QueryFunction;
1116
1315
 
1117
1316
  /**
1118
1317
  * Sentrial Decorators - Easy instrumentation for AI agents
@@ -1296,6 +1495,8 @@ declare class SessionContext {
1296
1495
  private readonly client;
1297
1496
  private interaction;
1298
1497
  private output?;
1498
+ private sessionTokens?;
1499
+ private interactionToken?;
1299
1500
  constructor(options: {
1300
1501
  userId: string;
1301
1502
  agent: string;
@@ -1680,4 +1881,4 @@ declare class Experiment {
1680
1881
  getResults(): Promise<ExperimentResults | null>;
1681
1882
  }
1682
1883
 
1683
- export { ApiError, type ApiResponse, type BeginParams, type CompleteSessionParams, type CostParams, type CreateSessionParams, type Event, EventType, Experiment, type ExperimentContext, type ExperimentResults, type ExperimentRunResult, ExperimentRunTracker, type ExperimentTestCase, type ExperimentVariant, type FinishParams, type GenerateTextParams, type GenerateTextResult, Interaction, NetworkError, type PiiBuiltinPatterns, type PiiConfig, type PiiCustomPattern, type PiiField, type PiiMode, SentrialClient, type SentrialClientConfig, SentrialError, type Session, SessionContext, type SessionStatus, type StreamTextResult, Tool, type TrackDecisionParams, type TrackErrorParams, TrackSession, type TrackToolCallParams, ValidationError, begin, calculateAnthropicCost, calculateGoogleCost, calculateOpenAICost, clearExperimentContext, clearSessionContext, configure, configureVercel, getCurrentInteraction, getCurrentSessionId, getExperimentContext, getExperimentId, getSessionContext, getSystemPrompt, getVariantName, hashValue, isExperimentMode, redactPayload, redactString, redactValue, replaceMatch, sentrial, setClient, setDefaultClient, setExperimentContext, setSessionContext, withSession, withTool, wrapAISDK, wrapAnthropic, wrapGoogle, wrapLLM, wrapOpenAI };
1884
+ export { ApiError, type ApiResponse, type BatcherConfig, type BeginParams, type CompleteSessionParams, type ContextToken, type ContextVar, type CostParams, type CreateSessionParams, type Event, EventBatcher, EventType, Experiment, type ExperimentContext, type ExperimentResults, type ExperimentRunResult, ExperimentRunTracker, type ExperimentTestCase, type ExperimentVariant, type FinishParams, type GenerateTextParams, type GenerateTextResult, Interaction, NetworkError, type PiiBuiltinPatterns, type PiiConfig, type PiiCustomPattern, type PiiField, type PiiMode, SentrialClient, type SentrialClientConfig, SentrialError, type Session, SessionContext, type SessionStatus, type StreamTextResult, Tool, type TrackDecisionParams, type TrackErrorParams, TrackSession, type TrackToolCallParams, ValidationError, type WrapClaudeAgentOptions, begin, calculateAnthropicCost, calculateGoogleCost, calculateOpenAICost, clearExperimentContext, clearSessionContext, configure, configureVercel, createContextVar, getCurrentInteraction, getCurrentSessionId, getExperimentContext, getExperimentId, getSessionContext, getSystemPrompt, getVariantName, hashValue, isExperimentMode, redactPayload, redactString, redactValue, replaceMatch, sentrial, setClient, setDefaultClient, setExperimentContext, setSessionContext, withSession, withTool, wrapAISDK, wrapAnthropic, wrapClaudeAgent, wrapGoogle, wrapLLM, wrapOpenAI };