@usetransactional/llm-node 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,58 @@
1
+ import { Serialized } from '@langchain/core/load/serializable';
2
+ import { LLMResult } from '@langchain/core/outputs';
3
+
4
+ /**
5
+ * LangChain Integration
6
+ *
7
+ * Callback handler for automatic tracing of LangChain chains and LLM calls.
8
+ *
9
+ * @example
10
+ * ```typescript
11
+ * import { TransactionalCallbackHandler } from '@usetransactional/llm-node/langchain';
12
+ * import { ChatOpenAI } from '@langchain/openai';
13
+ *
14
+ * const handler = new TransactionalCallbackHandler({
15
+ * sessionId: 'conversation-123',
16
+ * });
17
+ *
18
+ * const model = new ChatOpenAI({ modelName: 'gpt-4o' });
19
+ * const response = await model.invoke('Hello!', { callbacks: [handler] });
20
+ * ```
21
+ */
22
+
23
/** Configuration applied to every trace emitted by {@link TransactionalCallbackHandler}. */
interface TransactionalCallbackHandlerOptions {
    /** Session ID to group traces (e.g. one per conversation) */
    sessionId?: string;
    /** User ID for attribution */
    userId?: string;
    /** Custom metadata for all traces */
    metadata?: Record<string, unknown>;
}
31
/**
 * LangChain callback handler for automatic tracing.
 * Implements the LangChain callback interface without extending BaseCallbackHandler
 * to avoid requiring @langchain/core as a direct dependency.
 */
declare class TransactionalCallbackHandler {
    /** Handler name reported to LangChain. */
    name: string;
    private options;
    /** Root trace opened by the first chain start; cleared when the root chain ends or errors. */
    private traceHandle?;
    /** Open observations (spans/generations) keyed by LangChain runId. */
    private observationStack;
    constructor(options?: TransactionalCallbackHandlerOptions);
    private getClient;
    /** Opens the root trace (first chain seen) or a nested span for inner chains. */
    handleChainStart(chain: Serialized, inputs: Record<string, unknown>, runId: string): Promise<void>;
    handleChainEnd(outputs: Record<string, unknown>, runId: string): Promise<void>;
    handleChainError(error: Error, runId: string): Promise<void>;
    /** Starts a generation observation for a completion-style LLM call. */
    handleLLMStart(llm: Serialized, prompts: string[], runId: string): Promise<void>;
    /** Ends the generation, recording output and token usage when reported. */
    handleLLMEnd(output: LLMResult, runId: string): Promise<void>;
    handleLLMError(error: Error, runId: string): Promise<void>;
    /** Starts a generation for a chat-model call; it is ended via handleLLMEnd. */
    handleChatModelStart(llm: Serialized, messages: unknown[][], runId: string): Promise<void>;
    handleToolStart(tool: Serialized, input: string, runId: string): Promise<void>;
    handleToolEnd(output: string, runId: string): Promise<void>;
    handleToolError(error: Error, runId: string): Promise<void>;
    handleRetrieverStart(retriever: Serialized, query: string, runId: string): Promise<void>;
    handleRetrieverEnd(documents: unknown[], runId: string): Promise<void>;
    handleRetrieverError(error: Error, runId: string): Promise<void>;
}
57
+
58
+ export { TransactionalCallbackHandler, type TransactionalCallbackHandlerOptions };
@@ -0,0 +1,58 @@
1
+ import { Serialized } from '@langchain/core/load/serializable';
2
+ import { LLMResult } from '@langchain/core/outputs';
3
+
4
+ /**
5
+ * LangChain Integration
6
+ *
7
+ * Callback handler for automatic tracing of LangChain chains and LLM calls.
8
+ *
9
+ * @example
10
+ * ```typescript
11
+ * import { TransactionalCallbackHandler } from '@usetransactional/llm-node/langchain';
12
+ * import { ChatOpenAI } from '@langchain/openai';
13
+ *
14
+ * const handler = new TransactionalCallbackHandler({
15
+ * sessionId: 'conversation-123',
16
+ * });
17
+ *
18
+ * const model = new ChatOpenAI({ modelName: 'gpt-4o' });
19
+ * const response = await model.invoke('Hello!', { callbacks: [handler] });
20
+ * ```
21
+ */
22
+
23
/** Configuration applied to every trace emitted by {@link TransactionalCallbackHandler}. */
interface TransactionalCallbackHandlerOptions {
    /** Session ID to group traces (e.g. one per conversation) */
    sessionId?: string;
    /** User ID for attribution */
    userId?: string;
    /** Custom metadata for all traces */
    metadata?: Record<string, unknown>;
}
31
/**
 * LangChain callback handler for automatic tracing.
 * Implements the LangChain callback interface without extending BaseCallbackHandler
 * to avoid requiring @langchain/core as a direct dependency.
 */
declare class TransactionalCallbackHandler {
    /** Handler name reported to LangChain. */
    name: string;
    private options;
    /** Root trace opened by the first chain start; cleared when the root chain ends or errors. */
    private traceHandle?;
    /** Open observations (spans/generations) keyed by LangChain runId. */
    private observationStack;
    constructor(options?: TransactionalCallbackHandlerOptions);
    private getClient;
    /** Opens the root trace (first chain seen) or a nested span for inner chains. */
    handleChainStart(chain: Serialized, inputs: Record<string, unknown>, runId: string): Promise<void>;
    handleChainEnd(outputs: Record<string, unknown>, runId: string): Promise<void>;
    handleChainError(error: Error, runId: string): Promise<void>;
    /** Starts a generation observation for a completion-style LLM call. */
    handleLLMStart(llm: Serialized, prompts: string[], runId: string): Promise<void>;
    /** Ends the generation, recording output and token usage when reported. */
    handleLLMEnd(output: LLMResult, runId: string): Promise<void>;
    handleLLMError(error: Error, runId: string): Promise<void>;
    /** Starts a generation for a chat-model call; it is ended via handleLLMEnd. */
    handleChatModelStart(llm: Serialized, messages: unknown[][], runId: string): Promise<void>;
    handleToolStart(tool: Serialized, input: string, runId: string): Promise<void>;
    handleToolEnd(output: string, runId: string): Promise<void>;
    handleToolError(error: Error, runId: string): Promise<void>;
    handleRetrieverStart(retriever: Serialized, query: string, runId: string): Promise<void>;
    handleRetrieverEnd(documents: unknown[], runId: string): Promise<void>;
    handleRetrieverError(error: Error, runId: string): Promise<void>;
}
57
+
58
+ export { TransactionalCallbackHandler, type TransactionalCallbackHandlerOptions };
@@ -0,0 +1,216 @@
1
"use strict";
// Bundler (esbuild) CommonJS interop helpers.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Install lazy, enumerable getters for each named export on `target`.
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy properties from `from` onto `to` (skipping `except`), preserving enumerability.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Mark the exports object as an ES module for interop consumers.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

// src/integrations/langchain.ts
var langchain_exports = {};
__export(langchain_exports, {
  TransactionalCallbackHandler: () => TransactionalCallbackHandler
});
module.exports = __toCommonJS(langchain_exports);

// src/client.ts
var import_nanoid = require("nanoid");
+
30
// src/index.ts
// Module-level singleton client; assigned by initLlmOps() elsewhere in the bundle.
var defaultClient = null;
// Return the shared client, failing loudly when the SDK was never initialized.
function getLlmOps() {
  if (defaultClient) return defaultClient;
  throw new Error(
    "LLM Ops SDK not initialized. Call initLlmOps() first."
  );
}
// True once initLlmOps() has installed a client.
function isInitialized() {
  return defaultClient !== null;
}
43
+
44
// src/integrations/langchain.ts
// Derive a model name from a LangChain `Serialized` payload.
// Constructor args are serialized under `kwargs`; prefer `model`, then
// `modelName`, then the caller-supplied fallback.
function getModelName(serialized, fallback) {
  const kwargs = "kwargs" in serialized ? serialized.kwargs : void 0;
  if (!kwargs) return fallback;
  return String(kwargs.model || kwargs.modelName || fallback);
}
52
+ var TransactionalCallbackHandler = class {
53
+ constructor(options = {}) {
54
+ this.name = "TransactionalCallbackHandler";
55
+ this.observationStack = /* @__PURE__ */ new Map();
56
+ this.options = options;
57
+ }
58
+ getClient() {
59
+ if (!isInitialized()) {
60
+ console.warn(
61
+ "[LlmOps] SDK not initialized. Call initLlmOps() before using the LangChain handler."
62
+ );
63
+ return null;
64
+ }
65
+ return getLlmOps();
66
+ }
67
+ // ===========================================
68
+ // CHAIN CALLBACKS
69
+ // ===========================================
70
+ async handleChainStart(chain, inputs, runId) {
71
+ const client = this.getClient();
72
+ if (!client) return;
73
+ const name = chain.id?.join("/") || "chain";
74
+ if (!this.traceHandle) {
75
+ this.traceHandle = client.trace({
76
+ name,
77
+ sessionId: this.options.sessionId,
78
+ userId: this.options.userId,
79
+ input: inputs,
80
+ metadata: this.options.metadata
81
+ });
82
+ } else {
83
+ const span = client.span({
84
+ name,
85
+ input: inputs
86
+ });
87
+ this.observationStack.set(runId, span);
88
+ }
89
+ }
90
+ async handleChainEnd(outputs, runId) {
91
+ const observation = this.observationStack.get(runId);
92
+ if (observation) {
93
+ await observation.end({ output: outputs });
94
+ this.observationStack.delete(runId);
95
+ } else if (this.traceHandle && this.observationStack.size === 0) {
96
+ await this.traceHandle.end({ output: outputs });
97
+ this.traceHandle = void 0;
98
+ }
99
+ }
100
+ async handleChainError(error, runId) {
101
+ const observation = this.observationStack.get(runId);
102
+ if (observation) {
103
+ await observation.error(error);
104
+ this.observationStack.delete(runId);
105
+ } else if (this.traceHandle) {
106
+ await this.traceHandle.error(error);
107
+ this.traceHandle = void 0;
108
+ }
109
+ }
110
+ // ===========================================
111
+ // LLM CALLBACKS
112
+ // ===========================================
113
+ async handleLLMStart(llm, prompts, runId) {
114
+ const client = this.getClient();
115
+ if (!client) return;
116
+ const name = llm.id?.join("/") || "llm";
117
+ const modelName = getModelName(llm, name);
118
+ const generation = client.generation({
119
+ name,
120
+ modelName,
121
+ input: { prompts }
122
+ });
123
+ this.observationStack.set(runId, generation);
124
+ }
125
+ async handleLLMEnd(output, runId) {
126
+ const observation = this.observationStack.get(runId);
127
+ if (!observation) return;
128
+ const tokenUsage = output.llmOutput?.tokenUsage;
129
+ await observation.end({
130
+ output: { generations: output.generations },
131
+ promptTokens: tokenUsage?.promptTokens,
132
+ completionTokens: tokenUsage?.completionTokens
133
+ });
134
+ this.observationStack.delete(runId);
135
+ }
136
+ async handleLLMError(error, runId) {
137
+ const observation = this.observationStack.get(runId);
138
+ if (observation) {
139
+ await observation.error(error);
140
+ this.observationStack.delete(runId);
141
+ }
142
+ }
143
+ // ===========================================
144
+ // CHAT MODEL CALLBACKS
145
+ // ===========================================
146
+ async handleChatModelStart(llm, messages, runId) {
147
+ const client = this.getClient();
148
+ if (!client) return;
149
+ const name = llm.id?.join("/") || "chat";
150
+ const modelName = getModelName(llm, name);
151
+ const generation = client.generation({
152
+ name,
153
+ modelName,
154
+ input: { messages }
155
+ });
156
+ this.observationStack.set(runId, generation);
157
+ }
158
+ // ===========================================
159
+ // TOOL CALLBACKS
160
+ // ===========================================
161
+ async handleToolStart(tool, input, runId) {
162
+ const client = this.getClient();
163
+ if (!client) return;
164
+ const name = tool.id?.join("/") || "tool";
165
+ const span = client.span({
166
+ name,
167
+ input: { input }
168
+ });
169
+ this.observationStack.set(runId, span);
170
+ }
171
+ async handleToolEnd(output, runId) {
172
+ const observation = this.observationStack.get(runId);
173
+ if (observation) {
174
+ await observation.end({ output: { output } });
175
+ this.observationStack.delete(runId);
176
+ }
177
+ }
178
+ async handleToolError(error, runId) {
179
+ const observation = this.observationStack.get(runId);
180
+ if (observation) {
181
+ await observation.error(error);
182
+ this.observationStack.delete(runId);
183
+ }
184
+ }
185
+ // ===========================================
186
+ // RETRIEVER CALLBACKS
187
+ // ===========================================
188
+ async handleRetrieverStart(retriever, query, runId) {
189
+ const client = this.getClient();
190
+ if (!client) return;
191
+ const name = retriever.id?.join("/") || "retriever";
192
+ const span = client.span({
193
+ name,
194
+ input: { query }
195
+ });
196
+ this.observationStack.set(runId, span);
197
+ }
198
+ async handleRetrieverEnd(documents, runId) {
199
+ const observation = this.observationStack.get(runId);
200
+ if (observation) {
201
+ await observation.end({ output: { documents } });
202
+ this.observationStack.delete(runId);
203
+ }
204
+ }
205
+ async handleRetrieverError(error, runId) {
206
+ const observation = this.observationStack.get(runId);
207
+ if (observation) {
208
+ await observation.error(error);
209
+ this.observationStack.delete(runId);
210
+ }
211
+ }
212
+ };
213
+ // Annotate the CommonJS export names for ESM import in node:
214
+ 0 && (module.exports = {
215
+ TransactionalCallbackHandler
216
+ });
@@ -0,0 +1,177 @@
1
+ import {
2
+ getLlmOps,
3
+ isInitialized
4
+ } from "../chunk-IR6P3PV4.mjs";
5
+
6
// src/integrations/langchain.ts
// Derive a model name from a LangChain `Serialized` payload.
// Constructor args are serialized under `kwargs`; prefer `model`, then
// `modelName`, then the caller-supplied fallback.
function getModelName(serialized, fallback) {
  const kwargs = "kwargs" in serialized ? serialized.kwargs : void 0;
  if (!kwargs) return fallback;
  return String(kwargs.model || kwargs.modelName || fallback);
}
14
+ var TransactionalCallbackHandler = class {
15
+ constructor(options = {}) {
16
+ this.name = "TransactionalCallbackHandler";
17
+ this.observationStack = /* @__PURE__ */ new Map();
18
+ this.options = options;
19
+ }
20
+ getClient() {
21
+ if (!isInitialized()) {
22
+ console.warn(
23
+ "[LlmOps] SDK not initialized. Call initLlmOps() before using the LangChain handler."
24
+ );
25
+ return null;
26
+ }
27
+ return getLlmOps();
28
+ }
29
+ // ===========================================
30
+ // CHAIN CALLBACKS
31
+ // ===========================================
32
+ async handleChainStart(chain, inputs, runId) {
33
+ const client = this.getClient();
34
+ if (!client) return;
35
+ const name = chain.id?.join("/") || "chain";
36
+ if (!this.traceHandle) {
37
+ this.traceHandle = client.trace({
38
+ name,
39
+ sessionId: this.options.sessionId,
40
+ userId: this.options.userId,
41
+ input: inputs,
42
+ metadata: this.options.metadata
43
+ });
44
+ } else {
45
+ const span = client.span({
46
+ name,
47
+ input: inputs
48
+ });
49
+ this.observationStack.set(runId, span);
50
+ }
51
+ }
52
+ async handleChainEnd(outputs, runId) {
53
+ const observation = this.observationStack.get(runId);
54
+ if (observation) {
55
+ await observation.end({ output: outputs });
56
+ this.observationStack.delete(runId);
57
+ } else if (this.traceHandle && this.observationStack.size === 0) {
58
+ await this.traceHandle.end({ output: outputs });
59
+ this.traceHandle = void 0;
60
+ }
61
+ }
62
+ async handleChainError(error, runId) {
63
+ const observation = this.observationStack.get(runId);
64
+ if (observation) {
65
+ await observation.error(error);
66
+ this.observationStack.delete(runId);
67
+ } else if (this.traceHandle) {
68
+ await this.traceHandle.error(error);
69
+ this.traceHandle = void 0;
70
+ }
71
+ }
72
+ // ===========================================
73
+ // LLM CALLBACKS
74
+ // ===========================================
75
+ async handleLLMStart(llm, prompts, runId) {
76
+ const client = this.getClient();
77
+ if (!client) return;
78
+ const name = llm.id?.join("/") || "llm";
79
+ const modelName = getModelName(llm, name);
80
+ const generation = client.generation({
81
+ name,
82
+ modelName,
83
+ input: { prompts }
84
+ });
85
+ this.observationStack.set(runId, generation);
86
+ }
87
+ async handleLLMEnd(output, runId) {
88
+ const observation = this.observationStack.get(runId);
89
+ if (!observation) return;
90
+ const tokenUsage = output.llmOutput?.tokenUsage;
91
+ await observation.end({
92
+ output: { generations: output.generations },
93
+ promptTokens: tokenUsage?.promptTokens,
94
+ completionTokens: tokenUsage?.completionTokens
95
+ });
96
+ this.observationStack.delete(runId);
97
+ }
98
+ async handleLLMError(error, runId) {
99
+ const observation = this.observationStack.get(runId);
100
+ if (observation) {
101
+ await observation.error(error);
102
+ this.observationStack.delete(runId);
103
+ }
104
+ }
105
+ // ===========================================
106
+ // CHAT MODEL CALLBACKS
107
+ // ===========================================
108
+ async handleChatModelStart(llm, messages, runId) {
109
+ const client = this.getClient();
110
+ if (!client) return;
111
+ const name = llm.id?.join("/") || "chat";
112
+ const modelName = getModelName(llm, name);
113
+ const generation = client.generation({
114
+ name,
115
+ modelName,
116
+ input: { messages }
117
+ });
118
+ this.observationStack.set(runId, generation);
119
+ }
120
+ // ===========================================
121
+ // TOOL CALLBACKS
122
+ // ===========================================
123
+ async handleToolStart(tool, input, runId) {
124
+ const client = this.getClient();
125
+ if (!client) return;
126
+ const name = tool.id?.join("/") || "tool";
127
+ const span = client.span({
128
+ name,
129
+ input: { input }
130
+ });
131
+ this.observationStack.set(runId, span);
132
+ }
133
+ async handleToolEnd(output, runId) {
134
+ const observation = this.observationStack.get(runId);
135
+ if (observation) {
136
+ await observation.end({ output: { output } });
137
+ this.observationStack.delete(runId);
138
+ }
139
+ }
140
+ async handleToolError(error, runId) {
141
+ const observation = this.observationStack.get(runId);
142
+ if (observation) {
143
+ await observation.error(error);
144
+ this.observationStack.delete(runId);
145
+ }
146
+ }
147
+ // ===========================================
148
+ // RETRIEVER CALLBACKS
149
+ // ===========================================
150
+ async handleRetrieverStart(retriever, query, runId) {
151
+ const client = this.getClient();
152
+ if (!client) return;
153
+ const name = retriever.id?.join("/") || "retriever";
154
+ const span = client.span({
155
+ name,
156
+ input: { query }
157
+ });
158
+ this.observationStack.set(runId, span);
159
+ }
160
+ async handleRetrieverEnd(documents, runId) {
161
+ const observation = this.observationStack.get(runId);
162
+ if (observation) {
163
+ await observation.end({ output: { documents } });
164
+ this.observationStack.delete(runId);
165
+ }
166
+ }
167
+ async handleRetrieverError(error, runId) {
168
+ const observation = this.observationStack.get(runId);
169
+ if (observation) {
170
+ await observation.error(error);
171
+ this.observationStack.delete(runId);
172
+ }
173
+ }
174
+ };
175
+ export {
176
+ TransactionalCallbackHandler
177
+ };
@@ -0,0 +1,72 @@
1
+ export { getLlmOps, initLlmOps } from '../index.mjs';
2
+
3
+ /**
4
+ * Vercel AI SDK Integration
5
+ *
6
+ * Wrapper functions for automatic tracing of Vercel AI SDK calls.
7
+ *
8
+ * @example
9
+ * ```typescript
10
+ * import { wrapAiSdk } from '@usetransactional/llm-node/vercel-ai';
11
+ * import { generateText } from 'ai';
12
+ *
13
+ * const wrappedGenerateText = wrapAiSdk(generateText);
14
+ *
15
+ * const { text } = await wrappedGenerateText({
16
+ * model: openai('gpt-4o'),
17
+ * prompt: 'Hello!',
18
+ * });
19
+ * ```
20
+ */
21
/** Attribution metadata forwarded via `experimental_telemetry.metadata`. */
interface TelemetryMetadata {
    userId?: string;
    sessionId?: string;
    [key: string]: unknown;
}
/** Options object accepted by wrapped AI SDK calls (subset the wrapper inspects). */
interface WrappedOptions {
    experimental_telemetry?: {
        metadata?: TelemetryMetadata;
    };
    model?: {
        modelId?: string;
        provider?: string;
    };
    prompt?: string;
    messages?: unknown[];
    [key: string]: unknown;
}
/** Minimal result shape read back from generateText-style calls. */
interface GenerateResult {
    text?: string;
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };
    [key: string]: unknown;
}
/** An AI SDK call such as `generateText`: options in, promised result out. */
type AiFn<T> = (options: WrappedOptions) => Promise<T>;
48
/**
 * Wrap a Vercel AI SDK function to automatically trace calls.
 *
 * @param fn - The AI SDK function to wrap (e.g., generateText, streamText)
 * @returns A wrapped function that traces the call
 *
 * @example
 * ```typescript
 * const wrappedGenerateText = wrapAiSdk(generateText);
 * const { text } = await wrappedGenerateText({
 *   model: openai('gpt-4o'),
 *   prompt: 'Hello!',
 * });
 * ```
 */
declare function wrapAiSdk<T extends GenerateResult>(fn: AiFn<T>): AiFn<T>;
/**
 * Create a traced version of streamText.
 *
 * For streaming responses, this wraps the stream and captures
 * the final token counts when the stream completes.
 */
declare function wrapStreamText<T extends AsyncIterable<unknown>>(fn: (options: WrappedOptions) => Promise<T>): (options: WrappedOptions) => Promise<T>;
71
+
72
+ export { wrapAiSdk, wrapStreamText };
@@ -0,0 +1,72 @@
1
+ export { getLlmOps, initLlmOps } from '../index.js';
2
+
3
+ /**
4
+ * Vercel AI SDK Integration
5
+ *
6
+ * Wrapper functions for automatic tracing of Vercel AI SDK calls.
7
+ *
8
+ * @example
9
+ * ```typescript
10
+ * import { wrapAiSdk } from '@usetransactional/llm-node/vercel-ai';
11
+ * import { generateText } from 'ai';
12
+ *
13
+ * const wrappedGenerateText = wrapAiSdk(generateText);
14
+ *
15
+ * const { text } = await wrappedGenerateText({
16
+ * model: openai('gpt-4o'),
17
+ * prompt: 'Hello!',
18
+ * });
19
+ * ```
20
+ */
21
/** Attribution metadata forwarded via `experimental_telemetry.metadata`. */
interface TelemetryMetadata {
    userId?: string;
    sessionId?: string;
    [key: string]: unknown;
}
/** Options object accepted by wrapped AI SDK calls (subset the wrapper inspects). */
interface WrappedOptions {
    experimental_telemetry?: {
        metadata?: TelemetryMetadata;
    };
    model?: {
        modelId?: string;
        provider?: string;
    };
    prompt?: string;
    messages?: unknown[];
    [key: string]: unknown;
}
/** Minimal result shape read back from generateText-style calls. */
interface GenerateResult {
    text?: string;
    usage?: {
        promptTokens?: number;
        completionTokens?: number;
        totalTokens?: number;
    };
    [key: string]: unknown;
}
/** An AI SDK call such as `generateText`: options in, promised result out. */
type AiFn<T> = (options: WrappedOptions) => Promise<T>;
48
/**
 * Wrap a Vercel AI SDK function to automatically trace calls.
 *
 * @param fn - The AI SDK function to wrap (e.g., generateText, streamText)
 * @returns A wrapped function that traces the call
 *
 * @example
 * ```typescript
 * const wrappedGenerateText = wrapAiSdk(generateText);
 * const { text } = await wrappedGenerateText({
 *   model: openai('gpt-4o'),
 *   prompt: 'Hello!',
 * });
 * ```
 */
declare function wrapAiSdk<T extends GenerateResult>(fn: AiFn<T>): AiFn<T>;
/**
 * Create a traced version of streamText.
 *
 * For streaming responses, this wraps the stream and captures
 * the final token counts when the stream completes.
 */
declare function wrapStreamText<T extends AsyncIterable<unknown>>(fn: (options: WrappedOptions) => Promise<T>): (options: WrappedOptions) => Promise<T>;
71
+
72
+ export { wrapAiSdk, wrapStreamText };