langsmith 0.3.56 → 0.3.57-rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/experimental/vercel/index.cjs +279 -0
- package/dist/experimental/vercel/index.d.ts +30 -0
- package/dist/experimental/vercel/index.js +276 -0
- package/dist/experimental/vercel/middleware.cjs +214 -0
- package/dist/experimental/vercel/middleware.d.ts +31 -0
- package/dist/experimental/vercel/middleware.js +209 -0
- package/dist/index.cjs +1 -1
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/utils/vercel.cjs +7 -5
- package/dist/utils/vercel.d.ts +1 -0
- package/dist/utils/vercel.js +6 -5
- package/dist/wrappers/vercel.cjs +1 -0
- package/dist/wrappers/vercel.d.ts +1 -0
- package/dist/wrappers/vercel.js +1 -0
- package/experimental/vercel.cjs +1 -0
- package/experimental/vercel.d.cts +1 -0
- package/experimental/vercel.d.ts +1 -0
- package/experimental/vercel.js +1 -0
- package/package.json +17 -4
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.wrapAISDK = void 0;
|
|
4
|
+
/* eslint-disable import/no-extraneous-dependencies */
|
|
5
|
+
const middleware_js_1 = require("./middleware.cjs");
|
|
6
|
+
const traceable_js_1 = require("../../traceable.cjs");
|
|
7
|
+
const _wrapTools = (tools) => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const wrappedTools = {};
    if (tools) {
        for (const [key, tool] of Object.entries(tools)) {
            wrappedTools[key] = tool;
            if (tool != null &&
                typeof tool === "object" &&
                "execute" in tool &&
                typeof tool.execute === "function") {
                // Copy the tool (preserving its prototype and property
                // descriptors) before swapping in the traced `execute`, so the
                // caller's original tool object is not mutated. The previous
                // implementation assigned `.execute` directly on the caller's
                // object, since `wrappedTools[key]` was the same reference.
                const copy = Object.create(Object.getPrototypeOf(tool), Object.getOwnPropertyDescriptors(tool));
                copy.execute = (0, traceable_js_1.traceable)(tool.execute.bind(tool), {
                    name: key,
                    run_type: "tool",
                });
                wrappedTools[key] = copy;
            }
        }
    }
    return wrappedTools;
};
|
|
26
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
// Human-readable name for a model: the string itself when a bare model id is
// passed, the provider name from the model's config when present, otherwise
// the model id (falling back to "unknown").
const _getModelDisplayName = (model) => {
    if (typeof model === "string") {
        return model;
    }
    const provider = model.config != null && typeof model.config === "object"
        ? model.config.provider
        : undefined;
    if (typeof provider === "string") {
        return provider;
    }
    return model.modelId ?? "unknown";
};
|
|
38
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
// Model identifier for tracing metadata: the string itself when a bare model
// id is passed, otherwise the model's `modelId` (only when it is a string).
const _getModelId = (model) => {
    if (typeof model === "string") {
        return model;
    }
    const { modelId } = model;
    if (typeof modelId === "string") {
        return modelId;
    }
    return undefined;
};
|
|
45
|
+
// Strips `model`/`tools` from the traced inputs and normalizes chat history:
// an array-valued `prompt` (or `messages`) becomes a `messages` list with
// tool calls populated for tracing; otherwise prompt/messages pass through.
const _formatTracedInputs = (params) => {
    const { prompt, messages, model, tools, ...rest } = params;
    const history = Array.isArray(prompt)
        ? prompt
        : Array.isArray(messages)
            ? messages
            : undefined;
    if (history !== undefined) {
        return { ...rest, messages: history.map(middleware_js_1.populateToolCallsForTracing) };
    }
    return { ...rest, prompt, messages };
};
|
|
57
|
+
/**
 * Wraps Vercel AI SDK 5 functions with LangSmith tracing capabilities.
 *
 * @param methods - Object containing AI SDK methods to wrap
 * @param methods.wrapLanguageModel - AI SDK's wrapLanguageModel function
 * @param methods.generateText - AI SDK's generateText function
 * @param methods.streamText - AI SDK's streamText function
 * @param methods.streamObject - AI SDK's streamObject function
 * @param methods.generateObject - AI SDK's generateObject function
 * @param lsConfig - Optional LangSmith run tree config applied to every traced call
 *
 * @returns Object containing wrapped versions of the AI SDK functions with LangSmith tracing
 * @returns returns.generateText - Wrapped generateText function that traces calls to LangSmith
 * @returns returns.generateObject - Wrapped generateObject function that traces calls to LangSmith
 * @returns returns.streamText - Wrapped streamText function that traces calls to LangSmith
 * @returns returns.streamObject - Wrapped streamObject function that traces calls to LangSmith
 */
const wrapAISDK = ({ wrapLanguageModel, generateText, streamText, streamObject, generateObject, }, lsConfig) => {
    /**
     * Wrapped version of AI SDK 5's generateText with LangSmith tracing.
     *
     * This function has the same signature and behavior as the original generateText,
     * but adds automatic tracing to LangSmith for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { generateText } = wrapAISDK(ai);
     * const { text } = await generateText(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-text} Original generateText documentation
     * @param params - Same parameters as the original generateText function
     * @returns Promise resolving to the same result as generateText, with tracing applied
     */
    const wrappedGenerateText = async (params) => {
        const traceableFunc = (0, traceable_js_1.traceable)(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: (0, middleware_js_1.LangSmithMiddleware)({
                    name: _getModelDisplayName(params.model),
                    // Use _getModelId for consistency with the other wrappers:
                    // `params.model` may be a bare string, which has no `.modelId`.
                    modelId: _getModelId(params.model),
                }),
            });
            return generateText({
                ...params,
                tools: _wrapTools(params.tools),
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.generateText",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
            processOutputs: (outputs) => {
                if (outputs.outputs == null || typeof outputs.outputs !== "object") {
                    return outputs;
                }
                // Surface only the final step's assistant message as the traced
                // output; intermediate tool-call steps are traced separately.
                const { steps } = outputs.outputs;
                if (Array.isArray(steps)) {
                    const lastStep = steps.at(-1);
                    if (lastStep == null || typeof lastStep !== "object") {
                        return outputs;
                    }
                    const { content } = lastStep;
                    return (0, middleware_js_1.populateToolCallsForTracing)({
                        content,
                        role: "assistant",
                    });
                }
                else {
                    return outputs;
                }
            },
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's generateObject with LangSmith tracing.
     *
     * This function has the same signature and behavior as the original generateObject,
     * but adds automatic tracing to LangSmith for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { generateObject } = wrapAISDK(ai);
     * const { object } = await generateObject(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-structured-data} Original generateObject documentation
     * @param params - Same parameters as the original generateObject function
     * @returns Promise resolving to the same result as generateObject, with tracing applied
     */
    const wrappedGenerateObject = async (params) => {
        const traceableFunc = (0, traceable_js_1.traceable)(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: (0, middleware_js_1.LangSmithMiddleware)({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return generateObject({
                ...params,
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.generateObject",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
            processOutputs: (outputs) => {
                if (outputs.outputs == null || typeof outputs.outputs !== "object") {
                    return outputs;
                }
                // Trace only the generated object when available.
                return outputs.outputs.object ?? outputs;
            },
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's streamText with LangSmith tracing.
     *
     * Must be called with `await`, but otherwise behaves the same as the
     * original streamText and adds automatic tracing to LangSmith
     * for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { streamText } = wrapAISDK(ai);
     * const { textStream } = await streamText(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-text} Original streamText documentation
     * @param params - Same parameters as the original streamText function
     * @returns Promise resolving to the same result as streamText, with tracing applied
     */
    const wrappedStreamText = async (params) => {
        const traceableFunc = (0, traceable_js_1.traceable)(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: (0, middleware_js_1.LangSmithMiddleware)({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return streamText({
                ...params,
                tools: _wrapTools(params.tools),
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.streamText",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's streamObject with LangSmith tracing.
     *
     * Must be called with `await`, but otherwise behaves the same as the
     * original streamObject and adds automatic tracing to LangSmith
     * for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { streamObject } = wrapAISDK(ai);
     * const { partialObjectStream } = await streamObject(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-structured-data} Original streamObject documentation
     * @param params - Same parameters as the original streamObject function
     * @returns Promise resolving to the same result as streamObject, with tracing applied
     */
    const wrappedStreamObject = async (params) => {
        const traceableFunc = (0, traceable_js_1.traceable)(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: (0, middleware_js_1.LangSmithMiddleware)({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return streamObject({
                ...params,
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.streamObject",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
        });
        return traceableFunc(params);
    };
    return {
        generateText: wrappedGenerateText,
        generateObject: wrappedGenerateObject,
        streamText: wrappedStreamText,
        streamObject: wrappedStreamObject,
    };
};
exports.wrapAISDK = wrapAISDK;
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
import { RunTreeConfig } from "../../run_trees.js";
/**
 * Wraps Vercel AI SDK 5 functions with LangSmith tracing capabilities.
 *
 * @param methods - Object containing AI SDK methods to wrap
 * @param methods.wrapLanguageModel - AI SDK's wrapLanguageModel function
 * @param methods.generateText - AI SDK's generateText function
 * @param methods.streamText - AI SDK's streamText function
 * @param methods.streamObject - AI SDK's streamObject function
 * @param methods.generateObject - AI SDK's generateObject function
 * @param lsConfig - Optional LangSmith run config (inputs/outputs are managed internally and excluded)
 *
 * @returns Object containing wrapped versions of the AI SDK functions with LangSmith tracing
 * @returns returns.generateText - Wrapped generateText function that traces calls to LangSmith
 * @returns returns.generateObject - Wrapped generateObject function that traces calls to LangSmith
 * @returns returns.streamText - Wrapped streamText function that traces calls to LangSmith
 * @returns returns.streamObject - Wrapped streamObject function that traces calls to LangSmith
 */
declare const wrapAISDK: <WrapLanguageModelType extends (...args: any[]) => any, GenerateTextType extends (...args: any[]) => any, StreamTextType extends (...args: any[]) => any, StreamObjectType extends (...args: any[]) => any, GenerateObjectType extends (...args: any[]) => any>({ wrapLanguageModel, generateText, streamText, streamObject, generateObject, }: {
    wrapLanguageModel: WrapLanguageModelType;
    generateText: GenerateTextType;
    streamText: StreamTextType;
    streamObject: StreamObjectType;
    generateObject: GenerateObjectType;
}, lsConfig?: Partial<Omit<RunTreeConfig, "inputs" | "outputs">>) => {
    generateText: (params: Parameters<GenerateTextType>[0]) => Promise<ReturnType<GenerateTextType>>;
    generateObject: (params: Parameters<GenerateObjectType>[0]) => Promise<ReturnType<GenerateObjectType>>;
    // NOTE: unlike the originals, the wrapped streamText/streamObject return
    // Promises and therefore must be awaited before consuming the stream.
    streamText: (params: Parameters<StreamTextType>[0]) => Promise<ReturnType<StreamTextType>>;
    streamObject: (params: Parameters<StreamObjectType>[0]) => Promise<ReturnType<StreamObjectType>>;
};
export { wrapAISDK };
|
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
/* eslint-disable import/no-extraneous-dependencies */
|
|
2
|
+
import { LangSmithMiddleware, populateToolCallsForTracing, } from "./middleware.js";
|
|
3
|
+
import { traceable } from "../../traceable.js";
|
|
4
|
+
const _wrapTools = (tools) => {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const wrappedTools = {};
    if (tools) {
        for (const [key, tool] of Object.entries(tools)) {
            wrappedTools[key] = tool;
            if (tool != null &&
                typeof tool === "object" &&
                "execute" in tool &&
                typeof tool.execute === "function") {
                // Copy the tool (preserving its prototype and property
                // descriptors) before swapping in the traced `execute`, so the
                // caller's original tool object is not mutated. The previous
                // implementation assigned `.execute` directly on the caller's
                // object, since `wrappedTools[key]` was the same reference.
                const copy = Object.create(Object.getPrototypeOf(tool), Object.getOwnPropertyDescriptors(tool));
                copy.execute = traceable(tool.execute.bind(tool), {
                    name: key,
                    run_type: "tool",
                });
                wrappedTools[key] = copy;
            }
        }
    }
    return wrappedTools;
};
|
|
23
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
// Human-readable name for a model: the string itself when a bare model id is
// passed, the provider name from the model's config when present, otherwise
// the model id (falling back to "unknown").
const _getModelDisplayName = (model) => {
    if (typeof model === "string") {
        return model;
    }
    const provider = model.config != null && typeof model.config === "object"
        ? model.config.provider
        : undefined;
    if (typeof provider === "string") {
        return provider;
    }
    return model.modelId ?? "unknown";
};
|
|
35
|
+
/* eslint-disable @typescript-eslint/no-explicit-any */
// Model identifier for tracing metadata: the string itself when a bare model
// id is passed, otherwise the model's `modelId` (only when it is a string).
const _getModelId = (model) => {
    if (typeof model === "string") {
        return model;
    }
    const { modelId } = model;
    if (typeof modelId === "string") {
        return modelId;
    }
    return undefined;
};
|
|
42
|
+
// Strips `model`/`tools` from the traced inputs and normalizes chat history:
// an array-valued `prompt` (or `messages`) becomes a `messages` list with
// tool calls populated for tracing; otherwise prompt/messages pass through.
const _formatTracedInputs = (params) => {
    const { prompt, messages, model, tools, ...rest } = params;
    const history = Array.isArray(prompt)
        ? prompt
        : Array.isArray(messages)
            ? messages
            : undefined;
    if (history !== undefined) {
        return { ...rest, messages: history.map(populateToolCallsForTracing) };
    }
    return { ...rest, prompt, messages };
};
|
|
54
|
+
/**
 * Wraps Vercel AI SDK 5 functions with LangSmith tracing capabilities.
 *
 * @param methods - Object containing AI SDK methods to wrap
 * @param methods.wrapLanguageModel - AI SDK's wrapLanguageModel function
 * @param methods.generateText - AI SDK's generateText function
 * @param methods.streamText - AI SDK's streamText function
 * @param methods.streamObject - AI SDK's streamObject function
 * @param methods.generateObject - AI SDK's generateObject function
 * @param lsConfig - Optional LangSmith run tree config applied to every traced call
 *
 * @returns Object containing wrapped versions of the AI SDK functions with LangSmith tracing
 * @returns returns.generateText - Wrapped generateText function that traces calls to LangSmith
 * @returns returns.generateObject - Wrapped generateObject function that traces calls to LangSmith
 * @returns returns.streamText - Wrapped streamText function that traces calls to LangSmith
 * @returns returns.streamObject - Wrapped streamObject function that traces calls to LangSmith
 */
const wrapAISDK = ({ wrapLanguageModel, generateText, streamText, streamObject, generateObject, }, lsConfig) => {
    /**
     * Wrapped version of AI SDK 5's generateText with LangSmith tracing.
     *
     * This function has the same signature and behavior as the original generateText,
     * but adds automatic tracing to LangSmith for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { generateText } = wrapAISDK(ai);
     * const { text } = await generateText(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-text} Original generateText documentation
     * @param params - Same parameters as the original generateText function
     * @returns Promise resolving to the same result as generateText, with tracing applied
     */
    const wrappedGenerateText = async (params) => {
        const traceableFunc = traceable(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: LangSmithMiddleware({
                    name: _getModelDisplayName(params.model),
                    // Use _getModelId for consistency with the other wrappers:
                    // `params.model` may be a bare string, which has no `.modelId`.
                    modelId: _getModelId(params.model),
                }),
            });
            return generateText({
                ...params,
                tools: _wrapTools(params.tools),
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.generateText",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
            processOutputs: (outputs) => {
                if (outputs.outputs == null || typeof outputs.outputs !== "object") {
                    return outputs;
                }
                // Surface only the final step's assistant message as the traced
                // output; intermediate tool-call steps are traced separately.
                const { steps } = outputs.outputs;
                if (Array.isArray(steps)) {
                    const lastStep = steps.at(-1);
                    if (lastStep == null || typeof lastStep !== "object") {
                        return outputs;
                    }
                    const { content } = lastStep;
                    return populateToolCallsForTracing({
                        content,
                        role: "assistant",
                    });
                }
                else {
                    return outputs;
                }
            },
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's generateObject with LangSmith tracing.
     *
     * This function has the same signature and behavior as the original generateObject,
     * but adds automatic tracing to LangSmith for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { generateObject } = wrapAISDK(ai);
     * const { object } = await generateObject(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-structured-data} Original generateObject documentation
     * @param params - Same parameters as the original generateObject function
     * @returns Promise resolving to the same result as generateObject, with tracing applied
     */
    const wrappedGenerateObject = async (params) => {
        const traceableFunc = traceable(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: LangSmithMiddleware({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return generateObject({
                ...params,
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.generateObject",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
            processOutputs: (outputs) => {
                if (outputs.outputs == null || typeof outputs.outputs !== "object") {
                    return outputs;
                }
                // Trace only the generated object when available.
                return outputs.outputs.object ?? outputs;
            },
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's streamText with LangSmith tracing.
     *
     * Must be called with `await`, but otherwise behaves the same as the
     * original streamText and adds automatic tracing to LangSmith
     * for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { streamText } = wrapAISDK(ai);
     * const { textStream } = await streamText(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-text} Original streamText documentation
     * @param params - Same parameters as the original streamText function
     * @returns Promise resolving to the same result as streamText, with tracing applied
     */
    const wrappedStreamText = async (params) => {
        const traceableFunc = traceable(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: LangSmithMiddleware({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return streamText({
                ...params,
                tools: _wrapTools(params.tools),
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.streamText",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
        });
        return traceableFunc(params);
    };
    /**
     * Wrapped version of AI SDK 5's streamObject with LangSmith tracing.
     *
     * Must be called with `await`, but otherwise behaves the same as the
     * original streamObject and adds automatic tracing to LangSmith
     * for observability.
     *
     * ```ts
     * import * as ai from "ai";
     * import { wrapAISDK } from "langsmith/experimental/vercel";
     *
     * const { streamObject } = wrapAISDK(ai);
     * const { partialObjectStream } = await streamObject(...);
     * ```
     *
     * @see {@link https://sdk.vercel.ai/docs/ai-sdk-core/generating-structured-data} Original streamObject documentation
     * @param params - Same parameters as the original streamObject function
     * @returns Promise resolving to the same result as streamObject, with tracing applied
     */
    const wrappedStreamObject = async (params) => {
        const traceableFunc = traceable(async (params) => {
            const wrappedModel = wrapLanguageModel({
                model: params.model,
                middleware: LangSmithMiddleware({
                    name: _getModelDisplayName(params.model),
                    modelId: _getModelId(params.model),
                }),
            });
            return streamObject({
                ...params,
                model: wrappedModel,
            });
        }, {
            name: _getModelDisplayName(params.model),
            ...lsConfig,
            metadata: {
                ai_sdk_method: "ai.streamObject",
                ...lsConfig?.metadata,
            },
            processInputs: (inputs) => _formatTracedInputs(inputs),
        });
        return traceableFunc(params);
    };
    return {
        generateText: wrappedGenerateText,
        generateObject: wrappedGenerateObject,
        streamText: wrappedStreamText,
        streamObject: wrappedStreamObject,
    };
};
export { wrapAISDK };
|
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.populateToolCallsForTracing = void 0;
|
|
4
|
+
exports.LangSmithMiddleware = LangSmithMiddleware;
|
|
5
|
+
const traceable_js_1 = require("../../traceable.cjs");
|
|
6
|
+
const vercel_js_1 = require("../../utils/vercel.cjs");
|
|
7
|
+
// Normalizes an assistant message for LangSmith tracing: any `tool-call`
// content blocks are mirrored into an OpenAI-style `tool_calls` array so the
// LangSmith UI can render them. Non-assistant messages pass through unchanged.
const populateToolCallsForTracing = (message) => {
    const formattedMessage = {
        ...message,
    };
    if (formattedMessage.role !== "assistant") {
        return formattedMessage;
    }
    if (Array.isArray(formattedMessage.content)) {
        const toolCalls = formattedMessage.content
            .filter((block) => {
            return (block != null &&
                typeof block === "object" &&
                // Strict equality; the loose `==` here was an oversight.
                block.type === "tool-call");
        })
            .map((block) => {
            return {
                id: block.toolCallId,
                type: "function",
                function: {
                    name: block.toolName,
                    // Tool inputs may already be serialized; stringify otherwise.
                    arguments: typeof block.input !== "string"
                        ? JSON.stringify(block.input)
                        : block.input,
                },
            };
        });
        if (toolCalls.length > 0) {
            formattedMessage.tool_calls = toolCalls;
        }
    }
    return formattedMessage;
};
|
|
39
|
+
exports.populateToolCallsForTracing = populateToolCallsForTracing;
|
|
40
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
// Formats raw model-call params for tracing: a chat-style array prompt is
// renamed to `messages` (with tool calls populated); a non-array prompt is
// dropped; params without a prompt pass through untouched.
const _formatTracedInputs = (params) => {
    const { prompt, ...rest } = params;
    if (prompt == null) {
        return params;
    }
    return Array.isArray(prompt)
        ? { ...rest, messages: prompt.map(exports.populateToolCallsForTracing) }
        : rest;
};
|
|
51
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
// Formats raw model-call outputs for tracing: drops the verbose
// request/response payloads, defaults a missing role (from `type`, falling
// back to "assistant"), and populates tool calls on the result.
const _formatTracedOutputs = (outputs) => {
    const { request, response, ...formattedOutputs } = outputs;
    formattedOutputs.role ??= formattedOutputs.type ?? "assistant";
    return (0, exports.populateToolCallsForTracing)(formattedOutputs);
};
|
|
60
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
// Copies AI SDK token usage (plus provider-specific input token details,
// e.g. cache reads) from a model result onto the run tree's metadata as
// LangSmith `usage_metadata`. No-op when the result carries no usage object.
const setUsageMetadataOnRunTree = (result, runTree) => {
    const { usage } = result;
    if (usage == null || typeof usage !== "object") {
        return;
    }
    const inputTokenDetails = (0, vercel_js_1.extractInputTokenDetails)(result.providerMetadata ?? {}, usage.cachedInputTokens);
    const usageMetadata = {
        input_tokens: usage.inputTokens,
        output_tokens: usage.outputTokens,
        total_tokens: usage.totalTokens,
        input_token_details: { ...inputTokenDetails },
    };
    runTree.extra = {
        ...runTree.extra,
        metadata: {
            ...runTree.extra?.metadata,
            usage_metadata: usageMetadata,
        },
    };
};
|
|
84
|
+
/**
 * AI SDK middleware that wraps an AI SDK 5 model and adds LangSmith tracing.
 *
 * Returns an object with two middleware hooks:
 * - `wrapGenerate`: runs `doGenerate` inside a `traceable` "llm" run and
 *   records token usage on the active run tree.
 * - `wrapStream`: manually creates a child run (when a parent run exists),
 *   tees streamed chunks through a TransformStream, aggregates text /
 *   tool-call / finish chunks, and finalizes the run when the stream flushes.
 *
 * @param {Object} [config] - Optional settings.
 * @param {string} [config.name] - Run name (defaults to "ai.doGenerate" /
 *   "ai.doStream").
 * @param {string} [config.modelId] - Recorded as `ls_model_name` metadata.
 * @param {Object} [config.lsConfig] - Extra RunTree config merged into each run.
 */
function LangSmithMiddleware(config) {
    const { name, modelId, lsConfig } = config ?? {};
    return {
        wrapGenerate: async ({ doGenerate, params }) => {
            // Trace the underlying generate call as an "llm" run.
            const traceableFunc = (0, traceable_js_1.traceable)(async (_params) => {
                const result = await doGenerate();
                // NOTE(review): the `true` argument appears to make this return
                // undefined (rather than throw) when no run is active — the
                // undefined-check below relies on that; confirm against traceable.cjs.
                const currentRunTree = (0, traceable_js_1.getCurrentRunTree)(true);
                if (currentRunTree !== undefined) {
                    setUsageMetadataOnRunTree(result, currentRunTree);
                }
                return result;
            }, {
                ...lsConfig,
                name: name ?? "ai.doGenerate",
                run_type: "llm",
                metadata: {
                    ls_model_name: modelId,
                    ai_sdk_method: "ai.doGenerate",
                    // Caller-provided metadata wins on key collisions.
                    ...lsConfig?.metadata,
                },
                processInputs: (inputs) => _formatTracedInputs(inputs),
                processOutputs: (outputs) => {
                    return _formatTracedOutputs(outputs);
                },
            });
            return traceableFunc(params);
        },
        wrapStream: async ({ doStream, params }) => {
            const parentRunTree = (0, traceable_js_1.getCurrentRunTree)(true);
            let runTree;
            // Only create a child run when a real run tree is present;
            // duck-type the check so non-RunTree contexts are ignored.
            if (parentRunTree != null &&
                typeof parentRunTree === "object" &&
                typeof parentRunTree.createChild === "function") {
                runTree = parentRunTree?.createChild({
                    ...lsConfig,
                    name: name ?? "ai.doStream",
                    run_type: "llm",
                    metadata: {
                        ls_model_name: modelId,
                        ai_sdk_method: "ai.doStream",
                        ...lsConfig?.metadata,
                    },
                    inputs: _formatTracedInputs(params),
                });
            }
            // Post the run up-front so it is visible while the stream runs.
            await runTree?.postRun();
            try {
                const { stream, ...rest } = await doStream();
                const chunks = [];
                // Tee every chunk into `chunks` while passing it through
                // unchanged to the consumer.
                const transformStream = new TransformStream({
                    async transform(chunk, controller) {
                        chunks.push(chunk);
                        controller.enqueue(chunk);
                    },
                    // Once the source stream ends, fold the buffered chunks
                    // into a single output object and close out the run.
                    async flush() {
                        try {
                            const output = chunks.reduce((aggregated, chunk) => {
                                if (chunk.type === "text-delta") {
                                    if (chunk.delta == null) {
                                        return aggregated;
                                    }
                                    // Concatenate streamed text deltas.
                                    return {
                                        ...aggregated,
                                        text: aggregated.text + chunk.delta,
                                    };
                                }
                                else if (chunk.type === "tool-call") {
                                    // De-duplicate by toolCallId: keep only the
                                    // first occurrence of each tool call.
                                    const matchingToolCall = aggregated.tool_calls.find((call) => call.id === chunk.toolCallId);
                                    if (matchingToolCall != null) {
                                        return aggregated;
                                    }
                                    return {
                                        ...aggregated,
                                        tool_calls: [
                                            ...aggregated.tool_calls,
                                            {
                                                id: chunk.toolCallId,
                                                type: "function",
                                                function: {
                                                    name: chunk.toolName,
                                                    arguments: chunk.input,
                                                },
                                            },
                                        ],
                                    };
                                }
                                else if (chunk.type === "finish") {
                                    // The finish chunk carries usage info;
                                    // record it on the run tree as a side effect.
                                    if (runTree != null) {
                                        setUsageMetadataOnRunTree(chunk, runTree);
                                    }
                                    return {
                                        ...aggregated,
                                        providerMetadata: chunk.providerMetadata,
                                        finishReason: chunk.finishReason,
                                    };
                                }
                                else {
                                    // Ignore chunk types we do not aggregate.
                                    return aggregated;
                                }
                            }, {
                                text: "",
                                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                                tool_calls: [],
                            });
                            await runTree?.end(_formatTracedOutputs(output));
                        }
                        catch (error) {
                            await runTree?.end(undefined, error.message ?? String(error));
                            throw error;
                        }
                        finally {
                            // Always flush the run update, success or failure.
                            await runTree?.patchRun();
                        }
                    },
                });
                return {
                    stream: stream.pipeThrough(transformStream),
                    ...rest,
                };
            }
            catch (error) {
                // doStream itself failed: mark the run as errored and rethrow.
                await runTree?.end(undefined, error.message ?? String(error));
                await runTree?.patchRun();
                throw error;
            }
        },
    };
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
// Type declarations for the LangSmith AI SDK middleware (compiler-generated
// from the TypeScript sources; the union below mirrors each message role's
// content shape while allowing extra properties such as `tool_calls`).
import type { LanguageModelV2Middleware, LanguageModelV2Message } from "@ai-sdk/provider";
import type { RunTreeConfig } from "../../run_trees.js";
/**
 * Normalizes an AI SDK message for tracing, returning a shallow copy whose
 * role-specific shape is preserved and which may carry added properties.
 */
export declare const populateToolCallsForTracing: (message: LanguageModelV2Message) => ({
    role: "system";
    content: string;
} & {
    providerOptions?: import("@ai-sdk/provider").SharedV2ProviderOptions;
} & Record<string, unknown>) | ({
    role: "user";
    content: Array<import("@ai-sdk/provider").LanguageModelV2TextPart | import("@ai-sdk/provider").LanguageModelV2FilePart>;
} & {
    providerOptions?: import("@ai-sdk/provider").SharedV2ProviderOptions;
} & Record<string, unknown>) | ({
    role: "assistant";
    content: Array<import("@ai-sdk/provider").LanguageModelV2TextPart | import("@ai-sdk/provider").LanguageModelV2FilePart | import("@ai-sdk/provider").LanguageModelV2ReasoningPart | import("@ai-sdk/provider").LanguageModelV2ToolCallPart | import("@ai-sdk/provider").LanguageModelV2ToolResultPart>;
} & {
    providerOptions?: import("@ai-sdk/provider").SharedV2ProviderOptions;
} & Record<string, unknown>) | ({
    role: "tool";
    content: Array<import("@ai-sdk/provider").LanguageModelV2ToolResultPart>;
} & {
    providerOptions?: import("@ai-sdk/provider").SharedV2ProviderOptions;
} & Record<string, unknown>);
/**
 * AI SDK middleware that wraps an AI SDK 5 model and adds LangSmith tracing.
 */
export declare function LangSmithMiddleware(config?: {
    name: string;
    modelId?: string;
    lsConfig?: Partial<Omit<RunTreeConfig, "inputs" | "outputs">>;
}): LanguageModelV2Middleware;
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
import { getCurrentRunTree, traceable } from "../../traceable.js";
|
|
2
|
+
import { extractInputTokenDetails } from "../../utils/vercel.js";
|
|
3
|
+
/**
 * Converts an AI SDK message into a LangSmith-friendly shape by lifting
 * `tool-call` content blocks of assistant messages into an OpenAI-style
 * `tool_calls` array.
 *
 * Non-assistant messages, and assistant messages whose content is not an
 * array (or contains no tool calls), are returned as shallow copies with no
 * `tool_calls` property added.
 *
 * @param {object} message - An AI SDK `LanguageModelV2Message`-shaped object.
 * @returns {object} A shallow copy of the message, with `tool_calls` attached
 *   when at least one `tool-call` content block is present.
 */
export const populateToolCallsForTracing = (message) => {
    const formattedMessage = {
        ...message,
    };
    if (formattedMessage.role !== "assistant") {
        return formattedMessage;
    }
    if (Array.isArray(formattedMessage.content)) {
        const toolCalls = formattedMessage.content
            .filter((block) => {
            // Fix: use strict equality (`===`) for the type check; the
            // original used loose `==`, which violates the strict-equality
            // idiom used elsewhere in this file.
            return (block != null &&
                typeof block === "object" &&
                block.type === "tool-call");
        })
            .map((block) => {
            return {
                id: block.toolCallId,
                type: "function",
                function: {
                    name: block.toolName,
                    // Tool input may already be a serialized string; only
                    // stringify structured inputs.
                    arguments: typeof block.input !== "string"
                        ? JSON.stringify(block.input)
                        : block.input,
                },
            };
        });
        // Only attach the property when there is something to report, so
        // tool-free messages keep their original shape.
        if (toolCalls.length > 0) {
            formattedMessage.tool_calls = toolCalls;
        }
    }
    return formattedMessage;
};
|
|
35
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
/**
 * Shapes raw AI SDK call params into LangSmith trace inputs: a `prompt`
 * array is renamed to `messages` (with tool calls lifted for tracing), a
 * non-array prompt is dropped, and an absent/null prompt leaves the params
 * untouched.
 */
const _formatTracedInputs = (params) => {
    const { prompt, ...otherParams } = params;
    if (prompt == null) {
        return params;
    }
    if (!Array.isArray(prompt)) {
        return otherParams;
    }
    const messages = prompt.map((message) => populateToolCallsForTracing(message));
    return { ...otherParams, messages };
};
|
|
46
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
/**
 * Shapes raw AI SDK outputs into LangSmith trace outputs: drops the bulky
 * `request`/`response` fields, defaults a missing `role` (falling back to
 * `type`, then "assistant"), and lifts tool calls for tracing.
 */
const _formatTracedOutputs = (outputs) => {
    const { request, response, ...retained } = outputs;
    const role = retained.role ?? retained.type ?? "assistant";
    return populateToolCallsForTracing({ ...retained, role });
};
|
|
55
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
/**
 * Records LangSmith `usage_metadata` (token counts plus cache-read token
 * details) on the given run tree, merging into any existing `extra.metadata`.
 * No-op when the result carries no usage object.
 */
const setUsageMetadataOnRunTree = (result, runTree) => {
    const { usage } = result;
    if (usage == null || typeof usage !== "object") {
        return;
    }
    const cacheDetails = extractInputTokenDetails(result.providerMetadata ?? {}, usage.cachedInputTokens);
    const existingExtra = runTree.extra ?? {};
    runTree.extra = {
        ...existingExtra,
        metadata: {
            ...existingExtra.metadata,
            usage_metadata: {
                input_tokens: usage.inputTokens,
                output_tokens: usage.outputTokens,
                total_tokens: usage.totalTokens,
                input_token_details: { ...cacheDetails },
            },
        },
    };
};
|
|
79
|
+
/**
 * AI SDK middleware that wraps an AI SDK 5 model and adds LangSmith tracing.
 *
 * Returns an object with two middleware hooks:
 * - `wrapGenerate`: runs `doGenerate` inside a `traceable` "llm" run and
 *   records token usage on the active run tree.
 * - `wrapStream`: manually creates a child run (when a parent run exists),
 *   tees streamed chunks through a TransformStream, aggregates text /
 *   tool-call / finish chunks, and finalizes the run when the stream flushes.
 *
 * @param {Object} [config] - Optional settings.
 * @param {string} [config.name] - Run name (defaults to "ai.doGenerate" /
 *   "ai.doStream").
 * @param {string} [config.modelId] - Recorded as `ls_model_name` metadata.
 * @param {Object} [config.lsConfig] - Extra RunTree config merged into each run.
 */
export function LangSmithMiddleware(config) {
    const { name, modelId, lsConfig } = config ?? {};
    return {
        wrapGenerate: async ({ doGenerate, params }) => {
            // Trace the underlying generate call as an "llm" run.
            const traceableFunc = traceable(async (_params) => {
                const result = await doGenerate();
                // NOTE(review): the `true` argument appears to make this return
                // undefined (rather than throw) when no run is active — the
                // undefined-check below relies on that; confirm against traceable.js.
                const currentRunTree = getCurrentRunTree(true);
                if (currentRunTree !== undefined) {
                    setUsageMetadataOnRunTree(result, currentRunTree);
                }
                return result;
            }, {
                ...lsConfig,
                name: name ?? "ai.doGenerate",
                run_type: "llm",
                metadata: {
                    ls_model_name: modelId,
                    ai_sdk_method: "ai.doGenerate",
                    // Caller-provided metadata wins on key collisions.
                    ...lsConfig?.metadata,
                },
                processInputs: (inputs) => _formatTracedInputs(inputs),
                processOutputs: (outputs) => {
                    return _formatTracedOutputs(outputs);
                },
            });
            return traceableFunc(params);
        },
        wrapStream: async ({ doStream, params }) => {
            const parentRunTree = getCurrentRunTree(true);
            let runTree;
            // Only create a child run when a real run tree is present;
            // duck-type the check so non-RunTree contexts are ignored.
            if (parentRunTree != null &&
                typeof parentRunTree === "object" &&
                typeof parentRunTree.createChild === "function") {
                runTree = parentRunTree?.createChild({
                    ...lsConfig,
                    name: name ?? "ai.doStream",
                    run_type: "llm",
                    metadata: {
                        ls_model_name: modelId,
                        ai_sdk_method: "ai.doStream",
                        ...lsConfig?.metadata,
                    },
                    inputs: _formatTracedInputs(params),
                });
            }
            // Post the run up-front so it is visible while the stream runs.
            await runTree?.postRun();
            try {
                const { stream, ...rest } = await doStream();
                const chunks = [];
                // Tee every chunk into `chunks` while passing it through
                // unchanged to the consumer.
                const transformStream = new TransformStream({
                    async transform(chunk, controller) {
                        chunks.push(chunk);
                        controller.enqueue(chunk);
                    },
                    // Once the source stream ends, fold the buffered chunks
                    // into a single output object and close out the run.
                    async flush() {
                        try {
                            const output = chunks.reduce((aggregated, chunk) => {
                                if (chunk.type === "text-delta") {
                                    if (chunk.delta == null) {
                                        return aggregated;
                                    }
                                    // Concatenate streamed text deltas.
                                    return {
                                        ...aggregated,
                                        text: aggregated.text + chunk.delta,
                                    };
                                }
                                else if (chunk.type === "tool-call") {
                                    // De-duplicate by toolCallId: keep only the
                                    // first occurrence of each tool call.
                                    const matchingToolCall = aggregated.tool_calls.find((call) => call.id === chunk.toolCallId);
                                    if (matchingToolCall != null) {
                                        return aggregated;
                                    }
                                    return {
                                        ...aggregated,
                                        tool_calls: [
                                            ...aggregated.tool_calls,
                                            {
                                                id: chunk.toolCallId,
                                                type: "function",
                                                function: {
                                                    name: chunk.toolName,
                                                    arguments: chunk.input,
                                                },
                                            },
                                        ],
                                    };
                                }
                                else if (chunk.type === "finish") {
                                    // The finish chunk carries usage info;
                                    // record it on the run tree as a side effect.
                                    if (runTree != null) {
                                        setUsageMetadataOnRunTree(chunk, runTree);
                                    }
                                    return {
                                        ...aggregated,
                                        providerMetadata: chunk.providerMetadata,
                                        finishReason: chunk.finishReason,
                                    };
                                }
                                else {
                                    // Ignore chunk types we do not aggregate.
                                    return aggregated;
                                }
                            }, {
                                text: "",
                                // eslint-disable-next-line @typescript-eslint/no-explicit-any
                                tool_calls: [],
                            });
                            await runTree?.end(_formatTracedOutputs(output));
                        }
                        catch (error) {
                            await runTree?.end(undefined, error.message ?? String(error));
                            throw error;
                        }
                        finally {
                            // Always flush the run update, success or failure.
                            await runTree?.patchRun();
                        }
                    },
                });
                return {
                    stream: stream.pipeThrough(transformStream),
                    ...rest,
                };
            }
            catch (error) {
                // doStream itself failed: mark the run as errored and rethrow.
                await runTree?.end(undefined, error.message ?? String(error));
                await runTree?.patchRun();
                throw error;
            }
        },
    };
}
|
package/dist/index.cjs
CHANGED
|
@@ -10,4 +10,4 @@ Object.defineProperty(exports, "overrideFetchImplementation", { enumerable: true
|
|
|
10
10
|
var project_js_1 = require("./utils/project.cjs");
|
|
11
11
|
Object.defineProperty(exports, "getDefaultProjectName", { enumerable: true, get: function () { return project_js_1.getDefaultProjectName; } });
|
|
12
12
|
// Update using yarn bump-version
|
|
13
|
-
exports.__version__ = "0.3.
|
|
13
|
+
exports.__version__ = "0.3.57-rc.1";
|
package/dist/index.d.ts
CHANGED
|
@@ -3,4 +3,4 @@ export type { Dataset, Example, TracerSession, Run, Feedback, RetrieverOutput, }
|
|
|
3
3
|
export { RunTree, type RunTreeConfig } from "./run_trees.js";
|
|
4
4
|
export { overrideFetchImplementation } from "./singletons/fetch.js";
|
|
5
5
|
export { getDefaultProjectName } from "./utils/project.js";
|
|
6
|
-
export declare const __version__ = "0.3.
|
|
6
|
+
export declare const __version__ = "0.3.57-rc.1";
|
package/dist/index.js
CHANGED
|
@@ -3,4 +3,4 @@ export { RunTree } from "./run_trees.js";
|
|
|
3
3
|
export { overrideFetchImplementation } from "./singletons/fetch.js";
|
|
4
4
|
export { getDefaultProjectName } from "./utils/project.js";
|
|
5
5
|
// Update using yarn bump-version
|
|
6
|
-
export const __version__ = "0.3.
|
|
6
|
+
export const __version__ = "0.3.57-rc.1";
|
package/dist/utils/vercel.cjs
CHANGED
|
@@ -1,7 +1,8 @@
|
|
|
1
1
|
"use strict";
|
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.extractInputTokenDetails = extractInputTokenDetails;
|
|
3
4
|
exports.extractUsageMetadata = extractUsageMetadata;
|
|
4
|
-
function extractInputTokenDetails(providerMetadata,
|
|
5
|
+
function extractInputTokenDetails(providerMetadata, cachedTokenUsage) {
|
|
5
6
|
const inputTokenDetails = {};
|
|
6
7
|
if (providerMetadata.anthropic != null &&
|
|
7
8
|
typeof providerMetadata.anthropic === "object") {
|
|
@@ -52,9 +53,8 @@ function extractInputTokenDetails(providerMetadata, spanAttributes) {
|
|
|
52
53
|
typeof openai.cachedPromptTokens === "number") {
|
|
53
54
|
inputTokenDetails.cache_read = openai.cachedPromptTokens;
|
|
54
55
|
}
|
|
55
|
-
else if (typeof
|
|
56
|
-
inputTokenDetails.cache_read =
|
|
57
|
-
spanAttributes["ai.usage.cachedInputTokens"];
|
|
56
|
+
else if (typeof cachedTokenUsage === "number") {
|
|
57
|
+
inputTokenDetails.cache_read = cachedTokenUsage;
|
|
58
58
|
}
|
|
59
59
|
}
|
|
60
60
|
return inputTokenDetails;
|
|
@@ -88,7 +88,9 @@ function extractUsageMetadata(span) {
|
|
|
88
88
|
if (typeof span.attributes["ai.response.providerMetadata"] === "string") {
|
|
89
89
|
try {
|
|
90
90
|
const providerMetadata = JSON.parse(span.attributes["ai.response.providerMetadata"]);
|
|
91
|
-
usageMetadata.input_token_details = extractInputTokenDetails(providerMetadata, span.attributes
|
|
91
|
+
usageMetadata.input_token_details = extractInputTokenDetails(providerMetadata, typeof span.attributes["ai.usage.cachedInputTokens"] === "number"
|
|
92
|
+
? span.attributes["ai.usage.cachedInputTokens"]
|
|
93
|
+
: undefined);
|
|
92
94
|
if (providerMetadata.anthropic != null &&
|
|
93
95
|
typeof providerMetadata.anthropic === "object") {
|
|
94
96
|
// AI SDK does not include Anthropic cache tokens in their stated input token
|
package/dist/utils/vercel.d.ts
CHANGED
package/dist/utils/vercel.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
function extractInputTokenDetails(providerMetadata,
|
|
1
|
+
export function extractInputTokenDetails(providerMetadata, cachedTokenUsage) {
|
|
2
2
|
const inputTokenDetails = {};
|
|
3
3
|
if (providerMetadata.anthropic != null &&
|
|
4
4
|
typeof providerMetadata.anthropic === "object") {
|
|
@@ -49,9 +49,8 @@ function extractInputTokenDetails(providerMetadata, spanAttributes) {
|
|
|
49
49
|
typeof openai.cachedPromptTokens === "number") {
|
|
50
50
|
inputTokenDetails.cache_read = openai.cachedPromptTokens;
|
|
51
51
|
}
|
|
52
|
-
else if (typeof
|
|
53
|
-
inputTokenDetails.cache_read =
|
|
54
|
-
spanAttributes["ai.usage.cachedInputTokens"];
|
|
52
|
+
else if (typeof cachedTokenUsage === "number") {
|
|
53
|
+
inputTokenDetails.cache_read = cachedTokenUsage;
|
|
55
54
|
}
|
|
56
55
|
}
|
|
57
56
|
return inputTokenDetails;
|
|
@@ -85,7 +84,9 @@ export function extractUsageMetadata(span) {
|
|
|
85
84
|
if (typeof span.attributes["ai.response.providerMetadata"] === "string") {
|
|
86
85
|
try {
|
|
87
86
|
const providerMetadata = JSON.parse(span.attributes["ai.response.providerMetadata"]);
|
|
88
|
-
usageMetadata.input_token_details = extractInputTokenDetails(providerMetadata, span.attributes
|
|
87
|
+
usageMetadata.input_token_details = extractInputTokenDetails(providerMetadata, typeof span.attributes["ai.usage.cachedInputTokens"] === "number"
|
|
88
|
+
? span.attributes["ai.usage.cachedInputTokens"]
|
|
89
|
+
: undefined);
|
|
89
90
|
if (providerMetadata.anthropic != null &&
|
|
90
91
|
typeof providerMetadata.anthropic === "object") {
|
|
91
92
|
// AI SDK does not include Anthropic cache tokens in their stated input token
|
package/dist/wrappers/vercel.cjs
CHANGED
|
@@ -4,6 +4,7 @@ exports.wrapAISDKModel = void 0;
|
|
|
4
4
|
const traceable_js_1 = require("../traceable.cjs");
|
|
5
5
|
const generic_js_1 = require("./generic.cjs");
|
|
6
6
|
/**
|
|
7
|
+
* @deprecated Use `wrapAISDK` from `langsmith/experimental/vercel` instead.
|
|
7
8
|
* Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing.
|
|
8
9
|
* After wrapping a model, you can use it with the Vercel AI SDK Core
|
|
9
10
|
* methods as normal.
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
import type { RunTreeConfig } from "../index.js";
|
|
2
2
|
/**
|
|
3
|
+
* @deprecated Use `wrapAISDK` from `langsmith/experimental/vercel` instead.
|
|
3
4
|
* Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing.
|
|
4
5
|
* After wrapping a model, you can use it with the Vercel AI SDK Core
|
|
5
6
|
* methods as normal.
|
package/dist/wrappers/vercel.js
CHANGED
|
@@ -1,6 +1,7 @@
|
|
|
1
1
|
import { traceable } from "../traceable.js";
|
|
2
2
|
import { _wrapClient } from "./generic.js";
|
|
3
3
|
/**
|
|
4
|
+
* @deprecated Use `wrapAISDK` from `langsmith/experimental/vercel` instead.
|
|
4
5
|
* Wrap a Vercel AI SDK model, enabling automatic LangSmith tracing.
|
|
5
6
|
* After wrapping a model, you can use it with the Vercel AI SDK Core
|
|
6
7
|
* methods as normal.
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
module.exports = require('../dist/experimental/vercel/index.cjs');
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from '../dist/experimental/vercel/index.js'
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from '../dist/experimental/vercel/index.js'
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from '../dist/experimental/vercel/index.js'
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "langsmith",
|
|
3
|
-
"version": "0.3.
|
|
3
|
+
"version": "0.3.57-rc.1",
|
|
4
4
|
"description": "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform.",
|
|
5
5
|
"packageManager": "yarn@1.22.19",
|
|
6
6
|
"files": [
|
|
@@ -89,6 +89,10 @@
|
|
|
89
89
|
"experimental/otel/processor.js",
|
|
90
90
|
"experimental/otel/processor.d.ts",
|
|
91
91
|
"experimental/otel/processor.d.cts",
|
|
92
|
+
"experimental/vercel.cjs",
|
|
93
|
+
"experimental/vercel.js",
|
|
94
|
+
"experimental/vercel.d.ts",
|
|
95
|
+
"experimental/vercel.d.cts",
|
|
92
96
|
"index.cjs",
|
|
93
97
|
"index.js",
|
|
94
98
|
"index.d.ts",
|
|
@@ -144,8 +148,8 @@
|
|
|
144
148
|
"uuid": "^10.0.0"
|
|
145
149
|
},
|
|
146
150
|
"devDependencies": {
|
|
147
|
-
"@ai-sdk/anthropic": "2.0.
|
|
148
|
-
"@ai-sdk/openai": "2.0.
|
|
151
|
+
"@ai-sdk/anthropic": "^2.0.1",
|
|
152
|
+
"@ai-sdk/openai": "^2.0.10",
|
|
149
153
|
"@babel/preset-env": "^7.22.4",
|
|
150
154
|
"@faker-js/faker": "^8.4.1",
|
|
151
155
|
"@jest/globals": "^29.5.0",
|
|
@@ -163,7 +167,7 @@
|
|
|
163
167
|
"@types/node-fetch": "^2.6.12",
|
|
164
168
|
"@typescript-eslint/eslint-plugin": "^5.59.8",
|
|
165
169
|
"@typescript-eslint/parser": "^5.59.8",
|
|
166
|
-
"ai": "5.0.
|
|
170
|
+
"ai": "^5.0.10",
|
|
167
171
|
"babel-jest": "^29.5.0",
|
|
168
172
|
"cross-env": "^7.0.3",
|
|
169
173
|
"dotenv": "^16.1.3",
|
|
@@ -410,6 +414,15 @@
|
|
|
410
414
|
"import": "./experimental/otel/processor.js",
|
|
411
415
|
"require": "./experimental/otel/processor.cjs"
|
|
412
416
|
},
|
|
417
|
+
"./experimental/vercel": {
|
|
418
|
+
"types": {
|
|
419
|
+
"import": "./experimental/vercel.d.ts",
|
|
420
|
+
"require": "./experimental/vercel.d.cts",
|
|
421
|
+
"default": "./experimental/vercel.d.ts"
|
|
422
|
+
},
|
|
423
|
+
"import": "./experimental/vercel.js",
|
|
424
|
+
"require": "./experimental/vercel.cjs"
|
|
425
|
+
},
|
|
413
426
|
"./package.json": "./package.json"
|
|
414
427
|
}
|
|
415
428
|
}
|