langsmith 0.4.6 → 0.4.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,431 @@
1
+ import { isTraceableFunction, traceable, } from "../traceable.js";
2
/**
 * Converts a Gemini `usageMetadata` payload into the LangSmith usage
 * metadata shape (`input_tokens`, `output_tokens`, `total_tokens` plus
 * per-direction token detail maps).
 *
 * Output tokens are read from `responseTokenCount` when that key exists,
 * otherwise from `candidatesTokenCount`, otherwise 0.
 */
const _createUsageMetadata = (usage) => {
    // Resolve the output token count, preferring responseTokenCount.
    let outputTokens = 0;
    if ("responseTokenCount" in usage) {
        outputTokens = usage.responseTokenCount || 0;
    }
    else if ("candidatesTokenCount" in usage) {
        outputTokens = usage.candidatesTokenCount || 0;
    }
    // Input-side details: cache statistics and the portion of the prompt
    // beyond the 200k-token threshold (0 when under the threshold).
    const inputDetails = {};
    if (usage.cachedContentTokenCount) {
        inputDetails.cache_read_over_200k = Math.max(0, usage.cachedContentTokenCount - 200000);
    }
    if (usage.promptTokenCount) {
        inputDetails.over_200k = Math.max(0, usage.promptTokenCount - 200000);
    }
    if (usage.cachedContentTokenCount) {
        inputDetails.cache_read = usage.cachedContentTokenCount;
    }
    // Output-side details: over-200k overflow and reasoning ("thoughts") tokens.
    const outputDetails = {};
    if ("candidatesTokenCount" in usage && usage.candidatesTokenCount != null) {
        outputDetails.over_200k = Math.max(0, usage.candidatesTokenCount - 200000);
    }
    if (usage.thoughtsTokenCount) {
        outputDetails.reasoning = usage.thoughtsTokenCount;
    }
    return {
        input_tokens: usage.promptTokenCount || 0,
        output_tokens: outputTokens,
        total_tokens: usage.totalTokenCount || 0,
        input_token_details: inputDetails,
        output_token_details: outputDetails,
    };
};
40
// Aggregates the chunks emitted by a streaming generateContent call into a
// single assistant chat message for the LangSmith run. Expects `input` to be
// an array of streamed response chunks; any other shape is treated as empty.
const chatAggregator = (input) => {
    // Only accept an array whose entries are all non-null objects.
    const chunks = Array.isArray(input) &&
        input.every((item) => typeof item === "object" && item !== null)
        ? input
        : [];
    if (!chunks || chunks.length === 0) {
        // Nothing to aggregate: empty assistant message.
        return { content: "", role: "assistant" };
    }
    let text = ""; // concatenated regular (non-thought) text across chunks
    let thoughtText = ""; // concatenated reasoning ("thought") text
    const toolCalls = []; // collected functionCall parts
    const otherParts = []; // code/image/file parts, kept in arrival order
    let usageMetadata = null; // last chunk's usageMetadata wins
    let finishReason = null; // last candidate's finishReason wins
    let safetyRatings = null; // last candidate's safetyRatings wins
    for (const chunk of chunks) {
        if (chunk?.usageMetadata) {
            usageMetadata = chunk.usageMetadata;
        }
        if (chunk?.candidates && Array.isArray(chunk.candidates)) {
            for (const candidate of chunk.candidates) {
                if (candidate.finishReason) {
                    finishReason = candidate.finishReason;
                }
                if (candidate.safetyRatings) {
                    safetyRatings = candidate.safetyRatings;
                }
                if (candidate.content?.parts) {
                    // Dispatch each part by type; order of the else-if chain
                    // defines the priority when a part has multiple keys.
                    for (const part of candidate.content.parts) {
                        if ("text" in part && part.text !== undefined) {
                            // `thought` flags reasoning text; keep it separate
                            // from visible output text.
                            if (part.thought) {
                                thoughtText += part.text;
                            }
                            else {
                                text += part.text;
                            }
                        }
                        else if ("functionCall" in part && part.functionCall) {
                            // Normalize to OpenAI-style tool call records.
                            toolCalls.push({
                                type: "function",
                                function: {
                                    name: part.functionCall.name || "",
                                    arguments: JSON.stringify(part.functionCall.args || {}),
                                },
                            });
                        }
                        else if ("codeExecutionResult" in part &&
                            part.codeExecutionResult) {
                            otherParts.push({
                                type: "code_execution_result",
                                code_execution_result: part.codeExecutionResult,
                            });
                        }
                        else if ("executableCode" in part && part.executableCode) {
                            otherParts.push({
                                type: "executable_code",
                                executable_code: part.executableCode,
                            });
                        }
                        else if ("inlineData" in part && part.inlineData) {
                            // Inline binary data becomes a data-URL image part.
                            const mimeType = part.inlineData.mimeType || "image/jpeg";
                            const data = part.inlineData.data || "";
                            otherParts.push({
                                type: "image_url",
                                image_url: {
                                    url: `data:${mimeType};base64,${data}`,
                                    detail: "high",
                                },
                            });
                        }
                        else if ("fileData" in part && part.fileData) {
                            otherParts.push({
                                type: "file_data",
                                mime_type: part.fileData.mimeType,
                                file_uri: part.fileData.fileUri,
                            });
                        }
                    }
                }
            }
        }
        else if (chunk?.text) {
            // Chunks without candidates may still carry a plain text field.
            text += chunk.text;
        }
    }
    // Rebuild ordered content parts: thought text first, then regular text,
    // then every non-text part.
    const contentParts = [];
    if (thoughtText) {
        contentParts.push({ type: "text", text: thoughtText, thought: true });
    }
    if (text) {
        contentParts.push({ type: "text", text: text });
    }
    contentParts.push(...otherParts);
    const result = {
        role: "assistant",
    };
    // Decide the final `content` shape:
    // - multiple parts, or a leading non-text part -> structured array
    // - exactly one plain (non-thought) text part -> plain string
    // - thought-only output -> structured array (preserves the thought flag)
    // - otherwise -> the accumulated plain text (possibly "")
    if (contentParts.length > 1 ||
        (contentParts.length > 0 && contentParts[0].type !== "text")) {
        result.content = contentParts;
    }
    else if (contentParts.length === 1 &&
        contentParts[0].type === "text" &&
        !contentParts[0].thought) {
        result.content = contentParts[0].text;
    }
    else if (thoughtText && !text) {
        result.content = contentParts;
    }
    else {
        result.content = text || "";
    }
    // Optional fields are only attached when present in the stream.
    if (toolCalls.length > 0) {
        result.tool_calls = toolCalls;
    }
    if (finishReason) {
        result.finish_reason = finishReason;
    }
    if (safetyRatings) {
        result.safety_ratings = safetyRatings;
    }
    if (usageMetadata) {
        result.usage_metadata = _createUsageMetadata(usageMetadata);
    }
    return result;
};
165
/**
 * Normalizes Gemini `generateContent` inputs into a LangSmith
 * `{ messages: [...] }` payload.
 *
 * Accepted `contents` shapes:
 * - a single string (one user message),
 * - an array of strings (one user message each),
 * - an array of Content objects / bare Part objects.
 * Any other shape (or missing `contents`) is returned unchanged.
 * The `model` key is dropped from the returned payload; the remaining
 * keys are passed through.
 */
function processGeminiInputs(inputs) {
    const { contents, model: _model, ...rest } = inputs;
    if (!contents) {
        return inputs;
    }
    // A bare string becomes a single user message.
    if (typeof contents === "string") {
        return {
            messages: [{ role: "user", content: contents }],
            ...rest,
        };
    }
    if (!Array.isArray(contents)) {
        return inputs;
    }
    // An array of plain strings becomes one user message per string.
    if (contents.every((entry) => typeof entry === "string")) {
        return {
            messages: contents.map((entry) => ({ role: "user", content: entry })),
            ...rest,
        };
    }
    // Mixed array: convert each Content (or bare Part) object to a message;
    // non-object entries are skipped.
    const messages = [];
    for (const entry of contents) {
        if (typeof entry !== "object" || entry === null) {
            continue;
        }
        const hasParts = "parts" in entry && Array.isArray(entry.parts);
        const role = "role" in entry ? entry.role : "user";
        // A bare Part (no `parts` array) is treated as a one-part message.
        const rawParts = hasParts ? entry.parts ?? [] : [entry];
        const plainTexts = []; // raw text, used when everything is text
        const converted = []; // structured content parts
        for (const part of rawParts) {
            if (typeof part === "string") {
                plainTexts.push(part);
                converted.push({ type: "text", text: part });
                continue;
            }
            if (typeof part !== "object" || part === null) {
                continue;
            }
            if ("text" in part && part.text) {
                plainTexts.push(part.text);
                converted.push({ type: "text", text: part.text });
            }
            else if ("inlineData" in part) {
                // Inline binary data becomes a data-URL image part.
                const mime = part.inlineData?.mimeType || "image/jpeg";
                const payload = part.inlineData?.data || "";
                converted.push({
                    type: "image_url",
                    image_url: {
                        url: `data:${mime};base64,${payload}`,
                        detail: "high",
                    },
                });
            }
            else if ("functionResponse" in part) {
                converted.push({
                    type: "function_response", //TODO: add testing for function_response
                    function_response: {
                        name: part.functionResponse?.name,
                        response: part.functionResponse?.response || {},
                    },
                });
            }
        }
        // Text-only messages collapse into a newline-joined string; mixed
        // content keeps the structured part list; no parts -> empty string.
        let messageContent;
        if (converted.length === 0) {
            messageContent = "";
        }
        else if (converted.every((p) => p.type === "text")) {
            messageContent = plainTexts.join("\n");
        }
        else {
            messageContent = converted;
        }
        messages.push({ role, content: messageContent });
    }
    return { messages, ...rest };
}
240
// Normalizes a (non-streaming) generateContent response into a single
// assistant chat message for LangSmith. Only the first candidate is
// inspected; already-normalized messages (role/content without `candidates`)
// pass through unchanged.
function processGeminiOutputs(outputs) {
    // The traced value may wrap the response under an `outputs` key.
    const response = (outputs?.outputs || outputs);
    if (!response) {
        return { content: "", role: "assistant" };
    }
    // Already a chat message (e.g. produced by the stream aggregator):
    // return as-is.
    if ("content" in response &&
        "role" in response &&
        !("candidates" in response)) {
        return response;
    }
    let text = ""; // concatenated regular (non-thought) text
    let thoughtText = ""; // concatenated reasoning ("thought") text
    const toolCalls = []; // collected functionCall parts
    const otherParts = []; // code/image/file parts, kept in order
    let finishReason = null;
    let safetyRatings = null;
    if ("candidates" in response &&
        Array.isArray(response.candidates) &&
        response.candidates.length > 0) {
        // Only the first candidate is used.
        const firstCandidate = response.candidates[0];
        if (firstCandidate.finishReason) {
            finishReason = firstCandidate.finishReason;
        }
        if (firstCandidate.safetyRatings) {
            safetyRatings = firstCandidate.safetyRatings;
        }
        if (firstCandidate?.content?.parts) {
            // Dispatch each part by type; the else-if chain order defines
            // priority when a part carries multiple keys.
            for (const part of firstCandidate.content.parts) {
                if ("text" in part && part.text !== undefined) {
                    // `thought` flags reasoning text; kept separate from
                    // visible output text.
                    if (part.thought) {
                        thoughtText += part.text;
                    }
                    else {
                        text += part.text;
                    }
                }
                else if ("functionCall" in part && part.functionCall) {
                    // Normalize to OpenAI-style tool call records.
                    toolCalls.push({
                        type: "function",
                        function: {
                            name: part.functionCall.name || "",
                            arguments: JSON.stringify(part.functionCall.args || {}),
                        },
                    });
                }
                else if ("codeExecutionResult" in part && part.codeExecutionResult) {
                    otherParts.push({
                        type: "code_execution_result",
                        code_execution_result: part.codeExecutionResult,
                    });
                }
                else if ("executableCode" in part && part.executableCode) {
                    otherParts.push({
                        type: "executable_code",
                        executable_code: part.executableCode,
                    });
                }
                else if ("inlineData" in part && part.inlineData) {
                    // Inline binary data becomes a data-URL image part.
                    const mimeType = part.inlineData.mimeType || "image/jpeg";
                    const data = part.inlineData.data || "";
                    otherParts.push({
                        type: "image_url",
                        image_url: {
                            url: `data:${mimeType};base64,${data}`,
                            detail: "high",
                        },
                    });
                }
                else if ("fileData" in part && part.fileData) {
                    otherParts.push({
                        type: "file_data",
                        mime_type: part.fileData.mimeType,
                        file_uri: part.fileData.fileUri,
                    });
                }
            }
        }
    }
    // Rebuild ordered content parts: thought text first, then regular text,
    // then every non-text part.
    const contentParts = [];
    if (thoughtText) {
        contentParts.push({ type: "text", text: thoughtText, thought: true });
    }
    if (text) {
        contentParts.push({ type: "text", text: text });
    }
    contentParts.push(...otherParts);
    const result = {
        role: "assistant",
    };
    // Decide the final `content` shape (same rules as the stream aggregator):
    // - multiple parts, or a leading non-text part -> structured array
    // - exactly one plain (non-thought) text part -> plain string
    // - thought-only output -> structured array (preserves the thought flag)
    // - otherwise -> the accumulated plain text (possibly "")
    if (contentParts.length > 1 ||
        (contentParts.length > 0 && contentParts[0].type !== "text")) {
        result.content = contentParts;
    }
    else if (contentParts.length === 1 &&
        contentParts[0].type === "text" &&
        !contentParts[0].thought) {
        result.content = contentParts[0].text;
    }
    else if (thoughtText && !text) {
        result.content = contentParts;
    }
    else {
        result.content = text || "";
    }
    // Optional fields are only attached when present in the response.
    if (toolCalls.length > 0) {
        result.tool_calls = toolCalls;
    }
    if (finishReason) {
        result.finish_reason = finishReason;
    }
    if (safetyRatings) {
        result.safety_ratings = safetyRatings;
    }
    if ("usageMetadata" in response && response.usageMetadata) {
        result.usage_metadata = _createUsageMetadata(response.usageMetadata);
    }
    return result;
}
358
/**
 * Derives LangSmith invocation parameters from the traced call's arguments.
 *
 * `payload` is expected to be the argument list of a generateContent call
 * (the request object is its first element); a bare request object is also
 * accepted as a fallback.
 */
function getInvocationParams(payload) {
    const request = (payload?.[0] || payload);
    return {
        ls_provider: "google",
        ls_model_type: "chat",
        // Model name is required downstream; fall back to "unknown".
        ls_model_name: request?.model || "unknown",
        ls_temperature: request?.config?.temperature,
        ls_max_tokens: request?.config?.maxOutputTokens,
    };
}
368
/**
 * Wraps a Google Gemini client so calls to `models.generateContent` and
 * `models.generateContentStream` are automatically traced in LangSmith.
 *
 * **⚠️ BETA: This feature is in beta and may change in future releases.**
 *
 * Supports tracing for:
 * - Text generation (streaming and non-streaming)
 * - Multimodal inputs (text + images)
 * - Image generation output (gemini-2.5-flash-image)
 * - Function calling
 * - Usage metadata extraction
 *
 * @param gemini - A Google GenAI client instance
 * @param options - LangSmith tracing configuration options
 * @returns A wrapped client with automatic tracing enabled
 *
 * @example
 * ```ts
 * import { GoogleGenAI } from "@google/genai";
 * import { wrapGemini } from "langsmith/wrappers/gemini";
 *
 * const client = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
 * const wrapped = wrapGemini(client, { tracingEnabled: true });
 *
 * // Use the wrapped client exactly like the original
 * const response = await wrapped.models.generateContent({
 *   model: "gemini-2.0-flash-exp",
 *   contents: "Hello!",
 * });
 * ```
 */
export function wrapGemini(gemini, options) {
    // Refuse to wrap a client whose methods are already traceable.
    if (isTraceableFunction(gemini.models.generateContent) ||
        isTraceableFunction(gemini.models.generateContentStream)) {
        throw new Error("This Google Gen AI client has already been wrapped. " +
            "Wrapping a client multiple times is not supported.");
    }
    // Shallow copy so the caller's original client object stays untouched.
    const wrappedClient = { ...gemini };
    // Trace configuration for the non-streaming endpoint; caller-supplied
    // options override the defaults.
    const completionTraceConfig = {
        name: "ChatGoogleGenerativeAI",
        run_type: "llm",
        getInvocationParams,
        processInputs: processGeminiInputs,
        processOutputs: processGeminiOutputs,
        ...options,
    };
    // The streaming endpoint additionally needs an aggregator to fold the
    // streamed chunks into a single message for the run.
    const streamingTraceConfig = {
        name: "ChatGoogleGenerativeAI",
        run_type: "llm",
        aggregator: chatAggregator,
        getInvocationParams,
        processInputs: processGeminiInputs,
        processOutputs: processGeminiOutputs,
        ...options,
    };
    // Bind to the original `models` object so `this` inside the SDK methods
    // keeps pointing at the real implementation.
    wrappedClient.models = {
        ...gemini.models,
        generateContent: traceable(gemini.models.generateContent.bind(gemini.models), completionTraceConfig),
        generateContentStream: traceable(gemini.models.generateContentStream.bind(gemini.models), streamingTraceConfig),
    };
    return wrappedClient;
}
export default wrapGemini;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "langsmith",
3
- "version": "0.4.6",
3
+ "version": "0.4.8",
4
4
  "description": "Client library to connect to the LangSmith Observability and Evaluation Platform.",
5
5
  "packageManager": "yarn@1.22.19",
6
6
  "files": [
@@ -143,6 +143,8 @@
143
143
  "@ai-sdk/anthropic": "^3.0.0",
144
144
  "@ai-sdk/openai": "^3.0.0",
145
145
  "@ai-sdk/provider": "^3.0.0",
146
+ "@anthropic-ai/claude-agent-sdk": "^0.1.76",
147
+ "@google/genai": "^1.29.0",
146
148
  "@anthropic-ai/sdk": "^0.71.2",
147
149
  "@babel/preset-env": "^7.22.4",
148
150
  "@faker-js/faker": "^8.4.1",