langsmith 0.4.7 → 0.4.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/client.cjs +10 -2
  2. package/dist/client.d.ts +1 -0
  3. package/dist/client.js +10 -2
  4. package/dist/experimental/anthropic/context.cjs +187 -0
  5. package/dist/experimental/anthropic/context.d.ts +5 -0
  6. package/dist/experimental/anthropic/context.js +183 -0
  7. package/dist/experimental/anthropic/index.cjs +188 -0
  8. package/dist/experimental/anthropic/index.d.ts +30 -0
  9. package/dist/experimental/anthropic/index.js +185 -0
  10. package/dist/experimental/anthropic/messages.cjs +102 -0
  11. package/dist/experimental/anthropic/messages.d.ts +6 -0
  12. package/dist/experimental/anthropic/messages.js +96 -0
  13. package/dist/experimental/anthropic/types.cjs +3 -0
  14. package/dist/experimental/anthropic/types.d.ts +50 -0
  15. package/dist/experimental/anthropic/types.js +2 -0
  16. package/dist/experimental/anthropic/usage.cjs +180 -0
  17. package/dist/experimental/anthropic/usage.d.ts +1 -0
  18. package/dist/experimental/anthropic/usage.js +175 -0
  19. package/dist/experimental/anthropic/utils.cjs +24 -0
  20. package/dist/experimental/anthropic/utils.d.ts +1 -0
  21. package/dist/experimental/anthropic/utils.js +20 -0
  22. package/dist/index.cjs +1 -1
  23. package/dist/index.d.ts +1 -1
  24. package/dist/index.js +1 -1
  25. package/dist/traceable.cjs +38 -4
  26. package/dist/traceable.d.ts +4 -0
  27. package/dist/traceable.js +38 -4
  28. package/dist/utils/usage.cjs +6 -7
  29. package/dist/utils/usage.js +6 -7
  30. package/dist/wrappers/gemini.cjs +434 -0
  31. package/dist/wrappers/gemini.d.ts +46 -0
  32. package/dist/wrappers/gemini.js +431 -0
  33. package/experimental/anthropic.cjs +1 -0
  34. package/experimental/anthropic.d.cts +1 -0
  35. package/experimental/anthropic.d.ts +1 -0
  36. package/experimental/anthropic.js +1 -0
  37. package/package.json +16 -1
@@ -0,0 +1,434 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.wrapGemini = wrapGemini;
4
+ const traceable_js_1 = require("../traceable.cjs");
5
/**
 * Converts a Gemini `usageMetadata` payload into LangSmith's normalized
 * usage shape ({ input_tokens, output_tokens, total_tokens, ...details }).
 *
 * Streaming responses report output tokens as `responseTokenCount`, while
 * non-streaming responses use `candidatesTokenCount`; whichever key is
 * present wins (checked in that order), defaulting to 0.
 */
const _createUsageMetadata = (usage) => {
    let outputTokens = 0;
    if ("responseTokenCount" in usage) {
        outputTokens = usage.responseTokenCount || 0;
    }
    else if ("candidatesTokenCount" in usage) {
        outputTokens = usage.candidatesTokenCount || 0;
    }
    // Input-side detail: cache reads and tokens past the 200k context tier.
    const inputDetails = {
        ...(usage.cachedContentTokenCount
            ? {
                cache_read_over_200k: Math.max(0, usage.cachedContentTokenCount - 200000),
                cache_read: usage.cachedContentTokenCount,
            }
            : {}),
        ...(usage.promptTokenCount
            ? { over_200k: Math.max(0, usage.promptTokenCount - 200000) }
            : {}),
    };
    // Output-side detail: 200k-tier overflow and reasoning ("thought") tokens.
    // Note: `candidatesTokenCount` only needs to be non-nullish here (0 counts),
    // whereas `thoughtsTokenCount` must be truthy.
    const outputDetails = {
        ...("candidatesTokenCount" in usage && usage.candidatesTokenCount != null
            ? { over_200k: Math.max(0, usage.candidatesTokenCount - 200000) }
            : {}),
        ...(usage.thoughtsTokenCount
            ? { reasoning: usage.thoughtsTokenCount }
            : {}),
    };
    return {
        input_tokens: usage.promptTokenCount || 0,
        output_tokens: outputTokens,
        total_tokens: usage.totalTokenCount || 0,
        input_token_details: inputDetails,
        output_token_details: outputDetails,
    };
};
43
/**
 * Aggregates an array of streamed Gemini `GenerateContentResponse` chunks
 * into a single chat-style message for the LangSmith run.
 *
 * Accumulation rules visible below:
 * - text parts are concatenated in arrival order (thought vs. regular text
 *   are accumulated separately);
 * - function calls and non-text parts (code execution, executable code,
 *   inline data, file data) are collected into lists;
 * - `usageMetadata`, `finishReason`, and `safetyRatings` use
 *   last-chunk-wins semantics (each later chunk overwrites the value).
 */
const chatAggregator = (input) => {
    // Only accept an array of non-null objects; anything else is treated as
    // "no chunks" and yields an empty assistant message.
    const chunks = Array.isArray(input) &&
        input.every((item) => typeof item === "object" && item !== null)
        ? input
        : [];
    if (!chunks || chunks.length === 0) {
        return { content: "", role: "assistant" };
    }
    let text = "";
    let thoughtText = "";
    const toolCalls = [];
    const otherParts = [];
    let usageMetadata = null;
    let finishReason = null;
    let safetyRatings = null;
    for (const chunk of chunks) {
        if (chunk?.usageMetadata) {
            // Later chunks overwrite earlier usage (cumulative totals arrive last).
            usageMetadata = chunk.usageMetadata;
        }
        if (chunk?.candidates && Array.isArray(chunk.candidates)) {
            for (const candidate of chunk.candidates) {
                if (candidate.finishReason) {
                    finishReason = candidate.finishReason;
                }
                if (candidate.safetyRatings) {
                    safetyRatings = candidate.safetyRatings;
                }
                if (candidate.content?.parts) {
                    for (const part of candidate.content.parts) {
                        // Each part is classified by its first matching key.
                        if ("text" in part && part.text !== undefined) {
                            if (part.thought) {
                                thoughtText += part.text;
                            }
                            else {
                                text += part.text;
                            }
                        }
                        else if ("functionCall" in part && part.functionCall) {
                            // Normalize to OpenAI-style tool call (args JSON-encoded).
                            toolCalls.push({
                                type: "function",
                                function: {
                                    name: part.functionCall.name || "",
                                    arguments: JSON.stringify(part.functionCall.args || {}),
                                },
                            });
                        }
                        else if ("codeExecutionResult" in part &&
                            part.codeExecutionResult) {
                            otherParts.push({
                                type: "code_execution_result",
                                code_execution_result: part.codeExecutionResult,
                            });
                        }
                        else if ("executableCode" in part && part.executableCode) {
                            otherParts.push({
                                type: "executable_code",
                                executable_code: part.executableCode,
                            });
                        }
                        else if ("inlineData" in part && part.inlineData) {
                            // Inline binary data is surfaced as a data: URL image part.
                            const mimeType = part.inlineData.mimeType || "image/jpeg";
                            const data = part.inlineData.data || "";
                            otherParts.push({
                                type: "image_url",
                                image_url: {
                                    url: `data:${mimeType};base64,${data}`,
                                    detail: "high",
                                },
                            });
                        }
                        else if ("fileData" in part && part.fileData) {
                            otherParts.push({
                                type: "file_data",
                                mime_type: part.fileData.mimeType,
                                file_uri: part.fileData.fileUri,
                            });
                        }
                    }
                }
            }
        }
        else if (chunk?.text) {
            // Chunks without candidates may carry a plain `text` field.
            text += chunk.text;
        }
    }
    // Assemble ordered content parts: thought text first, then regular text,
    // then all non-text parts.
    const contentParts = [];
    if (thoughtText) {
        contentParts.push({ type: "text", text: thoughtText, thought: true });
    }
    if (text) {
        contentParts.push({ type: "text", text: text });
    }
    contentParts.push(...otherParts);
    const result = {
        role: "assistant",
    };
    // Content shape selection: a lone non-thought text part collapses to a
    // plain string; anything multi-part (or non-text, or thought-only) stays
    // a structured array; otherwise fall back to the accumulated text.
    if (contentParts.length > 1 ||
        (contentParts.length > 0 && contentParts[0].type !== "text")) {
        result.content = contentParts;
    }
    else if (contentParts.length === 1 &&
        contentParts[0].type === "text" &&
        !contentParts[0].thought) {
        result.content = contentParts[0].text;
    }
    else if (thoughtText && !text) {
        result.content = contentParts;
    }
    else {
        result.content = text || "";
    }
    if (toolCalls.length > 0) {
        result.tool_calls = toolCalls;
    }
    if (finishReason) {
        result.finish_reason = finishReason;
    }
    if (safetyRatings) {
        result.safety_ratings = safetyRatings;
    }
    if (usageMetadata) {
        result.usage_metadata = _createUsageMetadata(usageMetadata);
    }
    return result;
};
168
/**
 * Normalizes a Gemini `generateContent` request into LangSmith's chat-style
 * `{ messages: [...] }` input format.
 *
 * - A bare string becomes a single user message.
 * - An array of strings becomes one user message per string.
 * - Content objects are converted part-by-part (text, inlineData,
 *   functionResponse); text-only messages collapse to a newline-joined string.
 * - `model` is intentionally dropped from the traced inputs (it is recorded
 *   via invocation params instead); all other request fields pass through.
 * - Inputs without `contents`, or with an unrecognized `contents` shape, are
 *   returned unchanged.
 */
function processGeminiInputs(inputs) {
    const { contents, model, ...rest } = inputs;
    if (!contents) {
        return inputs;
    }
    if (typeof contents === "string") {
        return { messages: [{ role: "user", content: contents }], ...rest };
    }
    if (!Array.isArray(contents)) {
        return inputs;
    }
    if (contents.every((item) => typeof item === "string")) {
        return {
            messages: contents.map((text) => ({ role: "user", content: text })),
            ...rest,
        };
    }
    const messages = [];
    for (const entry of contents) {
        // Non-object entries cannot be converted; skip them.
        if (typeof entry !== "object" || entry === null) {
            continue;
        }
        // A `Content` object carries a `parts` array; a bare part object is
        // treated as a single-part message.
        const hasParts = "parts" in entry && Array.isArray(entry.parts);
        const role = "role" in entry ? entry.role : "user";
        const rawParts = hasParts ? entry.parts ?? [] : [entry];
        const plainText = [];
        const converted = [];
        for (const rawPart of rawParts) {
            if (typeof rawPart === "string") {
                plainText.push(rawPart);
                converted.push({ type: "text", text: rawPart });
                continue;
            }
            if (typeof rawPart !== "object" || rawPart === null) {
                continue;
            }
            if ("text" in rawPart && rawPart.text) {
                plainText.push(rawPart.text);
                converted.push({ type: "text", text: rawPart.text });
            }
            else if ("inlineData" in rawPart) {
                // Inline binary payloads become data: URL image parts.
                const inlineData = rawPart.inlineData;
                const mimeType = inlineData?.mimeType || "image/jpeg";
                const data = inlineData?.data || "";
                converted.push({
                    type: "image_url",
                    image_url: {
                        url: `data:${mimeType};base64,${data}`,
                        detail: "high",
                    },
                });
            }
            else if ("functionResponse" in rawPart) {
                const funcResponse = rawPart.functionResponse;
                converted.push({
                    type: "function_response",
                    function_response: {
                        name: funcResponse?.name,
                        response: funcResponse?.response || {},
                    },
                });
            }
        }
        // All-text messages collapse to a plain string; mixed content stays
        // structured; a message with no convertible parts becomes "".
        let messageContent;
        if (converted.length > 0 && converted.every((p) => p.type === "text")) {
            messageContent = plainText.join("\n");
        }
        else if (converted.length > 0) {
            messageContent = converted;
        }
        else {
            messageContent = "";
        }
        messages.push({ role, content: messageContent });
    }
    return { messages, ...rest };
}
243
/**
 * Normalizes a (non-streaming) Gemini `GenerateContentResponse` into a
 * chat-style assistant message for the LangSmith run.
 *
 * Only the FIRST candidate is inspected. Part handling and the final
 * content-shape selection mirror `chatAggregator` above.
 */
function processGeminiOutputs(outputs) {
    // Traced outputs may be wrapped as { outputs: response }; unwrap if so.
    const response = (outputs?.outputs || outputs);
    if (!response) {
        return { content: "", role: "assistant" };
    }
    // Already-normalized messages (content/role, no candidates) pass through
    // unchanged — e.g. when a stream aggregator already produced the message.
    if ("content" in response &&
        "role" in response &&
        !("candidates" in response)) {
        return response;
    }
    let text = "";
    let thoughtText = "";
    const toolCalls = [];
    const otherParts = [];
    let finishReason = null;
    let safetyRatings = null;
    if ("candidates" in response &&
        Array.isArray(response.candidates) &&
        response.candidates.length > 0) {
        const firstCandidate = response.candidates[0];
        if (firstCandidate.finishReason) {
            finishReason = firstCandidate.finishReason;
        }
        if (firstCandidate.safetyRatings) {
            safetyRatings = firstCandidate.safetyRatings;
        }
        if (firstCandidate?.content?.parts) {
            for (const part of firstCandidate.content.parts) {
                // Each part is classified by its first matching key.
                if ("text" in part && part.text !== undefined) {
                    if (part.thought) {
                        thoughtText += part.text;
                    }
                    else {
                        text += part.text;
                    }
                }
                else if ("functionCall" in part && part.functionCall) {
                    // Normalize to OpenAI-style tool call (args JSON-encoded).
                    toolCalls.push({
                        type: "function",
                        function: {
                            name: part.functionCall.name || "",
                            arguments: JSON.stringify(part.functionCall.args || {}),
                        },
                    });
                }
                else if ("codeExecutionResult" in part && part.codeExecutionResult) {
                    otherParts.push({
                        type: "code_execution_result",
                        code_execution_result: part.codeExecutionResult,
                    });
                }
                else if ("executableCode" in part && part.executableCode) {
                    otherParts.push({
                        type: "executable_code",
                        executable_code: part.executableCode,
                    });
                }
                else if ("inlineData" in part && part.inlineData) {
                    // Inline binary data is surfaced as a data: URL image part.
                    const mimeType = part.inlineData.mimeType || "image/jpeg";
                    const data = part.inlineData.data || "";
                    otherParts.push({
                        type: "image_url",
                        image_url: {
                            url: `data:${mimeType};base64,${data}`,
                            detail: "high",
                        },
                    });
                }
                else if ("fileData" in part && part.fileData) {
                    otherParts.push({
                        type: "file_data",
                        mime_type: part.fileData.mimeType,
                        file_uri: part.fileData.fileUri,
                    });
                }
            }
        }
    }
    // Assemble ordered content parts: thought text first, then regular text,
    // then all non-text parts.
    const contentParts = [];
    if (thoughtText) {
        contentParts.push({ type: "text", text: thoughtText, thought: true });
    }
    if (text) {
        contentParts.push({ type: "text", text: text });
    }
    contentParts.push(...otherParts);
    const result = {
        role: "assistant",
    };
    // Content shape selection: a lone non-thought text part collapses to a
    // plain string; anything multi-part (or non-text, or thought-only) stays
    // a structured array; otherwise fall back to the accumulated text.
    if (contentParts.length > 1 ||
        (contentParts.length > 0 && contentParts[0].type !== "text")) {
        result.content = contentParts;
    }
    else if (contentParts.length === 1 &&
        contentParts[0].type === "text" &&
        !contentParts[0].thought) {
        result.content = contentParts[0].text;
    }
    else if (thoughtText && !text) {
        result.content = contentParts;
    }
    else {
        result.content = text || "";
    }
    if (toolCalls.length > 0) {
        result.tool_calls = toolCalls;
    }
    if (finishReason) {
        result.finish_reason = finishReason;
    }
    if (safetyRatings) {
        result.safety_ratings = safetyRatings;
    }
    if ("usageMetadata" in response && response.usageMetadata) {
        result.usage_metadata = _createUsageMetadata(response.usageMetadata);
    }
    return result;
}
361
/**
 * Builds LangSmith invocation parameters (`ls_*` metadata) for a traced
 * Gemini call. The traced payload is the call's argument list, so the
 * request object is its first element when present; otherwise the payload
 * itself is treated as the request.
 */
function getInvocationParams(payload) {
    const request = payload?.[0] || payload;
    const generationConfig = request?.config;
    return {
        ls_provider: "google",
        ls_model_type: "chat",
        ls_model_name: request?.model || "unknown",
        ls_temperature: generationConfig?.temperature,
        ls_max_tokens: generationConfig?.maxOutputTokens,
    };
}
371
/**
 * Wraps a Google Gemini client so its `models.generateContent` and
 * `models.generateContentStream` calls are automatically traced to LangSmith.
 *
 * **⚠️ BETA: This feature is in beta and may change in future releases.**
 *
 * Traces text generation (streaming and non-streaming), multimodal inputs,
 * image generation output, function calling, and usage metadata.
 *
 * @param gemini - A Google GenAI client instance
 * @param options - LangSmith tracing configuration options (merged over the
 *   defaults, so callers may override name, run type, processors, etc.)
 * @returns A shallow copy of the client whose generate methods are traced
 * @throws Error if either generate method is already a traceable function
 *
 * @example
 * ```ts
 * import { GoogleGenAI } from "@google/genai";
 * import { wrapGemini } from "langsmith/wrappers/gemini";
 *
 * const client = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
 * const wrapped = wrapGemini(client, { tracingEnabled: true });
 *
 * const response = await wrapped.models.generateContent({
 *   model: "gemini-2.0-flash-exp",
 *   contents: "Hello!",
 * });
 * ```
 */
function wrapGemini(gemini, options) {
    const { generateContent, generateContentStream } = gemini.models;
    // Refuse to wrap a client whose methods are already traceable — double
    // wrapping would nest runs and double-report usage.
    if ((0, traceable_js_1.isTraceableFunction)(generateContent) ||
        (0, traceable_js_1.isTraceableFunction)(generateContentStream)) {
        throw new Error("This Google Gen AI client has already been wrapped. " +
            "Wrapping a client multiple times is not supported.");
    }
    // Defaults shared by both traced methods; caller options override them.
    const sharedTraceConfig = {
        name: "ChatGoogleGenerativeAI",
        run_type: "llm",
        getInvocationParams,
        processInputs: processGeminiInputs,
        processOutputs: processGeminiOutputs,
    };
    // Shallow-copy the client and replace only the two generate methods,
    // binding them back to `gemini.models` so internal `this` still works.
    return {
        ...gemini,
        models: {
            ...gemini.models,
            generateContent: (0, traceable_js_1.traceable)(generateContent.bind(gemini.models), {
                ...sharedTraceConfig,
                ...options,
            }),
            generateContentStream: (0, traceable_js_1.traceable)(generateContentStream.bind(gemini.models), {
                ...sharedTraceConfig,
                // Streaming additionally needs the chunk aggregator.
                aggregator: chatAggregator,
                ...options,
            }),
        },
    };
}
exports.default = wrapGemini;
@@ -0,0 +1,46 @@
1
import type { RunTreeConfig } from "../index.js";
/**
 * Minimal structural type for a wrappable Google GenAI client: anything that
 * exposes `models.generateContent` and `models.generateContentStream`.
 */
type GoogleGenAIType = {
    models: {
        generateContent: (...args: any[]) => any;
        generateContentStream: (...args: any[]) => any;
    };
};
/**
 * The wrapped client preserves the exact shape of the input client; the two
 * generate methods keep their original signatures (their traced replacements
 * are call-compatible).
 */
type PatchedGeminiClient<T extends GoogleGenAIType> = T & {
    models: T["models"] & {
        generateContent: T["models"]["generateContent"];
        generateContentStream: T["models"]["generateContentStream"];
    };
};
/**
 * Wraps a Google Gemini client to enable automatic LangSmith tracing.
 *
 * **⚠️ BETA: This feature is in beta and may change in future releases.**
 *
 * Supports tracing for:
 * - Text generation (streaming and non-streaming)
 * - Multimodal inputs (text + images)
 * - Image generation output (gemini-2.5-flash-image)
 * - Function calling
 * - Usage metadata extraction
 *
 * @param gemini - A Google GenAI client instance
 * @param options - LangSmith tracing configuration options
 * @returns A wrapped client with automatic tracing enabled
 *
 * @example
 * ```ts
 * import { GoogleGenAI } from "@google/genai";
 * import { wrapGemini } from "langsmith/wrappers/gemini";
 *
 * const client = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY });
 * const wrapped = wrapGemini(client, { tracingEnabled: true });
 *
 * // Use the wrapped client exactly like the original
 * const response = await wrapped.models.generateContent({
 *   model: "gemini-2.0-flash-exp",
 *   contents: "Hello!",
 * });
 * ```
 */
export declare function wrapGemini<T extends GoogleGenAIType>(gemini: T, options?: Partial<RunTreeConfig>): PatchedGeminiClient<T>;
export default wrapGemini;