@langchain/anthropic 0.2.13 → 0.2.15

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
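For readers who want to inspect the full file-by-file changes themselves, a diff like this can also be reproduced locally with the npm CLI (assuming npm 7 or later); the exact output may differ slightly from the rendering below:

```bash
# Fetch both published versions and print a unified diff between them
npm diff --diff=@langchain/anthropic@0.2.13 --diff=@langchain/anthropic@0.2.15
```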
@@ -41,28 +41,381 @@ function extractToken(chunk) {
41
41
  return undefined;
42
42
  }
43
43
  /**
44
- * Wrapper around Anthropic large language models.
45
- *
46
- * To use this package, you should have an Anthropic API key set as an
47
- * environment variable named `ANTHROPIC_API_KEY` or passed
48
- * into the constructor.
49
- *
50
- * @remarks
51
- * Any parameters that are valid to be passed to {@link
52
- * https://console.anthropic.com/docs/api/reference |
53
- * `anthropic.messages`} can be passed through {@link invocationKwargs},
54
- * even if not explicitly available on this class.
55
- * @example
44
+ * Anthropic chat model integration.
45
+ *
46
+ * Setup:
47
+ * Install `@langchain/anthropic` and set an environment variable named `ANTHROPIC_API_KEY`.
48
+ *
49
+ * ```bash
50
+ * npm install @langchain/anthropic
51
+ * export ANTHROPIC_API_KEY="your-api-key"
52
+ * ```
53
+ *
54
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html#constructor)
55
+ *
56
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_anthropic.ChatAnthropicCallOptions.html)
57
+ *
58
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
59
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
60
+ *
61
+ * ```typescript
62
+ * // When calling `.bind`, call options should be passed via the first argument
63
+ * const llmWithArgsBound = llm.bind({
64
+ * stop: ["\n"],
65
+ * tools: [...],
66
+ * });
67
+ *
68
+ * // When calling `.bindTools`, call options should be passed via the second argument
69
+ * const llmWithTools = llm.bindTools(
70
+ * [...],
71
+ * {
72
+ * tool_choice: "auto",
73
+ * }
74
+ * );
75
+ * ```
76
+ *
77
+ * ## Examples
78
+ *
79
+ * <details open>
80
+ * <summary><strong>Instantiate</strong></summary>
81
+ *
82
+ * ```typescript
83
+ * import { ChatAnthropic } from '@langchain/anthropic';
84
+ *
85
+ * const llm = new ChatAnthropic({
86
+ * model: "claude-3-5-sonnet-20240620",
87
+ * temperature: 0,
88
+ * maxTokens: undefined,
89
+ * maxRetries: 2,
90
+ * // apiKey: "...",
91
+ * // baseUrl: "...",
92
+ * // other params...
93
+ * });
94
+ * ```
95
+ * </details>
96
+ *
97
+ * <br />
98
+ *
99
+ * <details>
100
+ * <summary><strong>Invoking</strong></summary>
101
+ *
102
+ * ```typescript
103
+ * const input = `Translate "I love programming" into French.`;
104
+ *
105
+ * // Models also accept a list of chat messages or a formatted prompt
106
+ * const result = await llm.invoke(input);
107
+ * console.log(result);
108
+ * ```
109
+ *
110
+ * ```txt
111
+ * AIMessage {
112
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
113
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
114
+ * "response_metadata": {
115
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
116
+ * "model": "claude-3-5-sonnet-20240620",
117
+ * "stop_reason": "end_turn",
118
+ * "stop_sequence": null,
119
+ * "usage": {
120
+ * "input_tokens": 25,
121
+ * "output_tokens": 19
122
+ * },
123
+ * "type": "message",
124
+ * "role": "assistant"
125
+ * },
126
+ * "usage_metadata": {
127
+ * "input_tokens": 25,
128
+ * "output_tokens": 19,
129
+ * "total_tokens": 44
130
+ * }
131
+ * }
132
+ * ```
133
+ * </details>
134
+ *
135
+ * <br />
136
+ *
137
+ * <details>
138
+ * <summary><strong>Streaming Chunks</strong></summary>
139
+ *
140
+ * ```typescript
141
+ * for await (const chunk of await llm.stream(input)) {
142
+ * console.log(chunk);
143
+ * }
144
+ * ```
145
+ *
146
+ * ```txt
147
+ * AIMessageChunk {
148
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
149
+ * "content": "",
150
+ * "additional_kwargs": {
151
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
152
+ * "type": "message",
153
+ * "role": "assistant",
154
+ * "model": "claude-3-5-sonnet-20240620"
155
+ * },
156
+ * "usage_metadata": {
157
+ * "input_tokens": 25,
158
+ * "output_tokens": 1,
159
+ * "total_tokens": 26
160
+ * }
161
+ * }
162
+ * AIMessageChunk {
163
+ * "content": "",
164
+ * }
165
+ * AIMessageChunk {
166
+ * "content": "Here",
167
+ * }
168
+ * AIMessageChunk {
169
+ * "content": "'s",
170
+ * }
171
+ * AIMessageChunk {
172
+ * "content": " the translation to",
173
+ * }
174
+ * AIMessageChunk {
175
+ * "content": " French:\n\nJ",
176
+ * }
177
+ * AIMessageChunk {
178
+ * "content": "'adore la programmation",
179
+ * }
180
+ * AIMessageChunk {
181
+ * "content": ".",
182
+ * }
183
+ * AIMessageChunk {
184
+ * "content": "",
185
+ * "additional_kwargs": {
186
+ * "stop_reason": "end_turn",
187
+ * "stop_sequence": null
188
+ * },
189
+ * "usage_metadata": {
190
+ * "input_tokens": 0,
191
+ * "output_tokens": 19,
192
+ * "total_tokens": 19
193
+ * }
194
+ * }
195
+ * ```
196
+ * </details>
197
+ *
198
+ * <br />
199
+ *
200
+ * <details>
201
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
202
+ *
203
+ * ```typescript
204
+ * import { AIMessageChunk } from '@langchain/core/messages';
205
+ * import { concat } from '@langchain/core/utils/stream';
206
+ *
207
+ * const stream = await llm.stream(input);
208
+ * let full: AIMessageChunk | undefined;
209
+ * for await (const chunk of stream) {
210
+ * full = !full ? chunk : concat(full, chunk);
211
+ * }
212
+ * console.log(full);
213
+ * ```
214
+ *
215
+ * ```txt
216
+ * AIMessageChunk {
217
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
218
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
219
+ * "additional_kwargs": {
220
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
221
+ * "type": "message",
222
+ * "role": "assistant",
223
+ * "model": "claude-3-5-sonnet-20240620",
224
+ * "stop_reason": "end_turn",
225
+ * "stop_sequence": null
226
+ * },
227
+ * "usage_metadata": {
228
+ * "input_tokens": 25,
229
+ * "output_tokens": 20,
230
+ * "total_tokens": 45
231
+ * }
232
+ * }
233
+ * ```
234
+ * </details>
235
+ *
236
+ * <br />
237
+ *
238
+ * <details>
239
+ * <summary><strong>Bind tools</strong></summary>
240
+ *
241
+ * ```typescript
242
+ * import { z } from 'zod';
243
+ *
244
+ * const GetWeather = {
245
+ * name: "GetWeather",
246
+ * description: "Get the current weather in a given location",
247
+ * schema: z.object({
248
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
249
+ * }),
250
+ * }
251
+ *
252
+ * const GetPopulation = {
253
+ * name: "GetPopulation",
254
+ * description: "Get the current population in a given location",
255
+ * schema: z.object({
256
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
257
+ * }),
258
+ * }
259
+ *
260
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
261
+ * const aiMsg = await llmWithTools.invoke(
262
+ * "Which city is hotter today and which is bigger: LA or NY?"
263
+ * );
264
+ * console.log(aiMsg.tool_calls);
265
+ * ```
266
+ *
267
+ * ```txt
268
+ * [
269
+ * {
270
+ * name: 'GetWeather',
271
+ * args: { location: 'Los Angeles, CA' },
272
+ * id: 'toolu_01WjW3Dann6BPJVtLhovdBD5',
273
+ * type: 'tool_call'
274
+ * },
275
+ * {
276
+ * name: 'GetWeather',
277
+ * args: { location: 'New York, NY' },
278
+ * id: 'toolu_01G6wfJgqi5zRmJomsmkyZXe',
279
+ * type: 'tool_call'
280
+ * },
281
+ * {
282
+ * name: 'GetPopulation',
283
+ * args: { location: 'Los Angeles, CA' },
284
+ * id: 'toolu_0165qYWBA2VFyUst5RA18zew',
285
+ * type: 'tool_call'
286
+ * },
287
+ * {
288
+ * name: 'GetPopulation',
289
+ * args: { location: 'New York, NY' },
290
+ * id: 'toolu_01PGNyP33vxr13tGqr7i3rDo',
291
+ * type: 'tool_call'
292
+ * }
293
+ * ]
294
+ * ```
295
+ * </details>
296
+ *
297
+ * <br />
298
+ *
299
+ * <details>
300
+ * <summary><strong>Structured Output</strong></summary>
301
+ *
56
302
  * ```typescript
57
- * import { ChatAnthropic } from "@langchain/anthropic";
303
+ * import { z } from 'zod';
304
+ *
305
+ * const Joke = z.object({
306
+ * setup: z.string().describe("The setup of the joke"),
307
+ * punchline: z.string().describe("The punchline to the joke"),
308
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
309
+ * }).describe('Joke to tell user.');
58
310
  *
59
- * const model = new ChatAnthropic({
60
- * temperature: 0.9,
61
- * apiKey: 'YOUR-API-KEY',
311
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
312
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
313
+ * console.log(jokeResult);
314
+ * ```
315
+ *
316
+ * ```txt
317
+ * {
318
+ * setup: "Why don't cats play poker in the jungle?",
319
+ * punchline: 'Too many cheetahs!',
320
+ * rating: 7
321
+ * }
322
+ * ```
323
+ * </details>
324
+ *
325
+ * <br />
326
+ *
327
+ * <details>
328
+ * <summary><strong>Multimodal</strong></summary>
329
+ *
330
+ * ```typescript
331
+ * import { HumanMessage } from '@langchain/core/messages';
332
+ *
333
+ * const imageUrl = "https://example.com/image.jpg";
334
+ * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
335
+ * const base64Image = Buffer.from(imageData).toString('base64');
336
+ *
337
+ * const message = new HumanMessage({
338
+ * content: [
339
+ * { type: "text", text: "describe the weather in this image" },
340
+ * {
341
+ * type: "image_url",
342
+ * image_url: { url: `data:image/jpeg;base64,${base64Image}` },
343
+ * },
344
+ * ]
62
345
  * });
63
- * const res = await model.invoke({ input: 'Hello!' });
64
- * console.log(res);
346
+ *
347
+ * const imageDescriptionAiMsg = await llm.invoke([message]);
348
+ * console.log(imageDescriptionAiMsg.content);
349
+ * ```
350
+ *
351
+ * ```txt
352
+ * The weather in this image appears to be beautiful and clear. The sky is a vibrant blue with scattered white clouds, suggesting a sunny and pleasant day. The clouds are wispy and light, indicating calm conditions without any signs of storms or heavy weather. The bright green grass on the rolling hills looks lush and well-watered, which could mean recent rainfall or good growing conditions. Overall, the scene depicts a perfect spring or early summer day with mild temperatures, plenty of sunshine, and gentle breezes - ideal weather for enjoying the outdoors or for plant growth.
353
+ * ```
354
+ * </details>
355
+ *
356
+ * <br />
357
+ *
358
+ * <details>
359
+ * <summary><strong>Usage Metadata</strong></summary>
360
+ *
361
+ * ```typescript
362
+ * const aiMsgForMetadata = await llm.invoke(input);
363
+ * console.log(aiMsgForMetadata.usage_metadata);
364
+ * ```
365
+ *
366
+ * ```txt
367
+ * { input_tokens: 25, output_tokens: 19, total_tokens: 44 }
368
+ * ```
369
+ * </details>
370
+ *
371
+ * <br />
372
+ *
373
+ * <details>
374
+ * <summary><strong>Stream Usage Metadata</strong></summary>
375
+ *
376
+ * ```typescript
377
+ * const streamForMetadata = await llm.stream(
378
+ * input,
379
+ * {
380
+ * streamUsage: true
381
+ * }
382
+ * );
383
+ * let fullForMetadata: AIMessageChunk | undefined;
384
+ * for await (const chunk of streamForMetadata) {
385
+ * fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
386
+ * }
387
+ * console.log(fullForMetadata?.usage_metadata);
388
+ * ```
389
+ *
390
+ * ```txt
391
+ * { input_tokens: 25, output_tokens: 20, total_tokens: 45 }
65
392
  * ```
393
+ * </details>
394
+ *
395
+ * <br />
396
+ *
397
+ * <details>
398
+ * <summary><strong>Response Metadata</strong></summary>
399
+ *
400
+ * ```typescript
401
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
402
+ * console.log(aiMsgForResponseMetadata.response_metadata);
403
+ * ```
404
+ *
405
+ * ```txt
406
+ * {
407
+ * id: 'msg_01STxeQxJmp4sCSpioD6vK3L',
408
+ * model: 'claude-3-5-sonnet-20240620',
409
+ * stop_reason: 'end_turn',
410
+ * stop_sequence: null,
411
+ * usage: { input_tokens: 25, output_tokens: 19 },
412
+ * type: 'message',
413
+ * role: 'assistant'
414
+ * }
415
+ * ```
416
+ * </details>
417
+ *
418
+ * <br />
66
419
  */
67
420
  class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
68
421
  static lc_name() {
@@ -349,20 +702,11 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
     }
     /** @ignore */
     async _generateNonStreaming(messages, params, requestOptions) {
-        const options = params.tools !== undefined
-            ? {
-                ...requestOptions,
-                headers: {
-                    ...requestOptions.headers,
-                    "anthropic-beta": "tools-2024-04-04",
-                },
-            }
-            : requestOptions;
         const response = await this.completionWithRetry({
             ...params,
             stream: false,
             ...(0, message_inputs_js_1._formatMessagesForAnthropic)(messages),
-        }, options);
+        }, requestOptions);
         const { content, ...additionalKwargs } = response;
         const generations = (0, message_outputs_js_1.anthropicResponseToChatMessages)(content, additionalKwargs);
         // eslint-disable-next-line @typescript-eslint/no-unused-vars
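Context for the hunk above: with the bumped `@anthropic-ai/sdk`, tool calling no longer requires the `anthropic-beta: tools-2024-04-04` header, so the wrapper stops injecting it. A minimal sketch, assuming the underlying SDK's `defaultHeaders` client option is forwarded unchanged through `clientOptions`, of how a caller could still attach such a header manually if some future beta feature required it (the header value below is purely illustrative):

```typescript
import { ChatAnthropic } from "@langchain/anthropic";

// Sketch only: the automatic beta header is gone, but custom headers can still
// be supplied to the underlying Anthropic client via clientOptions.
const llm = new ChatAnthropic({
  model: "claude-3-5-sonnet-20240620",
  clientOptions: {
    defaultHeaders: { "anthropic-beta": "some-future-beta-feature" },
  },
});
```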
@@ -66,12 +66,12 @@ export interface AnthropicInput {
     apiKey?: string;
     /** Anthropic API URL */
     anthropicApiUrl?: string;
+    /** @deprecated Use "model" instead */
+    modelName?: string;
     /** Model name to use */
-    modelName: string;
-    /** Model name to use */
-    model: string;
+    model?: string;
     /** Overridable Anthropic ClientOptions */
-    clientOptions: ClientOptions;
+    clientOptions?: ClientOptions;
     /** Holds any additional parameters that are valid to pass to {@link
      * https://console.anthropic.com/docs/api/reference |
      * `anthropic.messages`} that are not explicitly specified on this class.
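The interface change above deprecates `modelName` in favour of `model` and makes both fields optional. A minimal sketch of what this means for constructor calls (the model identifiers are illustrative; `modelName` still compiles but is flagged as deprecated by editors and linters):

```typescript
import { ChatAnthropic } from "@langchain/anthropic";

// Preferred going forward: the `model` field.
const llm = new ChatAnthropic({ model: "claude-3-5-sonnet-20240620" });

// Still accepted for backwards compatibility, but marked @deprecated.
const legacyLlm = new ChatAnthropic({ modelName: "claude-3-haiku-20240307" });
```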
@@ -89,28 +89,381 @@ export interface AnthropicInput {
89
89
  */
90
90
  type Kwargs = Record<string, any>;
91
91
  /**
92
- * Wrapper around Anthropic large language models.
93
- *
94
- * To use this package, you should have an Anthropic API key set as an
95
- * environment variable named `ANTHROPIC_API_KEY` or passed
96
- * into the constructor.
97
- *
98
- * @remarks
99
- * Any parameters that are valid to be passed to {@link
100
- * https://console.anthropic.com/docs/api/reference |
101
- * `anthropic.messages`} can be passed through {@link invocationKwargs},
102
- * even if not explicitly available on this class.
103
- * @example
92
+ * Anthropic chat model integration.
93
+ *
94
+ * Setup:
95
+ * Install `@langchain/anthropic` and set an environment variable named `ANTHROPIC_API_KEY`.
96
+ *
97
+ * ```bash
98
+ * npm install @langchain/anthropic
99
+ * export ANTHROPIC_API_KEY="your-api-key"
100
+ * ```
101
+ *
102
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html#constructor)
103
+ *
104
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_anthropic.ChatAnthropicCallOptions.html)
105
+ *
106
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
107
+ * They can also be passed via `.bind`, or as the second arg in `.bindTools`, as shown in the examples below:
108
+ *
109
+ * ```typescript
110
+ * // When calling `.bind`, call options should be passed via the first argument
111
+ * const llmWithArgsBound = llm.bind({
112
+ * stop: ["\n"],
113
+ * tools: [...],
114
+ * });
115
+ *
116
+ * // When calling `.bindTools`, call options should be passed via the second argument
117
+ * const llmWithTools = llm.bindTools(
118
+ * [...],
119
+ * {
120
+ * tool_choice: "auto",
121
+ * }
122
+ * );
123
+ * ```
124
+ *
125
+ * ## Examples
126
+ *
127
+ * <details open>
128
+ * <summary><strong>Instantiate</strong></summary>
129
+ *
104
130
  * ```typescript
105
- * import { ChatAnthropic } from "@langchain/anthropic";
131
+ * import { ChatAnthropic } from '@langchain/anthropic';
106
132
  *
107
- * const model = new ChatAnthropic({
108
- * temperature: 0.9,
109
- * apiKey: 'YOUR-API-KEY',
133
+ * const llm = new ChatAnthropic({
134
+ * model: "claude-3-5-sonnet-20240620",
135
+ * temperature: 0,
136
+ * maxTokens: undefined,
137
+ * maxRetries: 2,
138
+ * // apiKey: "...",
139
+ * // baseUrl: "...",
140
+ * // other params...
110
141
  * });
111
- * const res = await model.invoke({ input: 'Hello!' });
112
- * console.log(res);
113
142
  * ```
143
+ * </details>
144
+ *
145
+ * <br />
146
+ *
147
+ * <details>
148
+ * <summary><strong>Invoking</strong></summary>
149
+ *
150
+ * ```typescript
151
+ * const input = `Translate "I love programming" into French.`;
152
+ *
153
+ * // Models also accept a list of chat messages or a formatted prompt
154
+ * const result = await llm.invoke(input);
155
+ * console.log(result);
156
+ * ```
157
+ *
158
+ * ```txt
159
+ * AIMessage {
160
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
161
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
162
+ * "response_metadata": {
163
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
164
+ * "model": "claude-3-5-sonnet-20240620",
165
+ * "stop_reason": "end_turn",
166
+ * "stop_sequence": null,
167
+ * "usage": {
168
+ * "input_tokens": 25,
169
+ * "output_tokens": 19
170
+ * },
171
+ * "type": "message",
172
+ * "role": "assistant"
173
+ * },
174
+ * "usage_metadata": {
175
+ * "input_tokens": 25,
176
+ * "output_tokens": 19,
177
+ * "total_tokens": 44
178
+ * }
179
+ * }
180
+ * ```
181
+ * </details>
182
+ *
183
+ * <br />
184
+ *
185
+ * <details>
186
+ * <summary><strong>Streaming Chunks</strong></summary>
187
+ *
188
+ * ```typescript
189
+ * for await (const chunk of await llm.stream(input)) {
190
+ * console.log(chunk);
191
+ * }
192
+ * ```
193
+ *
194
+ * ```txt
195
+ * AIMessageChunk {
196
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
197
+ * "content": "",
198
+ * "additional_kwargs": {
199
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
200
+ * "type": "message",
201
+ * "role": "assistant",
202
+ * "model": "claude-3-5-sonnet-20240620"
203
+ * },
204
+ * "usage_metadata": {
205
+ * "input_tokens": 25,
206
+ * "output_tokens": 1,
207
+ * "total_tokens": 26
208
+ * }
209
+ * }
210
+ * AIMessageChunk {
211
+ * "content": "",
212
+ * }
213
+ * AIMessageChunk {
214
+ * "content": "Here",
215
+ * }
216
+ * AIMessageChunk {
217
+ * "content": "'s",
218
+ * }
219
+ * AIMessageChunk {
220
+ * "content": " the translation to",
221
+ * }
222
+ * AIMessageChunk {
223
+ * "content": " French:\n\nJ",
224
+ * }
225
+ * AIMessageChunk {
226
+ * "content": "'adore la programmation",
227
+ * }
228
+ * AIMessageChunk {
229
+ * "content": ".",
230
+ * }
231
+ * AIMessageChunk {
232
+ * "content": "",
233
+ * "additional_kwargs": {
234
+ * "stop_reason": "end_turn",
235
+ * "stop_sequence": null
236
+ * },
237
+ * "usage_metadata": {
238
+ * "input_tokens": 0,
239
+ * "output_tokens": 19,
240
+ * "total_tokens": 19
241
+ * }
242
+ * }
243
+ * ```
244
+ * </details>
245
+ *
246
+ * <br />
247
+ *
248
+ * <details>
249
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
250
+ *
251
+ * ```typescript
252
+ * import { AIMessageChunk } from '@langchain/core/messages';
253
+ * import { concat } from '@langchain/core/utils/stream';
254
+ *
255
+ * const stream = await llm.stream(input);
256
+ * let full: AIMessageChunk | undefined;
257
+ * for await (const chunk of stream) {
258
+ * full = !full ? chunk : concat(full, chunk);
259
+ * }
260
+ * console.log(full);
261
+ * ```
262
+ *
263
+ * ```txt
264
+ * AIMessageChunk {
265
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
266
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
267
+ * "additional_kwargs": {
268
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
269
+ * "type": "message",
270
+ * "role": "assistant",
271
+ * "model": "claude-3-5-sonnet-20240620",
272
+ * "stop_reason": "end_turn",
273
+ * "stop_sequence": null
274
+ * },
275
+ * "usage_metadata": {
276
+ * "input_tokens": 25,
277
+ * "output_tokens": 20,
278
+ * "total_tokens": 45
279
+ * }
280
+ * }
281
+ * ```
282
+ * </details>
283
+ *
284
+ * <br />
285
+ *
286
+ * <details>
287
+ * <summary><strong>Bind tools</strong></summary>
288
+ *
289
+ * ```typescript
290
+ * import { z } from 'zod';
291
+ *
292
+ * const GetWeather = {
293
+ * name: "GetWeather",
294
+ * description: "Get the current weather in a given location",
295
+ * schema: z.object({
296
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
297
+ * }),
298
+ * }
299
+ *
300
+ * const GetPopulation = {
301
+ * name: "GetPopulation",
302
+ * description: "Get the current population in a given location",
303
+ * schema: z.object({
304
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
305
+ * }),
306
+ * }
307
+ *
308
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
309
+ * const aiMsg = await llmWithTools.invoke(
310
+ * "Which city is hotter today and which is bigger: LA or NY?"
311
+ * );
312
+ * console.log(aiMsg.tool_calls);
313
+ * ```
314
+ *
315
+ * ```txt
316
+ * [
317
+ * {
318
+ * name: 'GetWeather',
319
+ * args: { location: 'Los Angeles, CA' },
320
+ * id: 'toolu_01WjW3Dann6BPJVtLhovdBD5',
321
+ * type: 'tool_call'
322
+ * },
323
+ * {
324
+ * name: 'GetWeather',
325
+ * args: { location: 'New York, NY' },
326
+ * id: 'toolu_01G6wfJgqi5zRmJomsmkyZXe',
327
+ * type: 'tool_call'
328
+ * },
329
+ * {
330
+ * name: 'GetPopulation',
331
+ * args: { location: 'Los Angeles, CA' },
332
+ * id: 'toolu_0165qYWBA2VFyUst5RA18zew',
333
+ * type: 'tool_call'
334
+ * },
335
+ * {
336
+ * name: 'GetPopulation',
337
+ * args: { location: 'New York, NY' },
338
+ * id: 'toolu_01PGNyP33vxr13tGqr7i3rDo',
339
+ * type: 'tool_call'
340
+ * }
341
+ * ]
342
+ * ```
343
+ * </details>
344
+ *
345
+ * <br />
346
+ *
347
+ * <details>
348
+ * <summary><strong>Structured Output</strong></summary>
349
+ *
350
+ * ```typescript
351
+ * import { z } from 'zod';
352
+ *
353
+ * const Joke = z.object({
354
+ * setup: z.string().describe("The setup of the joke"),
355
+ * punchline: z.string().describe("The punchline to the joke"),
356
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
357
+ * }).describe('Joke to tell user.');
358
+ *
359
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
360
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
361
+ * console.log(jokeResult);
362
+ * ```
363
+ *
364
+ * ```txt
365
+ * {
366
+ * setup: "Why don't cats play poker in the jungle?",
367
+ * punchline: 'Too many cheetahs!',
368
+ * rating: 7
369
+ * }
370
+ * ```
371
+ * </details>
372
+ *
373
+ * <br />
374
+ *
375
+ * <details>
376
+ * <summary><strong>Multimodal</strong></summary>
377
+ *
378
+ * ```typescript
379
+ * import { HumanMessage } from '@langchain/core/messages';
380
+ *
381
+ * const imageUrl = "https://example.com/image.jpg";
382
+ * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
383
+ * const base64Image = Buffer.from(imageData).toString('base64');
384
+ *
385
+ * const message = new HumanMessage({
386
+ * content: [
387
+ * { type: "text", text: "describe the weather in this image" },
388
+ * {
389
+ * type: "image_url",
390
+ * image_url: { url: `data:image/jpeg;base64,${base64Image}` },
391
+ * },
392
+ * ]
393
+ * });
394
+ *
395
+ * const imageDescriptionAiMsg = await llm.invoke([message]);
396
+ * console.log(imageDescriptionAiMsg.content);
397
+ * ```
398
+ *
399
+ * ```txt
400
+ * The weather in this image appears to be beautiful and clear. The sky is a vibrant blue with scattered white clouds, suggesting a sunny and pleasant day. The clouds are wispy and light, indicating calm conditions without any signs of storms or heavy weather. The bright green grass on the rolling hills looks lush and well-watered, which could mean recent rainfall or good growing conditions. Overall, the scene depicts a perfect spring or early summer day with mild temperatures, plenty of sunshine, and gentle breezes - ideal weather for enjoying the outdoors or for plant growth.
401
+ * ```
402
+ * </details>
403
+ *
404
+ * <br />
405
+ *
406
+ * <details>
407
+ * <summary><strong>Usage Metadata</strong></summary>
408
+ *
409
+ * ```typescript
410
+ * const aiMsgForMetadata = await llm.invoke(input);
411
+ * console.log(aiMsgForMetadata.usage_metadata);
412
+ * ```
413
+ *
414
+ * ```txt
415
+ * { input_tokens: 25, output_tokens: 19, total_tokens: 44 }
416
+ * ```
417
+ * </details>
418
+ *
419
+ * <br />
420
+ *
421
+ * <details>
422
+ * <summary><strong>Stream Usage Metadata</strong></summary>
423
+ *
424
+ * ```typescript
425
+ * const streamForMetadata = await llm.stream(
426
+ * input,
427
+ * {
428
+ * streamUsage: true
429
+ * }
430
+ * );
431
+ * let fullForMetadata: AIMessageChunk | undefined;
432
+ * for await (const chunk of streamForMetadata) {
433
+ * fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
434
+ * }
435
+ * console.log(fullForMetadata?.usage_metadata);
436
+ * ```
437
+ *
438
+ * ```txt
439
+ * { input_tokens: 25, output_tokens: 20, total_tokens: 45 }
440
+ * ```
441
+ * </details>
442
+ *
443
+ * <br />
444
+ *
445
+ * <details>
446
+ * <summary><strong>Response Metadata</strong></summary>
447
+ *
448
+ * ```typescript
449
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
450
+ * console.log(aiMsgForResponseMetadata.response_metadata);
451
+ * ```
452
+ *
453
+ * ```txt
454
+ * {
455
+ * id: 'msg_01STxeQxJmp4sCSpioD6vK3L',
456
+ * model: 'claude-3-5-sonnet-20240620',
457
+ * stop_reason: 'end_turn',
458
+ * stop_sequence: null,
459
+ * usage: { input_tokens: 25, output_tokens: 19 },
460
+ * type: 'message',
461
+ * role: 'assistant'
462
+ * }
463
+ * ```
464
+ * </details>
465
+ *
466
+ * <br />
114
467
  */
115
468
  export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCallOptions = ChatAnthropicCallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> implements AnthropicInput {
116
469
  static lc_name(): string;
@@ -135,7 +488,7 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
     protected batchClient: Anthropic;
     protected streamingClient: Anthropic;
     streamUsage: boolean;
-    constructor(fields?: Partial<AnthropicInput> & BaseChatModelParams);
+    constructor(fields?: AnthropicInput & BaseChatModelParams);
     getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
     /**
      * Formats LangChain StructuredTools to AnthropicTools.
@@ -152,8 +505,8 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
     invocationParams(options?: this["ParsedCallOptions"]): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, "messages"> & Kwargs;
     /** @ignore */
     _identifyingParams(): {
-        system?: string | undefined;
-        model: "claude-2.1" | (string & {}) | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-2.0" | "claude-instant-1.2";
+        system?: string | Anthropic.Messages.TextBlockParam[] | undefined;
+        model: Anthropic.Messages.Model;
         max_tokens: number;
         tools?: Anthropic.Messages.Tool[] | undefined;
         tool_choice?: Anthropic.Messages.MessageCreateParams.ToolChoiceAuto | Anthropic.Messages.MessageCreateParams.ToolChoiceAny | Anthropic.Messages.MessageCreateParams.ToolChoiceTool | undefined;
@@ -169,8 +522,8 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
     * Get the identifying parameters for the model
     */
    identifyingParams(): {
-        system?: string | undefined;
-        model: "claude-2.1" | (string & {}) | "claude-3-opus-20240229" | "claude-3-sonnet-20240229" | "claude-3-haiku-20240307" | "claude-2.0" | "claude-instant-1.2";
+        system?: string | Anthropic.Messages.TextBlockParam[] | undefined;
+        model: Anthropic.Messages.Model;
         max_tokens: number;
         tools?: Anthropic.Messages.Tool[] | undefined;
         tool_choice?: Anthropic.Messages.MessageCreateParams.ToolChoiceAuto | Anthropic.Messages.MessageCreateParams.ToolChoiceAny | Anthropic.Messages.MessageCreateParams.ToolChoiceTool | undefined;
@@ -188,7 +541,7 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
        generations: import("@langchain/core/outputs").ChatGeneration[];
        llmOutput: {
            id: string;
-            model: string;
+            model: Anthropic.Messages.Model;
            stop_reason: "tool_use" | "stop_sequence" | "end_turn" | "max_tokens" | null;
            stop_sequence: string | null;
            usage: Anthropic.Messages.Usage;
@@ -38,28 +38,381 @@ function extractToken(chunk) {
38
38
  return undefined;
39
39
  }
40
40
  /**
41
- * Wrapper around Anthropic large language models.
42
- *
43
- * To use this package, you should have an Anthropic API key set as an
44
- * environment variable named `ANTHROPIC_API_KEY` or passed
45
- * into the constructor.
46
- *
47
- * @remarks
48
- * Any parameters that are valid to be passed to {@link
49
- * https://console.anthropic.com/docs/api/reference |
50
- * `anthropic.messages`} can be passed through {@link invocationKwargs},
51
- * even if not explicitly available on this class.
52
- * @example
41
+ * Anthropic chat model integration.
42
+ *
43
+ * Setup:
44
+ * Install `@langchain/anthropic` and set an environment variable named `ANTHROPIC_API_KEY`.
45
+ *
46
+ * ```bash
47
+ * npm install @langchain/anthropic
48
+ * export ANTHROPIC_API_KEY="your-api-key"
49
+ * ```
50
+ *
51
+ * ## [Constructor args](https://api.js.langchain.com/classes/langchain_anthropic.ChatAnthropic.html#constructor)
52
+ *
53
+ * ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_anthropic.ChatAnthropicCallOptions.html)
54
+ *
55
+ * Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`. `.stream`, `.batch`, etc.
56
+ * They can also be passed via `.bind`, or the second arg in `.bindTools`, like shown in the examples below:
57
+ *
58
+ * ```typescript
59
+ * // When calling `.bind`, call options should be passed via the first argument
60
+ * const llmWithArgsBound = llm.bind({
61
+ * stop: ["\n"],
62
+ * tools: [...],
63
+ * });
64
+ *
65
+ * // When calling `.bindTools`, call options should be passed via the second argument
66
+ * const llmWithTools = llm.bindTools(
67
+ * [...],
68
+ * {
69
+ * tool_choice: "auto",
70
+ * }
71
+ * );
72
+ * ```
73
+ *
74
+ * ## Examples
75
+ *
76
+ * <details open>
77
+ * <summary><strong>Instantiate</strong></summary>
78
+ *
79
+ * ```typescript
80
+ * import { ChatAnthropic } from '@langchain/anthropic';
81
+ *
82
+ * const llm = new ChatAnthropic({
83
+ * model: "claude-3-5-sonnet-20240620",
84
+ * temperature: 0,
85
+ * maxTokens: undefined,
86
+ * maxRetries: 2,
87
+ * // apiKey: "...",
88
+ * // baseUrl: "...",
89
+ * // other params...
90
+ * });
91
+ * ```
92
+ * </details>
93
+ *
94
+ * <br />
95
+ *
96
+ * <details>
97
+ * <summary><strong>Invoking</strong></summary>
98
+ *
99
+ * ```typescript
100
+ * const input = `Translate "I love programming" into French.`;
101
+ *
102
+ * // Models also accept a list of chat messages or a formatted prompt
103
+ * const result = await llm.invoke(input);
104
+ * console.log(result);
105
+ * ```
106
+ *
107
+ * ```txt
108
+ * AIMessage {
109
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
110
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
111
+ * "response_metadata": {
112
+ * "id": "msg_01QDpd78JUHpRP6bRRNyzbW3",
113
+ * "model": "claude-3-5-sonnet-20240620",
114
+ * "stop_reason": "end_turn",
115
+ * "stop_sequence": null,
116
+ * "usage": {
117
+ * "input_tokens": 25,
118
+ * "output_tokens": 19
119
+ * },
120
+ * "type": "message",
121
+ * "role": "assistant"
122
+ * },
123
+ * "usage_metadata": {
124
+ * "input_tokens": 25,
125
+ * "output_tokens": 19,
126
+ * "total_tokens": 44
127
+ * }
128
+ * }
129
+ * ```
130
+ * </details>
131
+ *
132
+ * <br />
133
+ *
134
+ * <details>
135
+ * <summary><strong>Streaming Chunks</strong></summary>
136
+ *
137
+ * ```typescript
138
+ * for await (const chunk of await llm.stream(input)) {
139
+ * console.log(chunk);
140
+ * }
141
+ * ```
142
+ *
143
+ * ```txt
144
+ * AIMessageChunk {
145
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
146
+ * "content": "",
147
+ * "additional_kwargs": {
148
+ * "id": "msg_01N8MwoYxiKo9w4chE4gXUs4",
149
+ * "type": "message",
150
+ * "role": "assistant",
151
+ * "model": "claude-3-5-sonnet-20240620"
152
+ * },
153
+ * "usage_metadata": {
154
+ * "input_tokens": 25,
155
+ * "output_tokens": 1,
156
+ * "total_tokens": 26
157
+ * }
158
+ * }
159
+ * AIMessageChunk {
160
+ * "content": "",
161
+ * }
162
+ * AIMessageChunk {
163
+ * "content": "Here",
164
+ * }
165
+ * AIMessageChunk {
166
+ * "content": "'s",
167
+ * }
168
+ * AIMessageChunk {
169
+ * "content": " the translation to",
170
+ * }
171
+ * AIMessageChunk {
172
+ * "content": " French:\n\nJ",
173
+ * }
174
+ * AIMessageChunk {
175
+ * "content": "'adore la programmation",
176
+ * }
177
+ * AIMessageChunk {
178
+ * "content": ".",
179
+ * }
180
+ * AIMessageChunk {
181
+ * "content": "",
182
+ * "additional_kwargs": {
183
+ * "stop_reason": "end_turn",
184
+ * "stop_sequence": null
185
+ * },
186
+ * "usage_metadata": {
187
+ * "input_tokens": 0,
188
+ * "output_tokens": 19,
189
+ * "total_tokens": 19
190
+ * }
191
+ * }
192
+ * ```
193
+ * </details>
194
+ *
195
+ * <br />
196
+ *
197
+ * <details>
198
+ * <summary><strong>Aggregate Streamed Chunks</strong></summary>
199
+ *
200
+ * ```typescript
201
+ * import { AIMessageChunk } from '@langchain/core/messages';
202
+ * import { concat } from '@langchain/core/utils/stream';
203
+ *
204
+ * const stream = await llm.stream(input);
205
+ * let full: AIMessageChunk | undefined;
206
+ * for await (const chunk of stream) {
207
+ * full = !full ? chunk : concat(full, chunk);
208
+ * }
209
+ * console.log(full);
210
+ * ```
211
+ *
212
+ * ```txt
213
+ * AIMessageChunk {
214
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
215
+ * "content": "Here's the translation to French:\n\nJ'adore la programmation.",
216
+ * "additional_kwargs": {
217
+ * "id": "msg_01SBTb5zSGXfjUc7yQ8EKEEA",
218
+ * "type": "message",
219
+ * "role": "assistant",
220
+ * "model": "claude-3-5-sonnet-20240620",
221
+ * "stop_reason": "end_turn",
222
+ * "stop_sequence": null
223
+ * },
224
+ * "usage_metadata": {
225
+ * "input_tokens": 25,
226
+ * "output_tokens": 20,
227
+ * "total_tokens": 45
228
+ * }
229
+ * }
230
+ * ```
231
+ * </details>
232
+ *
233
+ * <br />
234
+ *
235
+ * <details>
236
+ * <summary><strong>Bind tools</strong></summary>
237
+ *
238
+ * ```typescript
239
+ * import { z } from 'zod';
240
+ *
241
+ * const GetWeather = {
242
+ * name: "GetWeather",
243
+ * description: "Get the current weather in a given location",
244
+ * schema: z.object({
245
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
246
+ * }),
247
+ * }
248
+ *
249
+ * const GetPopulation = {
250
+ * name: "GetPopulation",
251
+ * description: "Get the current population in a given location",
252
+ * schema: z.object({
253
+ * location: z.string().describe("The city and state, e.g. San Francisco, CA")
254
+ * }),
255
+ * }
256
+ *
257
+ * const llmWithTools = llm.bindTools([GetWeather, GetPopulation]);
258
+ * const aiMsg = await llmWithTools.invoke(
259
+ * "Which city is hotter today and which is bigger: LA or NY?"
260
+ * );
261
+ * console.log(aiMsg.tool_calls);
262
+ * ```
263
+ *
264
+ * ```txt
265
+ * [
266
+ * {
267
+ * name: 'GetWeather',
268
+ * args: { location: 'Los Angeles, CA' },
269
+ * id: 'toolu_01WjW3Dann6BPJVtLhovdBD5',
270
+ * type: 'tool_call'
271
+ * },
272
+ * {
273
+ * name: 'GetWeather',
274
+ * args: { location: 'New York, NY' },
275
+ * id: 'toolu_01G6wfJgqi5zRmJomsmkyZXe',
276
+ * type: 'tool_call'
277
+ * },
278
+ * {
279
+ * name: 'GetPopulation',
280
+ * args: { location: 'Los Angeles, CA' },
281
+ * id: 'toolu_0165qYWBA2VFyUst5RA18zew',
282
+ * type: 'tool_call'
283
+ * },
284
+ * {
285
+ * name: 'GetPopulation',
286
+ * args: { location: 'New York, NY' },
287
+ * id: 'toolu_01PGNyP33vxr13tGqr7i3rDo',
288
+ * type: 'tool_call'
289
+ * }
290
+ * ]
291
+ * ```
292
+ * </details>
293
+ *
294
+ * <br />
295
+ *
296
+ * <details>
297
+ * <summary><strong>Structured Output</strong></summary>
298
+ *
53
299
  * ```typescript
54
- * import { ChatAnthropic } from "@langchain/anthropic";
300
+ * import { z } from 'zod';
301
+ *
302
+ * const Joke = z.object({
303
+ * setup: z.string().describe("The setup of the joke"),
304
+ * punchline: z.string().describe("The punchline to the joke"),
305
+ * rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
306
+ * }).describe('Joke to tell user.');
55
307
  *
56
- * const model = new ChatAnthropic({
57
- * temperature: 0.9,
58
- * apiKey: 'YOUR-API-KEY',
308
+ * const structuredLlm = llm.withStructuredOutput(Joke, { name: "Joke" });
309
+ * const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
310
+ * console.log(jokeResult);
311
+ * ```
312
+ *
313
+ * ```txt
314
+ * {
315
+ * setup: "Why don't cats play poker in the jungle?",
316
+ * punchline: 'Too many cheetahs!',
317
+ * rating: 7
318
+ * }
319
+ * ```
320
+ * </details>
321
+ *
322
+ * <br />
323
+ *
324
+ * <details>
325
+ * <summary><strong>Multimodal</strong></summary>
326
+ *
327
+ * ```typescript
328
+ * import { HumanMessage } from '@langchain/core/messages';
329
+ *
330
+ * const imageUrl = "https://example.com/image.jpg";
331
+ * const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
332
+ * const base64Image = Buffer.from(imageData).toString('base64');
333
+ *
334
+ * const message = new HumanMessage({
335
+ * content: [
336
+ * { type: "text", text: "describe the weather in this image" },
337
+ * {
338
+ * type: "image_url",
339
+ * image_url: { url: `data:image/jpeg;base64,${base64Image}` },
340
+ * },
341
+ * ]
59
342
  * });
60
- * const res = await model.invoke({ input: 'Hello!' });
61
- * console.log(res);
343
+ *
344
+ * const imageDescriptionAiMsg = await llm.invoke([message]);
345
+ * console.log(imageDescriptionAiMsg.content);
346
+ * ```
347
+ *
348
+ * ```txt
349
+ * The weather in this image appears to be beautiful and clear. The sky is a vibrant blue with scattered white clouds, suggesting a sunny and pleasant day. The clouds are wispy and light, indicating calm conditions without any signs of storms or heavy weather. The bright green grass on the rolling hills looks lush and well-watered, which could mean recent rainfall or good growing conditions. Overall, the scene depicts a perfect spring or early summer day with mild temperatures, plenty of sunshine, and gentle breezes - ideal weather for enjoying the outdoors or for plant growth.
350
+ * ```
351
+ * </details>
352
+ *
353
+ * <br />
354
+ *
355
+ * <details>
356
+ * <summary><strong>Usage Metadata</strong></summary>
357
+ *
358
+ * ```typescript
359
+ * const aiMsgForMetadata = await llm.invoke(input);
360
+ * console.log(aiMsgForMetadata.usage_metadata);
361
+ * ```
362
+ *
363
+ * ```txt
364
+ * { input_tokens: 25, output_tokens: 19, total_tokens: 44 }
365
+ * ```
366
+ * </details>
367
+ *
368
+ * <br />
369
+ *
370
+ * <details>
371
+ * <summary><strong>Stream Usage Metadata</strong></summary>
372
+ *
373
+ * ```typescript
374
+ * const streamForMetadata = await llm.stream(
375
+ * input,
376
+ * {
377
+ * streamUsage: true
378
+ * }
379
+ * );
380
+ * let fullForMetadata: AIMessageChunk | undefined;
381
+ * for await (const chunk of streamForMetadata) {
382
+ * fullForMetadata = !fullForMetadata ? chunk : concat(fullForMetadata, chunk);
383
+ * }
384
+ * console.log(fullForMetadata?.usage_metadata);
385
+ * ```
386
+ *
387
+ * ```txt
388
+ * { input_tokens: 25, output_tokens: 20, total_tokens: 45 }
62
389
  * ```
390
+ * </details>
391
+ *
392
+ * <br />
393
+ *
394
+ * <details>
395
+ * <summary><strong>Response Metadata</strong></summary>
396
+ *
397
+ * ```typescript
398
+ * const aiMsgForResponseMetadata = await llm.invoke(input);
399
+ * console.log(aiMsgForResponseMetadata.response_metadata);
400
+ * ```
401
+ *
402
+ * ```txt
403
+ * {
404
+ * id: 'msg_01STxeQxJmp4sCSpioD6vK3L',
405
+ * model: 'claude-3-5-sonnet-20240620',
406
+ * stop_reason: 'end_turn',
407
+ * stop_sequence: null,
408
+ * usage: { input_tokens: 25, output_tokens: 19 },
409
+ * type: 'message',
410
+ * role: 'assistant'
411
+ * }
412
+ * ```
413
+ * </details>
414
+ *
415
+ * <br />
63
416
  */
64
417
  export class ChatAnthropicMessages extends BaseChatModel {
65
418
  static lc_name() {
@@ -346,20 +699,11 @@ export class ChatAnthropicMessages extends BaseChatModel {
     }
     /** @ignore */
     async _generateNonStreaming(messages, params, requestOptions) {
-        const options = params.tools !== undefined
-            ? {
-                ...requestOptions,
-                headers: {
-                    ...requestOptions.headers,
-                    "anthropic-beta": "tools-2024-04-04",
-                },
-            }
-            : requestOptions;
         const response = await this.completionWithRetry({
             ...params,
             stream: false,
             ..._formatMessagesForAnthropic(messages),
-        }, options);
+        }, requestOptions);
         const { content, ...additionalKwargs } = response;
         const generations = anthropicResponseToChatMessages(content, additionalKwargs);
         // eslint-disable-next-line @typescript-eslint/no-unused-vars
@@ -166,9 +166,6 @@ function _formatMessagesForAnthropic(messages) {
     const mergedMessages = _mergeMessages(messages);
     let system;
     if (mergedMessages.length > 0 && mergedMessages[0]._getType() === "system") {
-        if (typeof messages[0].content !== "string") {
-            throw new Error("System message content must be a string.");
-        }
         system = messages[0].content;
     }
     const conversationMessages = system !== undefined ? mergedMessages.slice(1) : mergedMessages;
@@ -3,14 +3,11 @@
  */
 import { BaseMessage } from "@langchain/core/messages";
 import { ToolCall } from "@langchain/core/messages/tool";
-import { AnthropicMessageParam, AnthropicToolResponse } from "../types.js";
+import { AnthropicMessageCreateParams, AnthropicToolResponse } from "../types.js";
 export declare function _convertLangChainToolCallToAnthropic(toolCall: ToolCall): AnthropicToolResponse;
 /**
  * Formats messages as a prompt for the model.
  * @param messages The base messages to format as a prompt.
  * @returns The formatted prompt.
  */
-export declare function _formatMessagesForAnthropic(messages: BaseMessage[]): {
-    system?: string;
-    messages: AnthropicMessageParam[];
-};
+export declare function _formatMessagesForAnthropic(messages: BaseMessage[]): AnthropicMessageCreateParams;
@@ -162,9 +162,6 @@ export function _formatMessagesForAnthropic(messages) {
     const mergedMessages = _mergeMessages(messages);
     let system;
     if (mergedMessages.length > 0 && mergedMessages[0]._getType() === "system") {
-        if (typeof messages[0].content !== "string") {
-            throw new Error("System message content must be a string.");
-        }
         system = messages[0].content;
     }
     const conversationMessages = system !== undefined ? mergedMessages.slice(1) : mergedMessages;
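The hunks above drop the runtime requirement that a system message's content be a plain string (and the declared `system` type now also allows `Anthropic.Messages.TextBlockParam[]`). A minimal sketch, assuming a standard `@langchain/core` message setup, of a system prompt passed as content blocks rather than a string:

```typescript
import { ChatAnthropic } from "@langchain/anthropic";
import { HumanMessage, SystemMessage } from "@langchain/core/messages";

const llm = new ChatAnthropic({ model: "claude-3-5-sonnet-20240620" });

// The system prompt is given as an array of text blocks instead of a string;
// before this change, _formatMessagesForAnthropic threw on non-string content.
const messages = [
  new SystemMessage({
    content: [{ type: "text", text: "You are a terse assistant." }],
  }),
  new HumanMessage("Say hello."),
];

const res = await llm.invoke(messages);
console.log(res.content);
```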
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/anthropic",
-  "version": "0.2.13",
+  "version": "0.2.15",
   "description": "Anthropic integrations for LangChain.js",
   "type": "module",
   "engines": {
@@ -35,7 +35,7 @@
   "author": "LangChain",
   "license": "MIT",
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.22.0",
+    "@anthropic-ai/sdk": "^0.25.2",
     "@langchain/core": ">=0.2.21 <0.3.0",
     "fast-xml-parser": "^4.4.1",
     "zod": "^3.22.4",