@langchain/anthropic 0.1.9 → 0.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,12 @@
  import { Anthropic } from "@anthropic-ai/sdk";
  import { AIMessage, AIMessageChunk, } from "@langchain/core/messages";
- import { ChatGenerationChunk } from "@langchain/core/outputs";
+ import { ChatGenerationChunk, } from "@langchain/core/outputs";
  import { getEnvironmentVariable } from "@langchain/core/utils/env";
  import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
+ import { zodToJsonSchema } from "zod-to-json-schema";
+ import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
+ import { isZodSchema } from "@langchain/core/utils/types";
+ import { AnthropicToolsOutputParser } from "./output_parsers.js";
  function _formatImage(imageUrl) {
  const regex = /^data:(image\/.+);base64,(.+)$/;
  const match = imageUrl.match(regex);
@@ -19,6 +23,34 @@ function _formatImage(imageUrl) {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  };
  }
+ function anthropicResponseToChatMessages(messages, additionalKwargs) {
+ if (messages.length === 1 && messages[0].type === "text") {
+ return [
+ {
+ text: messages[0].text,
+ message: new AIMessage(messages[0].text, additionalKwargs),
+ },
+ ];
+ }
+ else {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const castMessage = messages;
+ const generations = [
+ {
+ text: "",
+ message: new AIMessage({
+ content: castMessage,
+ additional_kwargs: additionalKwargs,
+ }),
+ },
+ ];
+ return generations;
+ }
+ }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ function isAnthropicTool(tool) {
+ return "input_schema" in tool;
+ }
  /**
  * Wrapper around Anthropic large language models.
  *
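
The new anthropicResponseToChatMessages helper converts the content array of an Anthropic response into LangChain chat generations: a lone text block becomes an AIMessage with plain string content, while any other shape (for example a response containing tool_use blocks) is passed through as structured message content with an empty text field. A rough sketch of the two result shapes, using invented response content rather than real API output:

    // Illustrative sketch only; both response arrays below are invented examples.
    import { AIMessage } from "@langchain/core/messages";

    // A single text block yields a string-content AIMessage.
    const textOnly = [{ type: "text", text: "Hello!" }];
    const textGenerations = [
      { text: "Hello!", message: new AIMessage("Hello!", {}) },
    ];

    // Anything else (e.g. tool_use blocks) is kept as structured content.
    const withToolUse = [
      { type: "tool_use", id: "toolu_abc", name: "calculator", input: { a: 1, b: 2 } },
    ];
    const toolGenerations = [
      { text: "", message: new AIMessage({ content: withToolUse, additional_kwargs: {} }) },
    ];
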
@@ -162,6 +194,32 @@ export class ChatAnthropicMessages extends BaseChatModel {
  this.streaming = fields?.streaming ?? false;
  this.clientOptions = fields?.clientOptions ?? {};
  }
+ /**
+ * Formats LangChain StructuredTools to AnthropicTools.
+ *
+ * @param {ChatAnthropicCallOptions["tools"]} tools The tools to format
+ * @returns {AnthropicTool[] | undefined} The formatted tools, or undefined if none are passed.
+ * @throws {Error} If a mix of AnthropicTools and StructuredTools are passed.
+ */
+ formatStructuredToolToAnthropic(tools) {
+ if (!tools || !tools.length) {
+ return undefined;
+ }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ if (tools.every((tool) => isAnthropicTool(tool))) {
+ // If the tool is already an anthropic tool, return it
+ return tools;
+ }
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ if (tools.some((tool) => isAnthropicTool(tool))) {
+ throw new Error(`Can not pass in a mix of AnthropicTools and StructuredTools`);
+ }
+ return tools.map((tool) => ({
+ name: tool.name,
+ description: tool.description,
+ input_schema: zodToJsonSchema(tool.schema),
+ }));
+ }
  /**
  * Get the parameters used to invoke the model
  */
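
formatStructuredToolToAnthropic accepts either ready-made Anthropic tools (recognized by an input_schema property) or LangChain StructuredTools, but refuses a mix of the two. A hedged sketch of the conversion for a Zod-backed tool; the calculator tool here is a hypothetical example, not something from the package:

    // Hypothetical StructuredTool-like input and its Anthropic-format output.
    import { z } from "zod";
    import { zodToJsonSchema } from "zod-to-json-schema";

    const calculatorTool = {
      name: "calculator",
      description: "Adds two numbers together",
      schema: z.object({
        a: z.number().describe("First operand"),
        b: z.number().describe("Second operand"),
      }),
    };

    // Roughly what formatStructuredToolToAnthropic returns for [calculatorTool]:
    const anthropicTool = {
      name: calculatorTool.name,
      description: calculatorTool.description,
      input_schema: zodToJsonSchema(calculatorTool.schema),
    };
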
@@ -177,6 +235,27 @@ export class ChatAnthropicMessages extends BaseChatModel {
  ...this.invocationKwargs,
  };
  }
+ invocationOptions(request, options) {
+ const toolUseBetaHeader = {
+ "anthropic-beta": "tools-2024-04-04",
+ };
+ const tools = this.formatStructuredToolToAnthropic(options?.tools);
+ // If tools are present, populate the body with the message request params.
+ // This is because Anthropic overwrites the message request params if a body
+ // is passed.
+ const body = tools
+ ? {
+ ...request,
+ tools,
+ }
+ : undefined;
+ const headers = tools ? toolUseBetaHeader : undefined;
+ return {
+ signal: options.signal,
+ ...(body ? { body } : {}),
+ ...(headers ? { headers } : {}),
+ };
+ }
  /** @ignore */
  _identifyingParams() {
  return {
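
invocationOptions only adds a request body and the tools beta header when tools are bound; otherwise it forwards nothing but the abort signal. The duplication of the request params into the body follows the comment in the code above: the SDK replaces the message request params whenever a body override is supplied. A sketch of the two shapes, with placeholder values:

    // Placeholder values; shapes follow the invocationOptions implementation above.
    const withoutTools = { signal: undefined };

    const withTools = {
      signal: undefined,
      body: {
        model: "claude-3-sonnet-20240229",
        max_tokens: 1024,
        messages: [{ role: "user", content: "What is 2 + 2?" }],
        tools: [/* output of formatStructuredToolToAnthropic */],
      },
      headers: { "anthropic-beta": "tools-2024-04-04" },
    };
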
@@ -195,69 +274,91 @@ export class ChatAnthropicMessages extends BaseChatModel {
  }
  async *_streamResponseChunks(messages, options, runManager) {
  const params = this.invocationParams(options);
- const stream = await this.createStreamWithRetry({
+ const requestOptions = this.invocationOptions({
  ...params,
+ stream: false,
  ...this.formatMessagesForAnthropic(messages),
- stream: true,
- });
- let usageData = { input_tokens: 0, output_tokens: 0 };
- for await (const data of stream) {
- if (options.signal?.aborted) {
- stream.controller.abort();
- throw new Error("AbortError: User aborted the request.");
- }
- if (data.type === "message_start") {
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
- const { content, usage, ...additionalKwargs } = data.message;
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- const filteredAdditionalKwargs = {};
- for (const [key, value] of Object.entries(additionalKwargs)) {
- if (value !== undefined && value !== null) {
- filteredAdditionalKwargs[key] = value;
- }
+ }, options);
+ if (options.tools !== undefined && options.tools.length > 0) {
+ const requestOptions = this.invocationOptions({
+ ...params,
+ stream: false,
+ ...this.formatMessagesForAnthropic(messages),
+ }, options);
+ const generations = await this._generateNonStreaming(messages, params, requestOptions);
+ yield new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content: generations[0].message.content,
+ additional_kwargs: generations[0].message.additional_kwargs,
+ }),
+ text: generations[0].text,
+ });
+ }
+ else {
+ const stream = await this.createStreamWithRetry({
+ ...params,
+ ...this.formatMessagesForAnthropic(messages),
+ stream: true,
+ }, requestOptions);
+ let usageData = { input_tokens: 0, output_tokens: 0 };
+ for await (const data of stream) {
+ if (options.signal?.aborted) {
+ stream.controller.abort();
+ throw new Error("AbortError: User aborted the request.");
  }
- usageData = usage;
- yield new ChatGenerationChunk({
- message: new AIMessageChunk({
- content: "",
- additional_kwargs: filteredAdditionalKwargs,
- }),
- text: "",
- });
- }
- else if (data.type === "message_delta") {
- yield new ChatGenerationChunk({
- message: new AIMessageChunk({
- content: "",
- additional_kwargs: { ...data.delta },
- }),
- text: "",
- });
- if (data?.usage !== undefined) {
- usageData.output_tokens += data.usage.output_tokens;
+ if (data.type === "message_start") {
+ // eslint-disable-next-line @typescript-eslint/no-unused-vars
+ const { content, usage, ...additionalKwargs } = data.message;
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const filteredAdditionalKwargs = {};
+ for (const [key, value] of Object.entries(additionalKwargs)) {
+ if (value !== undefined && value !== null) {
+ filteredAdditionalKwargs[key] = value;
+ }
+ }
+ usageData = usage;
+ yield new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content: "",
+ additional_kwargs: filteredAdditionalKwargs,
+ }),
+ text: "",
+ });
  }
- }
- else if (data.type === "content_block_delta") {
- const content = data.delta?.text;
- if (content !== undefined) {
+ else if (data.type === "message_delta") {
  yield new ChatGenerationChunk({
  message: new AIMessageChunk({
- content,
- additional_kwargs: {},
+ content: "",
+ additional_kwargs: { ...data.delta },
  }),
- text: content,
+ text: "",
  });
- await runManager?.handleLLMNewToken(content);
+ if (data?.usage !== undefined) {
+ usageData.output_tokens += data.usage.output_tokens;
+ }
+ }
+ else if (data.type === "content_block_delta") {
+ const content = data.delta?.text;
+ if (content !== undefined) {
+ yield new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content,
+ additional_kwargs: {},
+ }),
+ text: content,
+ });
+ await runManager?.handleLLMNewToken(content);
+ }
  }
  }
+ yield new ChatGenerationChunk({
+ message: new AIMessageChunk({
+ content: "",
+ additional_kwargs: { usage: usageData },
+ }),
+ text: "",
+ });
  }
- yield new ChatGenerationChunk({
- message: new AIMessageChunk({
- content: "",
- additional_kwargs: { usage: usageData },
- }),
- text: "",
- });
  }
  /**
  * Formats messages as a prompt for the model.
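
The streaming path now branches on bound tools: with tools present, _streamResponseChunks delegates to _generateNonStreaming and yields the entire response as one chunk (tool use is not streamed token by token); without tools, the original token-level streaming and the trailing usage chunk are preserved. A hedged usage sketch; the model name, prompts, and tool definition are placeholders:

    // Illustrative usage; model name, prompts, and tool definition are made up.
    import { ChatAnthropic } from "@langchain/anthropic";

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });

    // No tools bound: chunks arrive incrementally from content_block_delta events.
    for await (const chunk of await model.stream("Tell me a joke")) {
      console.log(chunk.content);
    }

    // Tools bound: the stream falls back to a single chunk holding the full
    // response, including any tool_use content blocks.
    const withTools = model.bind({
      tools: [{
        name: "calculator",
        description: "Adds two numbers",
        input_schema: {
          type: "object",
          properties: { a: { type: "number" }, b: { type: "number" } },
          required: ["a", "b"],
        },
      }],
    });
    for await (const chunk of await withTools.stream("What is 2 + 2?")) {
      console.log(chunk.content);
    }
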
@@ -281,6 +382,9 @@ export class ChatAnthropicMessages extends BaseChatModel {
  else if (message._getType() === "ai") {
  role = "assistant";
  }
+ else if (message._getType() === "tool") {
+ role = "user";
+ }
  else if (message._getType() === "system") {
  throw new Error("System messages are only permitted as the first passed message.");
  }
@@ -294,26 +398,44 @@ export class ChatAnthropicMessages extends BaseChatModel {
  };
  }
  else {
- return {
- role,
- content: message.content.map((contentPart) => {
- if (contentPart.type === "image_url") {
- let source;
- if (typeof contentPart.image_url === "string") {
- source = _formatImage(contentPart.image_url);
- }
- else {
- source = _formatImage(contentPart.image_url.url);
- }
- return {
- type: "image",
- source,
- };
+ const contentBlocks = message.content.map((contentPart) => {
+ if (contentPart.type === "image_url") {
+ let source;
+ if (typeof contentPart.image_url === "string") {
+ source = _formatImage(contentPart.image_url);
  }
  else {
- return contentPart;
+ source = _formatImage(contentPart.image_url.url);
  }
- }),
+ return {
+ type: "image",
+ source,
+ };
+ }
+ else if (contentPart.type === "text") {
+ // Assuming contentPart is of type MessageContentText here
+ return {
+ type: "text",
+ text: contentPart.text,
+ };
+ }
+ else if (contentPart.type === "tool_use") {
+ // TODO: Fix when SDK types are fixed
+ return {
+ type: "tool_use",
+ id: contentPart.id,
+ name: contentPart.name,
+ input: contentPart.input,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ };
+ }
+ else {
+ throw new Error("Unsupported message content format");
+ }
+ });
+ return {
+ role,
+ content: contentBlocks,
  };
  }
  });
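
Complex message content is now mapped block by block into Anthropic content blocks, with explicit handling for image_url, text, and tool_use parts and an error for anything else. A hedged sketch of the mapping for a multimodal human message, assuming _formatImage returns the Anthropic base64 source shape; the data URL is a shortened placeholder:

    // Invented example content; the base64 payload is truncated for readability.
    const humanContent = [
      { type: "text", text: "What is in this image?" },
      { type: "image_url", image_url: { url: "data:image/png;base64,iVBORw0KGgo..." } },
    ];

    // Roughly what formatMessagesForAnthropic produces for that content:
    const anthropicBlocks = [
      { type: "text", text: "What is in this image?" },
      {
        type: "image",
        source: { type: "base64", media_type: "image/png", data: "iVBORw0KGgo..." },
      },
    ];
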
@@ -323,6 +445,17 @@ export class ChatAnthropicMessages extends BaseChatModel {
  };
  }
  /** @ignore */
+ async _generateNonStreaming(messages, params, requestOptions) {
+ const response = await this.completionWithRetry({
+ ...params,
+ stream: false,
+ ...this.formatMessagesForAnthropic(messages),
+ }, requestOptions);
+ const { content, ...additionalKwargs } = response;
+ const generations = anthropicResponseToChatMessages(content, additionalKwargs);
+ return generations;
+ }
+ /** @ignore */
  async _generate(messages, options, runManager) {
  if (this.stopSequences && options.stop) {
  throw new Error(`"stopSequence" parameter found in input and default params`);
@@ -330,7 +463,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
  const params = this.invocationParams(options);
  if (params.stream) {
  let finalChunk;
- const stream = await this._streamResponseChunks(messages, options, runManager);
+ const stream = this._streamResponseChunks(messages, options, runManager);
  for await (const chunk of stream) {
  if (finalChunk === undefined) {
  finalChunk = chunk;
@@ -352,26 +485,14 @@ export class ChatAnthropicMessages extends BaseChatModel {
  };
  }
  else {
- const response = await this.completionWithRetry({
+ const requestOptions = this.invocationOptions({
  ...params,
  stream: false,
  ...this.formatMessagesForAnthropic(messages),
- }, { signal: options.signal });
- const { content, ...additionalKwargs } = response;
- if (!Array.isArray(content) || content.length !== 1) {
- console.log(content);
- throw new Error("Received multiple content parts in Anthropic response. Only single part messages are currently supported.");
- }
+ }, options);
+ const generations = await this._generateNonStreaming(messages, params, requestOptions);
  return {
- generations: [
- {
- text: content[0].text,
- message: new AIMessage({
- content: content[0].text,
- additional_kwargs: additionalKwargs,
- }),
- },
- ],
+ generations,
  };
  }
  }
@@ -380,12 +501,12 @@ export class ChatAnthropicMessages extends BaseChatModel {
  * @param request The parameters for creating a completion.
  * @returns A streaming request.
  */
- async createStreamWithRetry(request) {
+ async createStreamWithRetry(request, options) {
  if (!this.streamingClient) {
- const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+ const options_ = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
  this.streamingClient = new Anthropic({
  ...this.clientOptions,
- ...options,
+ ...options_,
  apiKey: this.anthropicApiKey,
  // Prefer LangChain built-in retries
  maxRetries: 0,
@@ -395,7 +516,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
  ...request,
  ...this.invocationKwargs,
  stream: true,
- });
+ }, options);
  return this.caller.call(makeCompletionRequest);
  }
  /** @ignore */
@@ -415,12 +536,88 @@ export class ChatAnthropicMessages extends BaseChatModel {
  const makeCompletionRequest = async () => this.batchClient.messages.create({
  ...request,
  ...this.invocationKwargs,
- });
- return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
+ }, options);
+ return this.caller.callWithOptions({ signal: options.signal ?? undefined }, makeCompletionRequest);
  }
  _llmType() {
  return "anthropic";
  }
+ withStructuredOutput(outputSchema, config) {
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const schema = outputSchema;
+ const name = config?.name;
+ const method = config?.method;
+ const includeRaw = config?.includeRaw;
+ if (method === "jsonMode") {
+ throw new Error(`Anthropic only supports "functionCalling" as a method.`);
+ }
+ let functionName = name ?? "extract";
+ let outputParser;
+ let tools;
+ if (isZodSchema(schema)) {
+ const jsonSchema = zodToJsonSchema(schema);
+ tools = [
+ {
+ name: functionName,
+ description: jsonSchema.description ?? "A function available to call.",
+ input_schema: jsonSchema,
+ },
+ ];
+ outputParser = new AnthropicToolsOutputParser({
+ returnSingle: true,
+ keyName: functionName,
+ zodSchema: schema,
+ });
+ }
+ else {
+ let anthropicTools;
+ if (typeof schema.name === "string" &&
+ typeof schema.description === "string" &&
+ typeof schema.input_schema === "object" &&
+ schema.input_schema != null) {
+ anthropicTools = schema;
+ functionName = schema.name;
+ }
+ else {
+ anthropicTools = {
+ name: functionName,
+ description: schema.description ?? "",
+ input_schema: schema,
+ };
+ }
+ tools = [anthropicTools];
+ outputParser = new AnthropicToolsOutputParser({
+ returnSingle: true,
+ keyName: functionName,
+ });
+ }
+ const llm = this.bind({
+ tools,
+ });
+ if (!includeRaw) {
+ return llm.pipe(outputParser).withConfig({
+ runName: "ChatAnthropicStructuredOutput",
+ });
+ }
+ const parserAssign = RunnablePassthrough.assign({
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parsed: (input, config) => outputParser.invoke(input.raw, config),
+ });
+ const parserNone = RunnablePassthrough.assign({
+ parsed: () => null,
+ });
+ const parsedWithFallback = parserAssign.withFallbacks({
+ fallbacks: [parserNone],
+ });
+ return RunnableSequence.from([
+ {
+ raw: llm,
+ },
+ parsedWithFallback,
+ ]).withConfig({
+ runName: "StructuredOutputRunnable",
+ });
+ }
  }
  export class ChatAnthropic extends ChatAnthropicMessages {
  }
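
withStructuredOutput binds a single Anthropic tool derived from the supplied schema (a Zod schema is converted with zodToJsonSchema; an object that already looks like an AnthropicTool is used directly) and parses the resulting tool call with AnthropicToolsOutputParser. Only the function-calling method is supported, so method: "jsonMode" throws. With includeRaw, the chain returns both the raw message and the parsed value, falling back to parsed: null when parsing fails. A hedged usage sketch with an invented schema, tool name, and prompt:

    // Illustrative usage; schema, tool name, and prompt are made up.
    import { z } from "zod";
    import { ChatAnthropic } from "@langchain/anthropic";

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });

    const person = z.object({
      name: z.string().describe("The person's name"),
      height: z.number().describe("The person's height in feet"),
    });

    // Returns the parsed object directly.
    const structured = model.withStructuredOutput(person, { name: "person" });
    const parsed = await structured.invoke("Alex is 5 feet tall.");

    // With includeRaw, the result is { raw: AIMessage, parsed: <object or null> }.
    const structuredRaw = model.withStructuredOutput(person, {
      name: "person",
      includeRaw: true,
    });
    const { raw, parsed: maybeParsed } = await structuredRaw.invoke("Alex is 5 feet tall.");
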
@@ -6,7 +6,7 @@ import { zodToJsonSchema } from "zod-to-json-schema";
  import { HumanMessage } from "@langchain/core/messages";
  import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { ChatAnthropicTools } from "../tool_calling.js";
- test("Test ChatAnthropicTools", async () => {
+ test.skip("Test ChatAnthropicTools", async () => {
  const chat = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -15,7 +15,7 @@ test("Test ChatAnthropicTools", async () => {
  const res = await chat.invoke([message]);
  console.log(JSON.stringify(res));
  });
- test("Test ChatAnthropicTools streaming", async () => {
+ test.skip("Test ChatAnthropicTools streaming", async () => {
  const chat = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -29,7 +29,7 @@ test("Test ChatAnthropicTools streaming", async () => {
  }
  expect(chunks.length).toBeGreaterThan(1);
  });
- test("Test ChatAnthropicTools with tools", async () => {
+ test.skip("Test ChatAnthropicTools with tools", async () => {
  const chat = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  temperature: 0.1,
@@ -65,7 +65,7 @@ test("Test ChatAnthropicTools with tools", async () => {
  expect(res.additional_kwargs.tool_calls).toBeDefined();
  expect(res.additional_kwargs.tool_calls?.[0].function.name).toEqual("get_current_weather");
  });
- test("Test ChatAnthropicTools with a forced function call", async () => {
+ test.skip("Test ChatAnthropicTools with a forced function call", async () => {
  const chat = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  temperature: 0.1,
@@ -106,7 +106,7 @@ test("Test ChatAnthropicTools with a forced function call", async () => {
  expect(res.additional_kwargs.tool_calls).toBeDefined();
  expect(res.additional_kwargs.tool_calls?.[0]?.function.name).toEqual("extract_data");
  });
- test("ChatAnthropicTools with Zod schema", async () => {
+ test.skip("ChatAnthropicTools with Zod schema", async () => {
  const schema = z.object({
  people: z.array(z.object({
  name: z.string().describe("The name of a person"),
@@ -148,7 +148,7 @@ test("ChatAnthropicTools with Zod schema", async () => {
  ]),
  });
  });
- test("ChatAnthropicTools with parallel tool calling", async () => {
+ test.skip("ChatAnthropicTools with parallel tool calling", async () => {
  const schema = z.object({
  name: z.string().describe("The name of a person"),
  height: z.number().describe("The person's height"),
@@ -186,7 +186,7 @@ test("ChatAnthropicTools with parallel tool calling", async () => {
  { name: "Claudia", height: 6, hairColor: "brunette" },
  ]));
  });
- test("Test ChatAnthropic withStructuredOutput", async () => {
+ test.skip("Test ChatAnthropic withStructuredOutput", async () => {
  const runnable = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -202,7 +202,7 @@ test("Test ChatAnthropic withStructuredOutput", async () => {
  console.log(JSON.stringify(res, null, 2));
  expect(res).toEqual({ name: "Alex", height: 5, hairColor: "blonde" });
  });
- test("Test ChatAnthropic withStructuredOutput on a single array item", async () => {
+ test.skip("Test ChatAnthropic withStructuredOutput on a single array item", async () => {
  const runnable = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -220,7 +220,7 @@ test("Test ChatAnthropic withStructuredOutput on a single array item", async ()
  people: [{ hairColor: "blonde", height: 5, name: "Alex" }],
  });
  });
- test("Test ChatAnthropic withStructuredOutput on a single array item", async () => {
+ test.skip("Test ChatAnthropic withStructuredOutput on a single array item", async () => {
  const runnable = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -262,7 +262,7 @@ test("Test ChatAnthropic withStructuredOutput on a single array item", async ()
  tone: "positive",
  });
  });
- test("Test ChatAnthropicTools", async () => {
+ test.skip("Test ChatAnthropicTools", async () => {
  const chat = new ChatAnthropicTools({
  modelName: "claude-3-sonnet-20240229",
  maxRetries: 0,
@@ -12,6 +12,7 @@ const tool_calling_js_1 = require("./utils/tool_calling.cjs");
  /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
  class ChatAnthropicTools extends chat_models_1.BaseChatModel {
  static lc_name() {
@@ -23,6 +23,7 @@ export type ChatAnthropicToolsInput = Partial<AnthropicInput> & BaseChatModelPar
  /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
  export declare class ChatAnthropicTools extends BaseChatModel<ChatAnthropicToolsCallOptions> {
  llm: BaseChatModel;
@@ -9,6 +9,7 @@ import { DEFAULT_TOOL_SYSTEM_PROMPT, formatAsXMLRepresentation, fixArrayXMLParam
  /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
  export class ChatAnthropicTools extends BaseChatModel {
  static lc_name() {