@langchain/anthropic 0.1.8 → 0.1.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,12 @@
 import { Anthropic } from "@anthropic-ai/sdk";
 import { AIMessage, AIMessageChunk, } from "@langchain/core/messages";
-import { ChatGenerationChunk } from "@langchain/core/outputs";
+import { ChatGenerationChunk, } from "@langchain/core/outputs";
 import { getEnvironmentVariable } from "@langchain/core/utils/env";
 import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
+import { zodToJsonSchema } from "zod-to-json-schema";
+import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
+import { isZodSchema } from "@langchain/core/utils/types";
+import { AnthropicToolsOutputParser } from "./output_parsers.js";
 function _formatImage(imageUrl) {
     const regex = /^data:(image\/.+);base64,(.+)$/;
     const match = imageUrl.match(regex);
@@ -19,6 +23,34 @@ function _formatImage(imageUrl) {
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
     };
 }
+function anthropicResponseToChatMessages(messages, additionalKwargs) {
+    if (messages.length === 1 && messages[0].type === "text") {
+        return [
+            {
+                text: messages[0].text,
+                message: new AIMessage(messages[0].text, additionalKwargs),
+            },
+        ];
+    }
+    else {
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        const castMessage = messages;
+        const generations = [
+            {
+                text: "",
+                message: new AIMessage({
+                    content: castMessage,
+                    additional_kwargs: additionalKwargs,
+                }),
+            },
+        ];
+        return generations;
+    }
+}
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+function isAnthropicTool(tool) {
+    return "input_schema" in tool;
+}
 /**
  * Wrapper around Anthropic large language models.
  *
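Note: two module-level helpers are introduced above. `anthropicResponseToChatMessages` maps Anthropic content blocks onto LangChain generations (a lone text block becomes a plain-string `AIMessage`; anything else, such as `tool_use` blocks, is kept as the raw content array), and `isAnthropicTool` tells Anthropic-format tools apart from LangChain StructuredTools. A minimal sketch of the shape difference that check relies on; the tool name and fields below are invented for illustration:

    import { z } from "zod";

    // Anthropic-format tool: carries a JSON Schema under `input_schema`.
    const anthropicFormatTool = {
      name: "get_weather",
      description: "Look up the current weather for a city.",
      input_schema: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    };

    // LangChain StructuredTool-style definition: carries a Zod `schema` instead,
    // and is converted with zodToJsonSchema (see formatStructuredToolToAnthropic below).
    const structuredToolLike = {
      name: "get_weather",
      description: "Look up the current weather for a city.",
      schema: z.object({ city: z.string() }),
    };

    console.log("input_schema" in anthropicFormatTool); // true  -> passed through as-is
    console.log("input_schema" in structuredToolLike);  // false -> converted to input_schema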
@@ -162,6 +194,32 @@ export class ChatAnthropicMessages extends BaseChatModel {
         this.streaming = fields?.streaming ?? false;
         this.clientOptions = fields?.clientOptions ?? {};
     }
+    /**
+     * Formats LangChain StructuredTools to AnthropicTools.
+     *
+     * @param {ChatAnthropicCallOptions["tools"]} tools The tools to format
+     * @returns {AnthropicTool[] | undefined} The formatted tools, or undefined if none are passed.
+     * @throws {Error} If a mix of AnthropicTools and StructuredTools are passed.
+     */
+    formatStructuredToolToAnthropic(tools) {
+        if (!tools || !tools.length) {
+            return undefined;
+        }
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        if (tools.every((tool) => isAnthropicTool(tool))) {
+            // If the tool is already an anthropic tool, return it
+            return tools;
+        }
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        if (tools.some((tool) => isAnthropicTool(tool))) {
+            throw new Error(`Can not pass in a mix of AnthropicTools and StructuredTools`);
+        }
+        return tools.map((tool) => ({
+            name: tool.name,
+            description: tool.description,
+            input_schema: zodToJsonSchema(tool.schema),
+        }));
+    }
     /**
      * Get the parameters used to invoke the model
      */
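A hedged sketch of the conversion the new `formatStructuredToolToAnthropic` method performs for Zod-backed tools; the calculator tool is illustrative, not taken from this diff:

    import { z } from "zod";
    import { zodToJsonSchema } from "zod-to-json-schema";

    const calculatorSchema = z.object({
      operation: z.enum(["add", "subtract", "multiply", "divide"]),
      a: z.number(),
      b: z.number(),
    });

    // Roughly what the method emits for a StructuredTool named "calculator"
    // carrying the schema above: name and description are copied through, and
    // the Zod schema becomes a JSON Schema under `input_schema`.
    const anthropicTool = {
      name: "calculator",
      description: "Perform basic arithmetic.",
      input_schema: zodToJsonSchema(calculatorSchema),
    };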
@@ -177,6 +235,27 @@ export class ChatAnthropicMessages extends BaseChatModel {
             ...this.invocationKwargs,
         };
     }
+    invocationOptions(request, options) {
+        const toolUseBetaHeader = {
+            "anthropic-beta": "tools-2024-04-04",
+        };
+        const tools = this.formatStructuredToolToAnthropic(options?.tools);
+        // If tools are present, populate the body with the message request params.
+        // This is because Anthropic overwrites the message request params if a body
+        // is passed.
+        const body = tools
+            ? {
+                ...request,
+                tools,
+            }
+            : undefined;
+        const headers = tools ? toolUseBetaHeader : undefined;
+        return {
+            signal: options.signal,
+            ...(body ? { body } : {}),
+            ...(headers ? { headers } : {}),
+        };
+    }
     /** @ignore */
     _identifyingParams() {
         return {
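When tools are present, request options now carry the `anthropic-beta: tools-2024-04-04` header and repeat the message request params in the body (per the comment above, the SDK would otherwise drop them when a body is supplied). A hedged sketch of triggering this path from user code; the model name and tool definition are illustrative:

    import { ChatAnthropic } from "@langchain/anthropic";

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });
    // Binding tools as a call option routes requests through invocationOptions,
    // which attaches the beta header and the tools array.
    const modelWithTools = model.bind({
      tools: [
        {
          name: "get_weather",
          description: "Look up the current weather for a city.",
          input_schema: {
            type: "object",
            properties: { city: { type: "string" } },
            required: ["city"],
          },
        },
      ],
    });
    const response = await modelWithTools.invoke("What is the weather in Paris?");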
@@ -195,56 +274,90 @@ export class ChatAnthropicMessages extends BaseChatModel {
     }
     async *_streamResponseChunks(messages, options, runManager) {
         const params = this.invocationParams(options);
-        const stream = await this.createStreamWithRetry({
+        const requestOptions = this.invocationOptions({
             ...params,
+            stream: false,
             ...this.formatMessagesForAnthropic(messages),
-            stream: true,
-        });
-        for await (const data of stream) {
-            if (options.signal?.aborted) {
-                stream.controller.abort();
-                throw new Error("AbortError: User aborted the request.");
-            }
-            if (data.type === "message_start") {
-                // eslint-disable-next-line @typescript-eslint/no-unused-vars
-                const { content, ...additionalKwargs } = data.message;
-                // eslint-disable-next-line @typescript-eslint/no-explicit-any
-                const filteredAdditionalKwargs = {};
-                for (const [key, value] of Object.entries(additionalKwargs)) {
-                    if (value !== undefined && value !== null) {
-                        filteredAdditionalKwargs[key] = value;
+        }, options);
+        if (options.tools !== undefined && options.tools.length > 0) {
+            const requestOptions = this.invocationOptions({
+                ...params,
+                stream: false,
+                ...this.formatMessagesForAnthropic(messages),
+            }, options);
+            const generations = await this._generateNonStreaming(messages, params, requestOptions);
+            yield new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content: generations[0].message.content,
+                    additional_kwargs: generations[0].message.additional_kwargs,
+                }),
+                text: generations[0].text,
+            });
+        }
+        else {
+            const stream = await this.createStreamWithRetry({
+                ...params,
+                ...this.formatMessagesForAnthropic(messages),
+                stream: true,
+            }, requestOptions);
+            let usageData = { input_tokens: 0, output_tokens: 0 };
+            for await (const data of stream) {
+                if (options.signal?.aborted) {
+                    stream.controller.abort();
+                    throw new Error("AbortError: User aborted the request.");
+                }
+                if (data.type === "message_start") {
+                    // eslint-disable-next-line @typescript-eslint/no-unused-vars
+                    const { content, usage, ...additionalKwargs } = data.message;
+                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                    const filteredAdditionalKwargs = {};
+                    for (const [key, value] of Object.entries(additionalKwargs)) {
+                        if (value !== undefined && value !== null) {
+                            filteredAdditionalKwargs[key] = value;
+                        }
                     }
+                    usageData = usage;
+                    yield new ChatGenerationChunk({
+                        message: new AIMessageChunk({
+                            content: "",
+                            additional_kwargs: filteredAdditionalKwargs,
+                        }),
+                        text: "",
+                    });
                 }
-                yield new ChatGenerationChunk({
-                    message: new AIMessageChunk({
-                        content: "",
-                        additional_kwargs: filteredAdditionalKwargs,
-                    }),
-                    text: "",
-                });
-            }
-            else if (data.type === "message_delta") {
-                yield new ChatGenerationChunk({
-                    message: new AIMessageChunk({
-                        content: "",
-                        additional_kwargs: { ...data.delta },
-                    }),
-                    text: "",
-                });
-            }
-            else if (data.type === "content_block_delta") {
-                const content = data.delta?.text;
-                if (content !== undefined) {
+                else if (data.type === "message_delta") {
                     yield new ChatGenerationChunk({
                         message: new AIMessageChunk({
-                            content,
-                            additional_kwargs: {},
+                            content: "",
+                            additional_kwargs: { ...data.delta },
                         }),
-                        text: content,
+                        text: "",
                     });
-                    await runManager?.handleLLMNewToken(content);
+                    if (data?.usage !== undefined) {
+                        usageData.output_tokens += data.usage.output_tokens;
+                    }
+                }
+                else if (data.type === "content_block_delta") {
+                    const content = data.delta?.text;
+                    if (content !== undefined) {
+                        yield new ChatGenerationChunk({
+                            message: new AIMessageChunk({
+                                content,
+                                additional_kwargs: {},
+                            }),
+                            text: content,
+                        });
+                        await runManager?.handleLLMNewToken(content);
+                    }
                 }
             }
+            yield new ChatGenerationChunk({
+                message: new AIMessageChunk({
+                    content: "",
+                    additional_kwargs: { usage: usageData },
+                }),
+                text: "",
+            });
         }
     }
     /**
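The streaming path now records token usage from the `message_start` and `message_delta` events and emits it as a final, empty chunk under `additional_kwargs.usage`; when tools are bound it falls back to a single non-streaming generation yielded as one chunk. A hedged sketch of reading that usage data from a stream; the prompt and model name are illustrative:

    import { ChatAnthropic } from "@langchain/anthropic";

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });
    let usage;
    for await (const chunk of await model.stream("Write a haiku about diffs.")) {
      if (chunk.additional_kwargs?.usage !== undefined) {
        // Final chunk: { input_tokens, output_tokens } accumulated above.
        usage = chunk.additional_kwargs.usage;
      } else if (typeof chunk.content === "string") {
        process.stdout.write(chunk.content);
      }
    }
    console.log(usage);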
@@ -269,6 +382,9 @@ export class ChatAnthropicMessages extends BaseChatModel {
             else if (message._getType() === "ai") {
                 role = "assistant";
             }
+            else if (message._getType() === "tool") {
+                role = "user";
+            }
             else if (message._getType() === "system") {
                 throw new Error("System messages are only permitted as the first passed message.");
             }
@@ -281,29 +397,40 @@ export class ChatAnthropicMessages extends BaseChatModel {
                     content: message.content,
                 };
             }
-            else {
-                return {
-                    role,
-                    content: message.content.map((contentPart) => {
-                        if (contentPart.type === "image_url") {
-                            let source;
-                            if (typeof contentPart.image_url === "string") {
-                                source = _formatImage(contentPart.image_url);
-                            }
-                            else {
-                                source = _formatImage(contentPart.image_url.url);
-                            }
-                            return {
-                                type: "image",
-                                source,
-                            };
+            else if ("type" in message.content) {
+                const contentBlocks = message.content.map((contentPart) => {
+                    if (contentPart.type === "image_url") {
+                        let source;
+                        if (typeof contentPart.image_url === "string") {
+                            source = _formatImage(contentPart.image_url);
                         }
                         else {
-                            return contentPart;
+                            source = _formatImage(contentPart.image_url.url);
                         }
-                    }),
+                        return {
+                            type: "image",
+                            source,
+                        };
+                    }
+                    else if (contentPart.type === "text") {
+                        // Assuming contentPart is of type MessageContentText here
+                        return {
+                            type: "text",
+                            text: contentPart.text,
+                        };
+                    }
+                    else {
+                        throw new Error("Unsupported message content format");
+                    }
+                });
+                return {
+                    role,
+                    content: contentBlocks,
                 };
             }
+            else {
+                throw new Error("Unsupported message content format");
+            }
         });
         return {
             messages: formattedMessages,
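Message formatting now handles explicit text blocks alongside image blocks (and, per the earlier hunk, maps tool messages to the "user" role), throwing on anything else. A minimal sketch of a multimodal input that the new branch accepts; the data URL is a placeholder, not real image data:

    import { HumanMessage } from "@langchain/core/messages";

    // Placeholder for an actual base64-encoded image data URL.
    const imageDataUrl = "data:image/png;base64,...";

    const multimodalMessage = new HumanMessage({
      content: [
        { type: "text", text: "What is in this image?" },
        { type: "image_url", image_url: { url: imageDataUrl } },
      ],
    });
    // const response = await model.invoke([multimodalMessage]);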
@@ -311,6 +438,17 @@ export class ChatAnthropicMessages extends BaseChatModel {
         };
     }
     /** @ignore */
+    async _generateNonStreaming(messages, params, requestOptions) {
+        const response = await this.completionWithRetry({
+            ...params,
+            stream: false,
+            ...this.formatMessagesForAnthropic(messages),
+        }, requestOptions);
+        const { content, ...additionalKwargs } = response;
+        const generations = anthropicResponseToChatMessages(content, additionalKwargs);
+        return generations;
+    }
+    /** @ignore */
     async _generate(messages, options, runManager) {
         if (this.stopSequences && options.stop) {
             throw new Error(`"stopSequence" parameter found in input and default params`);
@@ -318,7 +456,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
         const params = this.invocationParams(options);
         if (params.stream) {
             let finalChunk;
-            const stream = await this._streamResponseChunks(messages, options, runManager);
+            const stream = this._streamResponseChunks(messages, options, runManager);
             for await (const chunk of stream) {
                 if (finalChunk === undefined) {
                     finalChunk = chunk;
@@ -340,26 +478,14 @@ export class ChatAnthropicMessages extends BaseChatModel {
             };
         }
         else {
-            const response = await this.completionWithRetry({
+            const requestOptions = this.invocationOptions({
                 ...params,
                 stream: false,
                 ...this.formatMessagesForAnthropic(messages),
-            }, { signal: options.signal });
-            const { content, ...additionalKwargs } = response;
-            if (!Array.isArray(content) || content.length !== 1) {
-                console.log(content);
-                throw new Error("Received multiple content parts in Anthropic response. Only single part messages are currently supported.");
-            }
+            }, options);
+            const generations = await this._generateNonStreaming(messages, params, requestOptions);
             return {
-                generations: [
-                    {
-                        text: content[0].text,
-                        message: new AIMessage({
-                            content: content[0].text,
-                            additional_kwargs: additionalKwargs,
-                        }),
-                    },
-                ],
+                generations,
             };
         }
     }
@@ -368,12 +494,12 @@ export class ChatAnthropicMessages extends BaseChatModel {
      * @param request The parameters for creating a completion.
      * @returns A streaming request.
      */
-    async createStreamWithRetry(request) {
+    async createStreamWithRetry(request, options) {
        if (!this.streamingClient) {
-            const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
+            const options_ = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
            this.streamingClient = new Anthropic({
                ...this.clientOptions,
-                ...options,
+                ...options_,
                apiKey: this.anthropicApiKey,
                // Prefer LangChain built-in retries
                maxRetries: 0,
@@ -383,7 +509,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
             ...request,
             ...this.invocationKwargs,
             stream: true,
-        });
+        }, options);
         return this.caller.call(makeCompletionRequest);
     }
     /** @ignore */
@@ -403,12 +529,88 @@ export class ChatAnthropicMessages extends BaseChatModel {
         const makeCompletionRequest = async () => this.batchClient.messages.create({
             ...request,
             ...this.invocationKwargs,
-        });
-        return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
+        }, options);
+        return this.caller.callWithOptions({ signal: options.signal ?? undefined }, makeCompletionRequest);
     }
     _llmType() {
         return "anthropic";
     }
+    withStructuredOutput(outputSchema, config) {
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        const schema = outputSchema;
+        const name = config?.name;
+        const method = config?.method;
+        const includeRaw = config?.includeRaw;
+        if (method === "jsonMode") {
+            throw new Error(`Anthropic only supports "functionCalling" as a method.`);
+        }
+        let functionName = name ?? "extract";
+        let outputParser;
+        let tools;
+        if (isZodSchema(schema)) {
+            const jsonSchema = zodToJsonSchema(schema);
+            tools = [
+                {
+                    name: functionName,
+                    description: jsonSchema.description ?? "A function available to call.",
+                    input_schema: jsonSchema,
+                },
+            ];
+            outputParser = new AnthropicToolsOutputParser({
+                returnSingle: true,
+                keyName: functionName,
+                zodSchema: schema,
+            });
+        }
+        else {
+            let anthropicTools;
+            if (typeof schema.name === "string" &&
+                typeof schema.description === "string" &&
+                typeof schema.input_schema === "object" &&
+                schema.input_schema != null) {
+                anthropicTools = schema;
+                functionName = schema.name;
+            }
+            else {
+                anthropicTools = {
+                    name: functionName,
+                    description: schema.description ?? "",
+                    input_schema: schema,
+                };
+            }
+            tools = [anthropicTools];
+            outputParser = new AnthropicToolsOutputParser({
+                returnSingle: true,
+                keyName: functionName,
+            });
+        }
+        const llm = this.bind({
+            tools,
+        });
+        if (!includeRaw) {
+            return llm.pipe(outputParser).withConfig({
+                runName: "ChatAnthropicStructuredOutput",
+            });
+        }
+        const parserAssign = RunnablePassthrough.assign({
+            // eslint-disable-next-line @typescript-eslint/no-explicit-any
+            parsed: (input, config) => outputParser.invoke(input.raw, config),
+        });
+        const parserNone = RunnablePassthrough.assign({
+            parsed: () => null,
+        });
+        const parsedWithFallback = parserAssign.withFallbacks({
+            fallbacks: [parserNone],
+        });
+        return RunnableSequence.from([
+            {
+                raw: llm,
+            },
+            parsedWithFallback,
+        ]).withConfig({
+            runName: "StructuredOutputRunnable",
+        });
+    }
 }
 export class ChatAnthropic extends ChatAnthropicMessages {
 }
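A hedged usage sketch of the new `withStructuredOutput` method with a Zod schema; the schema, prompt, and model name are illustrative:

    import { z } from "zod";
    import { ChatAnthropic } from "@langchain/anthropic";

    const jokeSchema = z.object({
      setup: z.string().describe("The setup of the joke"),
      punchline: z.string().describe("The punchline of the joke"),
    });

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });
    // Binds a single tool built from the schema and pipes the response through
    // AnthropicToolsOutputParser, so the result comes back as a typed object.
    const structuredModel = model.withStructuredOutput(jokeSchema, { name: "joke" });
    const joke = await structuredModel.invoke("Tell me a joke about version diffs.");
    // joke has the form { setup: string, punchline: string };
    // pass { includeRaw: true } to also receive the raw AIMessage alongside the parsed value.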
@@ -12,6 +12,7 @@ const tool_calling_js_1 = require("./utils/tool_calling.cjs");
 /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
 class ChatAnthropicTools extends chat_models_1.BaseChatModel {
     static lc_name() {
@@ -23,6 +23,7 @@ export type ChatAnthropicToolsInput = Partial<AnthropicInput> & BaseChatModelPar
 /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
 export declare class ChatAnthropicTools extends BaseChatModel<ChatAnthropicToolsCallOptions> {
     llm: BaseChatModel;
@@ -9,6 +9,7 @@ import { DEFAULT_TOOL_SYSTEM_PROMPT, formatAsXMLRepresentation, fixArrayXMLParam
 /**
  * Experimental wrapper over Anthropic chat models that adds support for
  * a function calling interface.
+ * @deprecated Prefer traditional tool use through ChatAnthropic.
  */
 export class ChatAnthropicTools extends BaseChatModel {
     static lc_name() {
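The experimental `ChatAnthropicTools` wrapper is now marked `@deprecated` in all three build outputs in favor of native tool use on `ChatAnthropic`. A rough migration sketch, assuming the experimental entrypoint shown in the comment and tools already prepared in Anthropic format:

    // Before (deprecated): XML-prompt-based experimental wrapper.
    // import { ChatAnthropicTools } from "@langchain/anthropic/experimental";

    // After: bind tools directly on ChatAnthropic, which now targets the
    // Anthropic tools beta API (see invocationOptions above).
    import { ChatAnthropic } from "@langchain/anthropic";

    const model = new ChatAnthropic({ modelName: "claude-3-sonnet-20240229" });
    const modelWithTools = model.bind({
      tools: [/* Anthropic-format tools or LangChain StructuredTools */],
    });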
@@ -0,0 +1,60 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AnthropicToolsOutputParser = void 0;
+const output_parsers_1 = require("@langchain/core/output_parsers");
+class AnthropicToolsOutputParser extends output_parsers_1.BaseLLMOutputParser {
+    static lc_name() {
+        return "AnthropicToolsOutputParser";
+    }
+    constructor(params) {
+        super(params);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "anthropic", "output_parsers"]
+        });
+        Object.defineProperty(this, "returnId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        /** The type of tool calls to return. */
+        Object.defineProperty(this, "keyName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        /** Whether to return only the first tool call. */
+        Object.defineProperty(this, "returnSingle", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        this.keyName = params.keyName;
+        this.returnSingle = params.returnSingle ?? this.returnSingle;
+    }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    async parseResult(generations) {
+        const tools = generations.flatMap((generation) => {
+            const { message } = generation;
+            if (typeof message === "string") {
+                return [];
+            }
+            if (!Array.isArray(message.content)) {
+                return [];
+            }
+            const tool = message.content.find((item) => item.type === "tool_use");
+            return tool;
+        });
+        if (tools.length === 0 || !tools[0]) {
+            throw new Error("No tools provided to AnthropicToolsOutputParser.");
+        }
+        const [tool] = tools;
+        return tool.input;
+    }
+}
+exports.AnthropicToolsOutputParser = AnthropicToolsOutputParser;
@@ -0,0 +1,17 @@
+import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
+import { JsonOutputKeyToolsParserParams } from "@langchain/core/output_parsers/openai_tools";
+import { ChatGeneration } from "@langchain/core/outputs";
+interface AnthropicToolsOutputParserParams extends JsonOutputKeyToolsParserParams {
+}
+export declare class AnthropicToolsOutputParser<T extends Record<string, any> = Record<string, any>> extends BaseLLMOutputParser<T> {
+    static lc_name(): string;
+    lc_namespace: string[];
+    returnId: boolean;
+    /** The type of tool calls to return. */
+    keyName: string;
+    /** Whether to return only the first tool call. */
+    returnSingle: boolean;
+    constructor(params: AnthropicToolsOutputParserParams);
+    parseResult(generations: ChatGeneration[]): Promise<T>;
+}
+export {};
@@ -0,0 +1,56 @@
+import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
+export class AnthropicToolsOutputParser extends BaseLLMOutputParser {
+    static lc_name() {
+        return "AnthropicToolsOutputParser";
+    }
+    constructor(params) {
+        super(params);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "anthropic", "output_parsers"]
+        });
+        Object.defineProperty(this, "returnId", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        /** The type of tool calls to return. */
+        Object.defineProperty(this, "keyName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        /** Whether to return only the first tool call. */
+        Object.defineProperty(this, "returnSingle", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: false
+        });
+        this.keyName = params.keyName;
+        this.returnSingle = params.returnSingle ?? this.returnSingle;
+    }
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    async parseResult(generations) {
+        const tools = generations.flatMap((generation) => {
+            const { message } = generation;
+            if (typeof message === "string") {
+                return [];
+            }
+            if (!Array.isArray(message.content)) {
+                return [];
+            }
+            const tool = message.content.find((item) => item.type === "tool_use");
+            return tool;
+        });
+        if (tools.length === 0 || !tools[0]) {
+            throw new Error("No tools provided to AnthropicToolsOutputParser.");
+        }
+        const [tool] = tools;
+        return tool.input;
+    }
+}
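A small, hedged example of what the new parser does with a generation whose message content holds a `tool_use` block; the block values are made up, and the subpath import is assumed rather than taken from this diff:

    import { AIMessage } from "@langchain/core/messages";
    import { AnthropicToolsOutputParser } from "@langchain/anthropic/output_parsers";

    const parser = new AnthropicToolsOutputParser({
      keyName: "get_weather",
      returnSingle: true,
    });

    const generation = {
      text: "",
      message: new AIMessage({
        content: [
          { type: "tool_use", id: "toolu_01", name: "get_weather", input: { city: "Paris" } },
        ],
      }),
    };

    // parseResult finds the first tool_use block and returns its `input`.
    const parsed = await parser.parseResult([generation]);
    // parsed === { city: "Paris" }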