@langchain/anthropic 0.1.15 → 0.1.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
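The substantive change in 0.1.16 is that the internal _generateNonStreaming helper now resolves to { generations, llmOutput } instead of a bare array of generations, so Anthropic response metadata (id, model, stop reason, token usage) is threaded through to the result rather than discarded. A minimal sketch of the consumer-visible effect, mirroring the integration test re-enabled further down; it assumes a valid ANTHROPIC_API_KEY in the environment and an ES-module context for top-level await:

    import { ChatAnthropic } from "@langchain/anthropic";
    import { HumanMessage } from "@langchain/core/messages";

    const chat = new ChatAnthropic({
        modelName: "claude-3-sonnet-20240229",
        maxRetries: 0,
    });

    // As of 0.1.16, Anthropic's usage metadata is populated on the response.
    const res = await chat.invoke([new HumanMessage("Hello!")]);
    console.log(res.response_metadata.usage);
    // e.g. { input_tokens: 10, output_tokens: 12 } -- illustrative values only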
@@ -459,7 +459,7 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
         const params = this.invocationParams(options);
         const formattedMessages = _formatMessagesForAnthropic(messages);
         if (options.tools !== undefined && options.tools.length > 0) {
-            const generations = await this._generateNonStreaming(messages, params, {
+            const { generations } = await this._generateNonStreaming(messages, params, {
                 signal: options.signal,
             });
             const result = generations[0].message;
@@ -562,7 +562,8 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
         }, options);
         const { content, ...additionalKwargs } = response;
         const generations = anthropicResponseToChatMessages(content, additionalKwargs);
-        return generations;
+        const { role: _role, type: _type, ...rest } = additionalKwargs;
+        return { generations, llmOutput: rest };
     }
     /** @ignore */
     async _generate(messages, options, runManager) {
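For reference, the rest-pattern introduced above splits Anthropic's Messages API response into message content and response metadata, dropping the role and type discriminators along the way. A standalone sketch with an illustrative response object (all field values are made up):

    // Shape mirrors an Anthropic Messages API response; values are illustrative.
    const response = {
        id: "msg_illustrative",
        type: "message",
        role: "assistant",
        model: "claude-3-sonnet-20240229",
        content: [{ type: "text", text: "Hello!" }],
        stop_reason: "end_turn",
        stop_sequence: null,
        usage: { input_tokens: 10, output_tokens: 12 },
    };

    // Same two-step pattern as the patched code: peel off content, then
    // drop role/type so only response metadata remains.
    const { content, ...additionalKwargs } = response;
    const { role: _role, type: _type, ...rest } = additionalKwargs;
    console.log({ content, rest });
    // rest -> { id, model, stop_reason, stop_sequence, usage }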
@@ -594,12 +595,9 @@ class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
             };
         }
         else {
-            const generations = await this._generateNonStreaming(messages, params, {
+            return this._generateNonStreaming(messages, params, {
                 signal: options.signal,
             });
-            return {
-                generations,
-            };
         }
     }
     /**
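The else branch here is a simplification enabled by the new return shape: because _generateNonStreaming already resolves to a complete { generations, llmOutput } object, _generate can forward its promise directly instead of awaiting it and re-wrapping only the generations (which would now silently drop llmOutput). A self-contained sketch of the pattern, using hypothetical stand-in names:

    // Hypothetical stand-ins for the real helper and result types.
    type Result = { generations: string[]; llmOutput: { usage: number } };
    const inner = async (): Promise<Result> => ({
        generations: ["hi"],
        llmOutput: { usage: 3 },
    });

    // Before: await, then re-wrap -- llmOutput would be discarded.
    async function generateBefore(): Promise<{ generations: string[] }> {
        const { generations } = await inner();
        return { generations };
    }

    // After: forward the promise; callers receive llmOutput as well.
    async function generateAfter(): Promise<Result> {
        return inner();
    }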
@@ -176,7 +176,16 @@ export declare class ChatAnthropicMessages<CallOptions extends ChatAnthropicCall
     };
     _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
     /** @ignore */
-    _generateNonStreaming(messages: BaseMessage[], params: Omit<Anthropic.Messages.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageCreateParamsStreaming, "messages"> & Kwargs, requestOptions: AnthropicRequestOptions): Promise<ChatGeneration[]>;
+    _generateNonStreaming(messages: BaseMessage[], params: Omit<Anthropic.Messages.MessageCreateParamsNonStreaming | Anthropic.Messages.MessageCreateParamsStreaming, "messages"> & Kwargs, requestOptions: AnthropicRequestOptions): Promise<{
+        generations: ChatGeneration[];
+        llmOutput: {
+            id: string;
+            model: string;
+            stop_reason: "max_tokens" | "stop_sequence" | "end_turn" | null;
+            stop_sequence: string | null;
+            usage: Anthropic.Messages.Usage;
+        };
+    }>;
     /** @ignore */
     _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
     /**
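The updated declaration pins down exactly what llmOutput carries. A type-only sketch of the new result shape, assuming the same imports the declaration file already relies on:

    import type Anthropic from "@anthropic-ai/sdk";
    import type { ChatGeneration } from "@langchain/core/outputs";

    // The result type _generateNonStreaming now resolves to, per the
    // updated declaration above.
    type NonStreamingResult = {
        generations: ChatGeneration[];
        llmOutput: {
            id: string;
            model: string;
            stop_reason: "max_tokens" | "stop_sequence" | "end_turn" | null;
            stop_sequence: string | null;
            usage: Anthropic.Messages.Usage; // { input_tokens, output_tokens }
        };
    };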
@@ -455,7 +455,7 @@ export class ChatAnthropicMessages extends BaseChatModel {
         const params = this.invocationParams(options);
         const formattedMessages = _formatMessagesForAnthropic(messages);
         if (options.tools !== undefined && options.tools.length > 0) {
-            const generations = await this._generateNonStreaming(messages, params, {
+            const { generations } = await this._generateNonStreaming(messages, params, {
                 signal: options.signal,
             });
             const result = generations[0].message;
@@ -558,7 +558,8 @@ export class ChatAnthropicMessages extends BaseChatModel {
         }, options);
         const { content, ...additionalKwargs } = response;
         const generations = anthropicResponseToChatMessages(content, additionalKwargs);
-        return generations;
+        const { role: _role, type: _type, ...rest } = additionalKwargs;
+        return { generations, llmOutput: rest };
     }
     /** @ignore */
     async _generate(messages, options, runManager) {
@@ -590,12 +591,9 @@ export class ChatAnthropicMessages extends BaseChatModel {
             };
         }
         else {
-            const generations = await this._generateNonStreaming(messages, params, {
+            return this._generateNonStreaming(messages, params, {
                 signal: options.signal,
             });
-            return {
-                generations,
-            };
         }
     }
     /**
@@ -5,7 +5,7 @@ import { ChatPromptValue } from "@langchain/core/prompt_values";
 import { PromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts";
 import { CallbackManager } from "@langchain/core/callbacks/manager";
 import { ChatAnthropic } from "../chat_models.js";
-test.skip("Test ChatAnthropic", async () => {
+test("Test ChatAnthropic", async () => {
     const chat = new ChatAnthropic({
         modelName: "claude-3-sonnet-20240229",
         maxRetries: 0,
@@ -13,6 +13,7 @@ test.skip("Test ChatAnthropic", async () => {
     const message = new HumanMessage("Hello!");
     const res = await chat.invoke([message]);
     console.log({ res });
+    expect(res.response_metadata.usage).toBeDefined();
 });
 test("Test ChatAnthropic Generate", async () => {
     const chat = new ChatAnthropic({
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/anthropic",
-  "version": "0.1.15",
+  "version": "0.1.16",
   "description": "Anthropic integrations for LangChain.js",
   "type": "module",
   "engines": {