@langchain/anthropic 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -21,14 +21,14 @@ You can do so by adding appropriate fields to your project's `package.json` like
     "langchain": "0.0.207"
   },
   "resolutions": {
-    "@langchain/core": "0.1.1"
+    "@langchain/core": "0.1.2"
   },
   "overrides": {
-    "@langchain/core": "0.1.1"
+    "@langchain/core": "0.1.2"
   },
   "pnpm": {
     "overrides": {
-      "@langchain/core": "0.1.1"
+      "@langchain/core": "0.1.2"
     }
   }
 }
@@ -49,7 +49,7 @@ export ANTHROPIC_API_KEY=your-api-key
 Then initialize
 
 ```typescript
-import { ChatAnthropic } from "@langchain/anthropic";
+import { ChatAnthropicMessages } from "@langchain/anthropic";
 
 const model = new ChatAnthropic({
   anthropicApiKey: process.env.ANTHROPIC_API_KEY,
@@ -60,11 +60,11 @@ const response = await model.invoke(new HumanMessage("Hello world!"));
 ### Streaming
 
 ```typescript
-import { ChatAnthropic } from "@langchain/anthropic";
+import { ChatAnthropicMessages } from "@langchain/anthropic";
 
 const model = new ChatAnthropic({
   anthropicApiKey: process.env.ANTHROPIC_API_KEY,
-  modelName: "claude-2",
+  modelName: "claude-2.1",
 });
 const response = await model.stream(new HumanMessage("Hello world!"));
 ```
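Taken together, the README changes amount to: import `ChatAnthropicMessages` (with `ChatAnthropic` kept as an alias, per the source changes below) and default to `claude-2.1`. A minimal sketch of post-upgrade usage, assuming `ANTHROPIC_API_KEY` is set in the environment (option values here are illustrative, not from the diff):

```typescript
import { ChatAnthropicMessages } from "@langchain/anthropic";
import { HumanMessage } from "@langchain/core/messages";

// In 0.0.7 the default model is "claude-2.1" and `maxTokens` supersedes
// the deprecated `maxTokensToSample` (both are still accepted).
const model = new ChatAnthropicMessages({
  anthropicApiKey: process.env.ANTHROPIC_API_KEY,
  modelName: "claude-2.1",
  maxTokens: 256,
});

const response = await model.invoke([new HumanMessage("Hello world!")]);
console.log(response);
```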
package/dist/chat_models.cjs CHANGED
@@ -1,50 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatAnthropic = exports.DEFAULT_STOP_SEQUENCES = exports.HUMAN_PROMPT = exports.AI_PROMPT = void 0;
+exports.ChatAnthropic = exports.ChatAnthropicMessages = void 0;
 const sdk_1 = require("@anthropic-ai/sdk");
-Object.defineProperty(exports, "AI_PROMPT", { enumerable: true, get: function () { return sdk_1.AI_PROMPT; } });
-Object.defineProperty(exports, "HUMAN_PROMPT", { enumerable: true, get: function () { return sdk_1.HUMAN_PROMPT; } });
 const messages_1 = require("@langchain/core/messages");
 const outputs_1 = require("@langchain/core/outputs");
 const env_1 = require("@langchain/core/utils/env");
 const chat_models_1 = require("@langchain/core/language_models/chat_models");
-/**
- * Extracts the custom role of a generic chat message.
- * @param message The chat message from which to extract the custom role.
- * @returns The custom role of the chat message.
- */
-function extractGenericMessageCustomRole(message) {
-    if (message.role !== sdk_1.AI_PROMPT &&
-        message.role !== sdk_1.HUMAN_PROMPT &&
-        message.role !== "") {
-        console.warn(`Unknown message role: ${message.role}`);
-    }
-    return message.role;
-}
-/**
- * Gets the Anthropic prompt from a base message.
- * @param message The base message from which to get the Anthropic prompt.
- * @returns The Anthropic prompt from the base message.
- */
-function getAnthropicPromptFromMessage(message) {
-    const type = message._getType();
-    switch (type) {
-        case "ai":
-            return sdk_1.AI_PROMPT;
-        case "human":
-            return sdk_1.HUMAN_PROMPT;
-        case "system":
-            return "";
-        case "generic": {
-            if (!messages_1.ChatMessage.isInstance(message))
-                throw new Error("Invalid generic chat message");
-            return extractGenericMessageCustomRole(message);
-        }
-        default:
-            throw new Error(`Unknown message type: ${type}`);
-    }
-}
-exports.DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
 /**
  * Wrapper around Anthropic large language models.
  *
@@ -54,7 +15,7 @@ exports.DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
+ * `anthropic.beta.messages`} can be passed through {@link invocationKwargs},
  * even if not explicitly available on this class.
  * @example
  * ```typescript
@@ -66,7 +27,7 @@ exports.DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
  * console.log(res);
  * ```
  */
-class ChatAnthropic extends chat_models_1.BaseChatModel {
+class ChatAnthropicMessages extends chat_models_1.BaseChatModel {
     static lc_name() {
         return "ChatAnthropic";
     }
@@ -118,7 +79,7 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
             writable: true,
             value: -1
         });
-        Object.defineProperty(this, "maxTokensToSample", {
+        Object.defineProperty(this, "maxTokens", {
             enumerable: true,
             configurable: true,
             writable: true,
@@ -128,7 +89,7 @@
             enumerable: true,
             configurable: true,
             writable: true,
-            value: "claude-2"
+            value: "claude-2.1"
         });
         Object.defineProperty(this, "invocationKwargs", {
             enumerable: true,
@@ -180,8 +141,8 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
         this.temperature = fields?.temperature ?? this.temperature;
         this.topK = fields?.topK ?? this.topK;
         this.topP = fields?.topP ?? this.topP;
-        this.maxTokensToSample =
-            fields?.maxTokensToSample ?? this.maxTokensToSample;
+        this.maxTokens =
+            fields?.maxTokensToSample ?? fields?.maxTokens ?? this.maxTokens;
         this.stopSequences = fields?.stopSequences ?? this.stopSequences;
         this.streaming = fields?.streaming ?? false;
         this.clientOptions = fields?.clientOptions ?? {};
@@ -195,11 +156,9 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
             temperature: this.temperature,
             top_k: this.topK,
             top_p: this.topP,
-            stop_sequences: options?.stop?.concat(exports.DEFAULT_STOP_SEQUENCES) ??
-                this.stopSequences ??
-                exports.DEFAULT_STOP_SEQUENCES,
-            max_tokens_to_sample: this.maxTokensToSample,
+            stop_sequences: options?.stop ?? this.stopSequences,
             stream: this.streaming,
+            max_tokens: this.maxTokens,
             ...this.invocationKwargs,
         };
     }
@@ -223,35 +182,53 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
         const params = this.invocationParams(options);
         const stream = await this.createStreamWithRetry({
             ...params,
-            prompt: this.formatMessagesAsPrompt(messages),
+            ...this.formatMessagesForAnthropic(messages),
+            stream: true,
         });
-        let modelSent = false;
-        let stopReasonSent = false;
         for await (const data of stream) {
             if (options.signal?.aborted) {
                 stream.controller.abort();
                 throw new Error("AbortError: User aborted the request.");
             }
-            const additional_kwargs = {};
-            if (data.model && !modelSent) {
-                additional_kwargs.model = data.model;
-                modelSent = true;
+            if (data.type === "message_start") {
+                // eslint-disable-next-line @typescript-eslint/no-unused-vars
+                const { content, ...additionalKwargs } = data.message;
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                const filteredAdditionalKwargs = {};
+                for (const [key, value] of Object.entries(additionalKwargs)) {
+                    if (value !== undefined && value !== null) {
+                        filteredAdditionalKwargs[key] = value;
+                    }
+                }
+                yield new outputs_1.ChatGenerationChunk({
+                    message: new messages_1.AIMessageChunk({
+                        content: "",
+                        additional_kwargs: filteredAdditionalKwargs,
+                    }),
+                    text: "",
+                });
             }
-            else if (data.stop_reason && !stopReasonSent) {
-                additional_kwargs.stop_reason = data.stop_reason;
-                stopReasonSent = true;
+            else if (data.type === "message_delta") {
+                yield new outputs_1.ChatGenerationChunk({
+                    message: new messages_1.AIMessageChunk({
+                        content: "",
+                        additional_kwargs: { ...data.delta },
+                    }),
+                    text: "",
+                });
             }
-            const delta = data.completion ?? "";
-            yield new outputs_1.ChatGenerationChunk({
-                message: new messages_1.AIMessageChunk({
-                    content: delta,
-                    additional_kwargs,
-                }),
-                text: delta,
-            });
-            await runManager?.handleLLMNewToken(delta);
-            if (data.stop_reason) {
-                break;
+            else if (data.type === "content_block_delta") {
+                const content = data.delta?.text;
+                if (content !== undefined) {
+                    yield new outputs_1.ChatGenerationChunk({
+                        message: new messages_1.AIMessageChunk({
+                            content,
+                            additional_kwargs: {},
+                        }),
+                        text: content,
+                    });
+                    await runManager?.handleLLMNewToken(content);
+                }
             }
         }
     }
@@ -260,13 +237,41 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
      * @param messages The base messages to format as a prompt.
      * @returns The formatted prompt.
      */
-    formatMessagesAsPrompt(messages) {
-        return (messages
-            .map((message) => {
-            const messagePrompt = getAnthropicPromptFromMessage(message);
-            return `${messagePrompt} ${message.content}`;
-        })
-            .join("") + sdk_1.AI_PROMPT);
+    formatMessagesForAnthropic(messages) {
+        let system;
+        if (messages.length > 0 && messages[0]._getType() === "system") {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("Currently only string content messages are supported.");
+            }
+            system = messages[0].content;
+        }
+        const conversationMessages = system !== undefined ? messages.slice(1) : messages;
+        const formattedMessages = conversationMessages.map((message) => {
+            let role;
+            if (typeof message.content !== "string") {
+                throw new Error("Currently only string content messages are supported.");
+            }
+            if (message._getType() === "human") {
+                role = "user";
+            }
+            else if (message._getType() === "ai") {
+                role = "assistant";
+            }
+            else if (message._getType() === "system") {
+                throw new Error("System messages are only permitted as the first passed message.");
+            }
+            else {
+                throw new Error(`Message type "${message._getType()}" is not supported.`);
+            }
+            return {
+                role,
+                content: message.content,
+            };
+        });
+        return {
+            messages: formattedMessages,
+            system,
+        };
     }
     /** @ignore */
     async _generate(messages, options, runManager) {
@@ -274,38 +279,52 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
             throw new Error(`"stopSequence" parameter found in input and default params`);
         }
         const params = this.invocationParams(options);
-        let response;
         if (params.stream) {
-            response = {
-                completion: "",
-                model: "",
-                stop_reason: "",
-            };
+            let finalChunk;
             const stream = await this._streamResponseChunks(messages, options, runManager);
             for await (const chunk of stream) {
-                response.completion += chunk.message.content;
-                response.model =
-                    chunk.message.additional_kwargs.model ?? response.model;
-                response.stop_reason =
-                    chunk.message.additional_kwargs.stop_reason ??
-                        response.stop_reason;
+                if (finalChunk === undefined) {
+                    finalChunk = chunk;
+                }
+                else {
+                    finalChunk = finalChunk.concat(chunk);
+                }
+            }
+            if (finalChunk === undefined) {
+                throw new Error("No chunks returned from Anthropic API.");
             }
+            return {
+                generations: [
+                    {
+                        text: finalChunk.text,
+                        message: finalChunk.message,
+                    },
+                ],
+            };
         }
         else {
-            response = await this.completionWithRetry({
+            const response = await this.completionWithRetry({
                 ...params,
-                prompt: this.formatMessagesAsPrompt(messages),
+                stream: false,
+                ...this.formatMessagesForAnthropic(messages),
             }, { signal: options.signal });
+            const { content, ...additionalKwargs } = response;
+            if (!Array.isArray(content) || content.length !== 1) {
+                console.log(content);
+                throw new Error("Received multiple content parts in Anthropic response. Only single part messages are currently supported.");
+            }
+            return {
+                generations: [
+                    {
+                        text: content[0].text,
+                        message: new messages_1.AIMessage({
+                            content: content[0].text,
+                            additional_kwargs: additionalKwargs,
+                        }),
+                    },
+                ],
+            };
         }
-        const generations = (response.completion ?? "")
-            .split(sdk_1.AI_PROMPT)
-            .map((message) => ({
-            text: message,
-            message: new messages_1.AIMessage(message),
-        }));
-        return {
-            generations,
-        };
     }
     /**
      * Creates a streaming request with retry.
@@ -319,10 +338,17 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
                 ...this.clientOptions,
                 ...options,
                 apiKey: this.anthropicApiKey,
+                // Prefer LangChain built-in retries
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
+        const makeCompletionRequest = async () => this.streamingClient.beta.messages.create(
+        // TODO: Fix typing once underlying SDK is fixed to not require unnecessary "anthropic-beta" param
+        {
+            ...request,
+            ...this.invocationKwargs,
+            stream: true,
+        });
         return this.caller.call(makeCompletionRequest);
     }
     /** @ignore */
@@ -339,7 +365,12 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
+        const makeCompletionRequest = async () => this.batchClient.beta.messages.create(
+        // TODO: Fix typing once underlying SDK is fixed to not require unnecessary "anthropic-beta" param
+        {
+            ...request,
+            ...this.invocationKwargs,
+        });
         return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
     }
     _llmType() {
@@ -350,4 +381,7 @@ class ChatAnthropic extends chat_models_1.BaseChatModel {
         return [];
     }
 }
+exports.ChatAnthropicMessages = ChatAnthropicMessages;
+class ChatAnthropic extends ChatAnthropicMessages {
+}
 exports.ChatAnthropic = ChatAnthropic;
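The core of this migration is the new event dispatch in `_streamResponseChunks`: instead of accumulating raw `completion` deltas, it maps Anthropic Messages API stream events onto `ChatGenerationChunk`s. A standalone sketch of that mapping, using a deliberately simplified event type (the real one is `Anthropic.Beta.MessageStreamEvent` from the SDK; `eventToChunk` is a hypothetical helper, not part of the package):

```typescript
import { AIMessageChunk } from "@langchain/core/messages";
import { ChatGenerationChunk } from "@langchain/core/outputs";

// Simplified stand-in for Anthropic.Beta.MessageStreamEvent; illustration only.
type StreamEvent =
  | { type: "message_start"; message: { content?: unknown; [key: string]: unknown } }
  | { type: "message_delta"; delta: Record<string, unknown> }
  | { type: "content_block_delta"; delta: { text?: string } };

// Mirrors the dispatch in the hunk above: metadata-bearing events become
// empty-content chunks; only content_block_delta carries visible text.
function eventToChunk(data: StreamEvent): ChatGenerationChunk | undefined {
  if (data.type === "message_start") {
    // Drop the (empty) content array, keep the rest as response metadata.
    const { content: _content, ...additionalKwargs } = data.message;
    return new ChatGenerationChunk({
      message: new AIMessageChunk({ content: "", additional_kwargs: additionalKwargs }),
      text: "",
    });
  }
  if (data.type === "message_delta") {
    // e.g. stop_reason / usage deltas arrive here, again with no text.
    return new ChatGenerationChunk({
      message: new AIMessageChunk({ content: "", additional_kwargs: { ...data.delta } }),
      text: "",
    });
  }
  if (data.delta.text !== undefined) {
    const text = data.delta.text;
    return new ChatGenerationChunk({
      message: new AIMessageChunk({ content: text, additional_kwargs: {} }),
      text,
    });
  }
  return undefined;
}
```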
package/dist/chat_models.d.ts CHANGED
@@ -1,13 +1,14 @@
-import { Anthropic, AI_PROMPT, HUMAN_PROMPT, ClientOptions } from "@anthropic-ai/sdk";
-import type { CompletionCreateParams } from "@anthropic-ai/sdk/resources/completions";
+import { Anthropic, type ClientOptions } from "@anthropic-ai/sdk";
 import type { Stream } from "@anthropic-ai/sdk/streaming";
 import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
 import { type BaseMessage } from "@langchain/core/messages";
 import { ChatGenerationChunk, type ChatResult } from "@langchain/core/outputs";
 import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
 import { type BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
-export { AI_PROMPT, HUMAN_PROMPT };
-export declare const DEFAULT_STOP_SEQUENCES: string[];
+type AnthropicMessage = Anthropic.Beta.MessageParam;
+type AnthropicMessageCreateParams = Omit<Anthropic.Beta.MessageCreateParamsNonStreaming, "anthropic-beta">;
+type AnthropicStreamingMessageCreateParams = Omit<Anthropic.Beta.MessageCreateParamsStreaming, "anthropic-beta">;
+type AnthropicMessageStreamEvent = Anthropic.Beta.MessageStreamEvent;
 /**
  * Input to AnthropicChat class.
  */
@@ -33,7 +34,12 @@ export interface AnthropicInput {
      */
     topP?: number;
     /** A maximum number of tokens to generate before stopping. */
-    maxTokensToSample: number;
+    maxTokens?: number;
+    /**
+     * A maximum number of tokens to generate before stopping.
+     * @deprecated Use "maxTokens" instead.
+     */
+    maxTokensToSample?: number;
     /** A list of strings upon which to stop generating.
      * You probably want `["\n\nHuman:"]`, as that's the cue for
      * the next turn in the dialog agent.
@@ -69,7 +75,7 @@ type Kwargs = Record<string, any>;
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
+ * `anthropic.beta.messages`} can be passed through {@link invocationKwargs},
  * even if not explicitly available on this class.
  * @example
  * ```typescript
@@ -81,7 +87,7 @@ type Kwargs = Record<string, any>;
  * console.log(res);
  * ```
  */
-export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseChatModel<CallOptions> implements AnthropicInput {
+export declare class ChatAnthropicMessages<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseChatModel<CallOptions> implements AnthropicInput {
     static lc_name(): string;
     get lc_secrets(): {
         [key: string]: string;
@@ -93,7 +99,7 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     temperature: number;
     topK: number;
     topP: number;
-    maxTokensToSample: number;
+    maxTokens: number;
     modelName: string;
     invocationKwargs?: Kwargs;
     stopSequences?: string[];
@@ -105,31 +111,35 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
     /**
      * Get the parameters used to invoke the model
      */
-    invocationParams(options?: this["ParsedCallOptions"]): Omit<CompletionCreateParams, "prompt"> & Kwargs;
+    invocationParams(options?: this["ParsedCallOptions"]): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, "messages" | "anthropic-beta"> & Kwargs;
     /** @ignore */
     _identifyingParams(): {
-        metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
         stream?: boolean | undefined;
-        max_tokens_to_sample: number;
-        model: "claude-2" | (string & {}) | "claude-instant-1";
+        max_tokens: number;
+        model: string;
+        metadata?: Anthropic.Beta.Messages.MessageCreateParams.Metadata | undefined;
         stop_sequences?: string[] | undefined;
+        system?: string | undefined;
         temperature?: number | undefined;
         top_k?: number | undefined;
         top_p?: number | undefined;
+        'x-api-key'?: string | undefined;
         model_name: string;
     };
     /**
      * Get the identifying parameters for the model
      */
     identifyingParams(): {
-        metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
         stream?: boolean | undefined;
-        max_tokens_to_sample: number;
-        model: "claude-2" | (string & {}) | "claude-instant-1";
+        max_tokens: number;
+        model: string;
+        metadata?: Anthropic.Beta.Messages.MessageCreateParams.Metadata | undefined;
         stop_sequences?: string[] | undefined;
+        system?: string | undefined;
         temperature?: number | undefined;
         top_k?: number | undefined;
         top_p?: number | undefined;
+        'x-api-key'?: string | undefined;
         model_name: string;
     };
     _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
@@ -138,7 +148,10 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
      * @param messages The base messages to format as a prompt.
      * @returns The formatted prompt.
      */
-    protected formatMessagesAsPrompt(messages: BaseMessage[]): string;
+    protected formatMessagesForAnthropic(messages: BaseMessage[]): {
+        system?: string;
+        messages: AnthropicMessage[];
+    };
     /** @ignore */
     _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
     /**
@@ -146,12 +159,15 @@ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOpti
      * @param request The parameters for creating a completion.
      * @returns A streaming request.
      */
-    protected createStreamWithRetry(request: CompletionCreateParams & Kwargs): Promise<Stream<Anthropic.Completions.Completion>>;
+    protected createStreamWithRetry(request: AnthropicStreamingMessageCreateParams & Kwargs): Promise<Stream<AnthropicMessageStreamEvent>>;
     /** @ignore */
-    protected completionWithRetry(request: CompletionCreateParams & Kwargs, options: {
+    protected completionWithRetry(request: AnthropicMessageCreateParams & Kwargs, options: {
         signal?: AbortSignal;
-    }): Promise<Anthropic.Completions.Completion>;
+    }): Promise<Anthropic.Beta.Message>;
     _llmType(): string;
     /** @ignore */
     _combineLLMOutput(): never[];
 }
+export declare class ChatAnthropic extends ChatAnthropicMessages {
+}
+export {};
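Two compatibility points fall out of the declarations above: `maxTokensToSample` remains accepted but is deprecated in favor of `maxTokens`, and `ChatAnthropic` is now a thin subclass of `ChatAnthropicMessages`, so existing imports keep working. A quick sketch (option values are arbitrary; assumes `ANTHROPIC_API_KEY` is set in the environment):

```typescript
import { ChatAnthropic, ChatAnthropicMessages } from "@langchain/anthropic";

// Old import still works: ChatAnthropic extends ChatAnthropicMessages.
const legacy = new ChatAnthropic({
  // Deprecated spelling; per the constructor logic
  // (fields?.maxTokensToSample ?? fields?.maxTokens ?? default),
  // it takes precedence if both spellings are supplied.
  maxTokensToSample: 1024,
});

// New-style equivalent.
const current = new ChatAnthropicMessages({ maxTokens: 1024 });
```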
package/dist/chat_models.js CHANGED
@@ -1,46 +1,8 @@
-import { Anthropic, AI_PROMPT, HUMAN_PROMPT, } from "@anthropic-ai/sdk";
-import { AIMessage, AIMessageChunk, ChatMessage, } from "@langchain/core/messages";
-import { ChatGenerationChunk, } from "@langchain/core/outputs";
+import { Anthropic } from "@anthropic-ai/sdk";
+import { AIMessage, AIMessageChunk, } from "@langchain/core/messages";
+import { ChatGenerationChunk } from "@langchain/core/outputs";
 import { getEnvironmentVariable } from "@langchain/core/utils/env";
 import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
-export { AI_PROMPT, HUMAN_PROMPT };
-/**
- * Extracts the custom role of a generic chat message.
- * @param message The chat message from which to extract the custom role.
- * @returns The custom role of the chat message.
- */
-function extractGenericMessageCustomRole(message) {
-    if (message.role !== AI_PROMPT &&
-        message.role !== HUMAN_PROMPT &&
-        message.role !== "") {
-        console.warn(`Unknown message role: ${message.role}`);
-    }
-    return message.role;
-}
-/**
- * Gets the Anthropic prompt from a base message.
- * @param message The base message from which to get the Anthropic prompt.
- * @returns The Anthropic prompt from the base message.
- */
-function getAnthropicPromptFromMessage(message) {
-    const type = message._getType();
-    switch (type) {
-        case "ai":
-            return AI_PROMPT;
-        case "human":
-            return HUMAN_PROMPT;
-        case "system":
-            return "";
-        case "generic": {
-            if (!ChatMessage.isInstance(message))
-                throw new Error("Invalid generic chat message");
-            return extractGenericMessageCustomRole(message);
-        }
-        default:
-            throw new Error(`Unknown message type: ${type}`);
-    }
-}
-export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
 /**
  * Wrapper around Anthropic large language models.
  *
@@ -50,7 +12,7 @@ export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
  * @remarks
  * Any parameters that are valid to be passed to {@link
  * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
+ * `anthropic.beta.messages`} can be passed through {@link invocationKwargs},
  * even if not explicitly available on this class.
  * @example
  * ```typescript
@@ -62,7 +24,7 @@ export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
  * console.log(res);
  * ```
  */
-export class ChatAnthropic extends BaseChatModel {
+export class ChatAnthropicMessages extends BaseChatModel {
     static lc_name() {
         return "ChatAnthropic";
     }
@@ -114,7 +76,7 @@ export class ChatAnthropic extends BaseChatModel {
         writable: true,
         value: -1
     });
-    Object.defineProperty(this, "maxTokensToSample", {
+    Object.defineProperty(this, "maxTokens", {
         enumerable: true,
         configurable: true,
         writable: true,
@@ -124,7 +86,7 @@ export class ChatAnthropic extends BaseChatModel {
         enumerable: true,
         configurable: true,
         writable: true,
-        value: "claude-2"
+        value: "claude-2.1"
     });
     Object.defineProperty(this, "invocationKwargs", {
         enumerable: true,
@@ -176,8 +138,8 @@ export class ChatAnthropic extends BaseChatModel {
         this.temperature = fields?.temperature ?? this.temperature;
         this.topK = fields?.topK ?? this.topK;
         this.topP = fields?.topP ?? this.topP;
-        this.maxTokensToSample =
-            fields?.maxTokensToSample ?? this.maxTokensToSample;
+        this.maxTokens =
+            fields?.maxTokensToSample ?? fields?.maxTokens ?? this.maxTokens;
         this.stopSequences = fields?.stopSequences ?? this.stopSequences;
         this.streaming = fields?.streaming ?? false;
         this.clientOptions = fields?.clientOptions ?? {};
@@ -191,11 +153,9 @@ export class ChatAnthropic extends BaseChatModel {
             temperature: this.temperature,
             top_k: this.topK,
             top_p: this.topP,
-            stop_sequences: options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
-                this.stopSequences ??
-                DEFAULT_STOP_SEQUENCES,
-            max_tokens_to_sample: this.maxTokensToSample,
+            stop_sequences: options?.stop ?? this.stopSequences,
             stream: this.streaming,
+            max_tokens: this.maxTokens,
             ...this.invocationKwargs,
         };
     }
@@ -219,35 +179,53 @@ export class ChatAnthropic extends BaseChatModel {
         const params = this.invocationParams(options);
         const stream = await this.createStreamWithRetry({
             ...params,
-            prompt: this.formatMessagesAsPrompt(messages),
+            ...this.formatMessagesForAnthropic(messages),
+            stream: true,
         });
-        let modelSent = false;
-        let stopReasonSent = false;
         for await (const data of stream) {
             if (options.signal?.aborted) {
                 stream.controller.abort();
                 throw new Error("AbortError: User aborted the request.");
             }
-            const additional_kwargs = {};
-            if (data.model && !modelSent) {
-                additional_kwargs.model = data.model;
-                modelSent = true;
+            if (data.type === "message_start") {
+                // eslint-disable-next-line @typescript-eslint/no-unused-vars
+                const { content, ...additionalKwargs } = data.message;
+                // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                const filteredAdditionalKwargs = {};
+                for (const [key, value] of Object.entries(additionalKwargs)) {
+                    if (value !== undefined && value !== null) {
+                        filteredAdditionalKwargs[key] = value;
+                    }
+                }
+                yield new ChatGenerationChunk({
+                    message: new AIMessageChunk({
+                        content: "",
+                        additional_kwargs: filteredAdditionalKwargs,
+                    }),
+                    text: "",
+                });
             }
-            else if (data.stop_reason && !stopReasonSent) {
-                additional_kwargs.stop_reason = data.stop_reason;
-                stopReasonSent = true;
+            else if (data.type === "message_delta") {
+                yield new ChatGenerationChunk({
+                    message: new AIMessageChunk({
+                        content: "",
+                        additional_kwargs: { ...data.delta },
+                    }),
+                    text: "",
+                });
             }
-            const delta = data.completion ?? "";
-            yield new ChatGenerationChunk({
-                message: new AIMessageChunk({
-                    content: delta,
-                    additional_kwargs,
-                }),
-                text: delta,
-            });
-            await runManager?.handleLLMNewToken(delta);
-            if (data.stop_reason) {
-                break;
+            else if (data.type === "content_block_delta") {
+                const content = data.delta?.text;
+                if (content !== undefined) {
+                    yield new ChatGenerationChunk({
+                        message: new AIMessageChunk({
+                            content,
+                            additional_kwargs: {},
+                        }),
+                        text: content,
+                    });
+                    await runManager?.handleLLMNewToken(content);
+                }
             }
         }
     }
@@ -256,13 +234,41 @@ export class ChatAnthropic extends BaseChatModel {
      * @param messages The base messages to format as a prompt.
      * @returns The formatted prompt.
      */
-    formatMessagesAsPrompt(messages) {
-        return (messages
-            .map((message) => {
-            const messagePrompt = getAnthropicPromptFromMessage(message);
-            return `${messagePrompt} ${message.content}`;
-        })
-            .join("") + AI_PROMPT);
+    formatMessagesForAnthropic(messages) {
+        let system;
+        if (messages.length > 0 && messages[0]._getType() === "system") {
+            if (typeof messages[0].content !== "string") {
+                throw new Error("Currently only string content messages are supported.");
+            }
+            system = messages[0].content;
+        }
+        const conversationMessages = system !== undefined ? messages.slice(1) : messages;
+        const formattedMessages = conversationMessages.map((message) => {
+            let role;
+            if (typeof message.content !== "string") {
+                throw new Error("Currently only string content messages are supported.");
+            }
+            if (message._getType() === "human") {
+                role = "user";
+            }
+            else if (message._getType() === "ai") {
+                role = "assistant";
+            }
+            else if (message._getType() === "system") {
+                throw new Error("System messages are only permitted as the first passed message.");
+            }
+            else {
+                throw new Error(`Message type "${message._getType()}" is not supported.`);
+            }
+            return {
+                role,
+                content: message.content,
+            };
+        });
+        return {
+            messages: formattedMessages,
+            system,
+        };
     }
     /** @ignore */
     async _generate(messages, options, runManager) {
@@ -270,38 +276,52 @@ export class ChatAnthropic extends BaseChatModel {
             throw new Error(`"stopSequence" parameter found in input and default params`);
         }
         const params = this.invocationParams(options);
-        let response;
         if (params.stream) {
-            response = {
-                completion: "",
-                model: "",
-                stop_reason: "",
-            };
+            let finalChunk;
             const stream = await this._streamResponseChunks(messages, options, runManager);
             for await (const chunk of stream) {
-                response.completion += chunk.message.content;
-                response.model =
-                    chunk.message.additional_kwargs.model ?? response.model;
-                response.stop_reason =
-                    chunk.message.additional_kwargs.stop_reason ??
-                        response.stop_reason;
+                if (finalChunk === undefined) {
+                    finalChunk = chunk;
+                }
+                else {
+                    finalChunk = finalChunk.concat(chunk);
+                }
+            }
+            if (finalChunk === undefined) {
+                throw new Error("No chunks returned from Anthropic API.");
             }
+            return {
+                generations: [
+                    {
+                        text: finalChunk.text,
+                        message: finalChunk.message,
+                    },
+                ],
+            };
         }
         else {
-            response = await this.completionWithRetry({
+            const response = await this.completionWithRetry({
                 ...params,
-                prompt: this.formatMessagesAsPrompt(messages),
+                stream: false,
+                ...this.formatMessagesForAnthropic(messages),
            }, { signal: options.signal });
+            const { content, ...additionalKwargs } = response;
+            if (!Array.isArray(content) || content.length !== 1) {
+                console.log(content);
+                throw new Error("Received multiple content parts in Anthropic response. Only single part messages are currently supported.");
+            }
+            return {
+                generations: [
+                    {
+                        text: content[0].text,
+                        message: new AIMessage({
+                            content: content[0].text,
+                            additional_kwargs: additionalKwargs,
+                        }),
+                    },
+                ],
+            };
         }
-        const generations = (response.completion ?? "")
-            .split(AI_PROMPT)
-            .map((message) => ({
-            text: message,
-            message: new AIMessage(message),
-        }));
-        return {
-            generations,
-        };
     }
     /**
      * Creates a streaming request with retry.
@@ -315,10 +335,17 @@ export class ChatAnthropic extends BaseChatModel {
                 ...this.clientOptions,
                 ...options,
                 apiKey: this.anthropicApiKey,
+                // Prefer LangChain built-in retries
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
+        const makeCompletionRequest = async () => this.streamingClient.beta.messages.create(
+        // TODO: Fix typing once underlying SDK is fixed to not require unnecessary "anthropic-beta" param
+        {
+            ...request,
+            ...this.invocationKwargs,
+            stream: true,
+        });
         return this.caller.call(makeCompletionRequest);
     }
     /** @ignore */
@@ -335,7 +362,12 @@ export class ChatAnthropic extends BaseChatModel {
                 maxRetries: 0,
             });
         }
-        const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
+        const makeCompletionRequest = async () => this.batchClient.beta.messages.create(
+        // TODO: Fix typing once underlying SDK is fixed to not require unnecessary "anthropic-beta" param
+        {
+            ...request,
+            ...this.invocationKwargs,
+        });
         return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
     }
     _llmType() {
@@ -346,3 +378,5 @@ export class ChatAnthropic extends BaseChatModel {
         return [];
     }
 }
+export class ChatAnthropic extends ChatAnthropicMessages {
+}
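To make the new request shape concrete: `formatMessagesForAnthropic` splits off a leading system message and maps the remaining turns to Messages API roles. A worked example of the transformation (input values are arbitrary; the output shape follows the method body above):

```typescript
import { SystemMessage, HumanMessage, AIMessage } from "@langchain/core/messages";

// Input: a leading system message plus alternating human/ai turns.
const input = [
  new SystemMessage("You are a terse assistant."),
  new HumanMessage("Hi, my name is Joe!"),
  new AIMessage("Nice to meet you, Joe!"),
  new HumanMessage("What did I just say my name was?"),
];

// Expected result of formatMessagesForAnthropic(input):
// {
//   system: "You are a terse assistant.",
//   messages: [
//     { role: "user", content: "Hi, my name is Joe!" },
//     { role: "assistant", content: "Nice to meet you, Joe!" },
//     { role: "user", content: "What did I just say my name was?" },
//   ],
// }
// A system message anywhere other than position 0, a non-string content,
// or any other message type throws, per the guards in the method.
```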
package/dist/tests/chat_models.int.test.d.ts ADDED
@@ -0,0 +1 @@
+export {};
package/dist/tests/chat_models.int.test.js ADDED
@@ -0,0 +1,272 @@
+/* eslint-disable no-process-env */
+import { expect, test } from "@jest/globals";
+import { HumanMessage } from "@langchain/core/messages";
+import { ChatPromptValue } from "@langchain/core/prompt_values";
+import { PromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts";
+import { CallbackManager } from "@langchain/core/callbacks/manager";
+import { ChatAnthropicMessages } from "../chat_models.js";
+test("Test ChatAnthropicMessages", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.call([message]);
+    console.log({ res });
+});
+test("Test ChatAnthropicMessages Generate", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.generate([[message], [message]]);
+    expect(res.generations.length).toBe(2);
+    for (const generation of res.generations) {
+        expect(generation.length).toBe(1);
+        for (const message of generation) {
+            console.log(message.text);
+        }
+    }
+    console.log({ res });
+});
+test("Test ChatAnthropicMessages Generate w/ ClientOptions", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        clientOptions: {
+            defaultHeaders: {
+                "Helicone-Auth": "HELICONE_API_KEY",
+            },
+        },
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.generate([[message], [message]]);
+    expect(res.generations.length).toBe(2);
+    for (const generation of res.generations) {
+        expect(generation.length).toBe(1);
+        for (const message of generation) {
+            console.log(message.text);
+        }
+    }
+    console.log({ res });
+});
+test("Test ChatAnthropicMessages Generate with a signal in call options", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+    });
+    const controller = new AbortController();
+    const message = new HumanMessage("How is your day going? Be extremely verbose!");
+    await expect(() => {
+        const res = chat.generate([[message], [message]], {
+            signal: controller.signal,
+        });
+        setTimeout(() => {
+            controller.abort();
+        }, 1000);
+        return res;
+    }).rejects.toThrow();
+}, 10000);
+test("Test ChatAnthropicMessages tokenUsage with a batch", async () => {
+    const model = new ChatAnthropicMessages({
+        temperature: 0,
+        maxRetries: 0,
+        modelName: "claude-instant-1.2",
+    });
+    const res = await model.generate([
+        [new HumanMessage(`Hello!`)],
+        [new HumanMessage(`Hi!`)],
+    ]);
+    console.log({ res });
+});
+test("Test ChatAnthropicMessages in streaming mode", async () => {
+    let nrNewTokens = 0;
+    let streamedCompletion = "";
+    const model = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        streaming: true,
+        callbacks: CallbackManager.fromHandlers({
+            async handleLLMNewToken(token) {
+                nrNewTokens += 1;
+                streamedCompletion += token;
+            },
+        }),
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await model.call([message]);
+    console.log({ res });
+    expect(nrNewTokens > 0).toBe(true);
+    expect(res.content).toBe(streamedCompletion);
+});
+test("Test ChatAnthropicMessages in streaming mode with a signal", async () => {
+    let nrNewTokens = 0;
+    let streamedCompletion = "";
+    const model = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        streaming: true,
+        callbacks: CallbackManager.fromHandlers({
+            async handleLLMNewToken(token) {
+                nrNewTokens += 1;
+                streamedCompletion += token;
+            },
+        }),
+    });
+    const controller = new AbortController();
+    const message = new HumanMessage("Hello! Give me an extremely verbose response");
+    await expect(() => {
+        const res = model.call([message], {
+            signal: controller.signal,
+        });
+        setTimeout(() => {
+            controller.abort();
+        }, 500);
+        return res;
+    }).rejects.toThrow();
+    console.log({ nrNewTokens, streamedCompletion });
+}, 5000);
+test("Test ChatAnthropicMessages prompt value", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.generatePrompt([new ChatPromptValue([message])]);
+    expect(res.generations.length).toBe(1);
+    for (const generation of res.generations) {
+        for (const g of generation) {
+            console.log(g.text);
+        }
+    }
+    console.log({ res });
+});
+test("ChatAnthropicMessages, docs, prompt templates", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        temperature: 0,
+    });
+    const systemPrompt = PromptTemplate.fromTemplate("You are a helpful assistant that translates {input_language} to {output_language}.");
+    const chatPrompt = ChatPromptTemplate.fromMessages([
+        new SystemMessagePromptTemplate(systemPrompt),
+        HumanMessagePromptTemplate.fromTemplate("{text}"),
+    ]);
+    const responseA = await chat.generatePrompt([
+        await chatPrompt.formatPromptValue({
+            input_language: "English",
+            output_language: "French",
+            text: "I love programming.",
+        }),
+    ]);
+    console.log(responseA.generations);
+});
+test("ChatAnthropicMessages, longer chain of messages", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-1.3",
+        maxRetries: 0,
+        temperature: 0,
+    });
+    const chatPrompt = ChatPromptTemplate.fromMessages([
+        HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
+        AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
+        HumanMessagePromptTemplate.fromTemplate("{text}"),
+    ]);
+    const responseA = await chat.generatePrompt([
+        await chatPrompt.formatPromptValue({
+            text: "What did I just say my name was?",
+        }),
+    ]);
+    console.log(responseA.generations);
+});
+test("ChatAnthropicMessages, Anthropic apiUrl set manually via constructor", async () => {
+    // Pass the default URL through (should use this, and work as normal)
+    const anthropicApiUrl = "https://api.anthropic.com";
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        anthropicApiUrl,
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.call([message]);
+    console.log({ res });
+});
+test("ChatAnthropicMessages, Claude V2", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-2.1",
+        maxRetries: 0,
+        temperature: 0,
+    });
+    const chatPrompt = ChatPromptTemplate.fromMessages([
+        HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
+        AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
+        HumanMessagePromptTemplate.fromTemplate("{text}"),
+    ]);
+    const responseA = await chat.generatePrompt([
+        await chatPrompt.formatPromptValue({
+            text: "What did I just say my name was?",
+        }),
+    ]);
+    console.log(responseA.generations);
+});
+test("Test ChatAnthropicMessages stream method", async () => {
+    const model = new ChatAnthropicMessages({
+        maxTokens: 50,
+        maxRetries: 0,
+        modelName: "claude-instant-1.2",
+    });
+    const stream = await model.stream("Print hello world.");
+    const chunks = [];
+    for await (const chunk of stream) {
+        console.log(chunk);
+        chunks.push(chunk);
+    }
+    expect(chunks.length).toBeGreaterThan(1);
+});
+test("Test ChatAnthropicMessages stream method with abort", async () => {
+    await expect(async () => {
+        const model = new ChatAnthropicMessages({
+            maxTokens: 500,
+            maxRetries: 0,
+            modelName: "claude-instant-1.2",
+        });
+        const stream = await model.stream("How is your day going? Be extremely verbose.", {
+            signal: AbortSignal.timeout(1000),
+        });
+        for await (const chunk of stream) {
+            console.log(chunk);
+        }
+    }).rejects.toThrow();
+});
+test("Test ChatAnthropicMessages stream method with early break", async () => {
+    const model = new ChatAnthropicMessages({
+        maxTokens: 50,
+        maxRetries: 0,
+        modelName: "claude-instant-1.2",
+    });
+    const stream = await model.stream("How is your day going? Be extremely verbose.");
+    let i = 0;
+    for await (const chunk of stream) {
+        console.log(chunk);
+        i += 1;
+        if (i > 10) {
+            break;
+        }
+    }
+});
+test("Test ChatAnthropicMessages headers passed through", async () => {
+    const chat = new ChatAnthropicMessages({
+        modelName: "claude-instant-1.2",
+        maxRetries: 0,
+        anthropicApiKey: "NOT_REAL",
+        clientOptions: {
+            defaultHeaders: {
+                "X-Api-Key": process.env.ANTHROPIC_API_KEY,
+            },
+        },
+    });
+    const message = new HumanMessage("Hello!");
+    const res = await chat.call([message]);
+    console.log({ res });
+});
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@langchain/anthropic",
-  "version": "0.0.5",
+  "version": "0.0.7",
   "description": "Anthropic integrations for LangChain.js",
   "type": "module",
   "engines": {
@@ -35,8 +35,8 @@
   "author": "LangChain",
   "license": "MIT",
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.10.0",
-    "@langchain/core": "~0.1.2"
+    "@anthropic-ai/sdk": "^0.11.0",
+    "@langchain/core": "~0.1.3"
   },
   "devDependencies": {
     "@jest/globals": "^29.5.0",