langchain 0.0.196 → 0.0.197-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (136)
  1. package/LICENSE +21 -0
  2. package/dist/agents/openai/index.cjs +6 -2
  3. package/dist/agents/openai/index.js +6 -2
  4. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
  5. package/dist/base_language/count_tokens.cjs +4 -4
  6. package/dist/base_language/count_tokens.d.ts +1 -1
  7. package/dist/base_language/count_tokens.js +1 -1
  8. package/dist/base_language/index.cjs +3 -3
  9. package/dist/base_language/index.d.ts +1 -1
  10. package/dist/base_language/index.js +1 -1
  11. package/dist/cache/base.cjs +1 -1
  12. package/dist/cache/base.d.ts +1 -1
  13. package/dist/cache/base.js +1 -1
  14. package/dist/cache/index.cjs +2 -2
  15. package/dist/cache/index.d.ts +1 -1
  16. package/dist/cache/index.js +1 -1
  17. package/dist/callbacks/handlers/console.cjs +1 -1
  18. package/dist/callbacks/handlers/console.d.ts +1 -1
  19. package/dist/callbacks/handlers/console.js +1 -1
  20. package/dist/callbacks/handlers/initialize.cjs +1 -1
  21. package/dist/callbacks/handlers/initialize.d.ts +1 -1
  22. package/dist/callbacks/handlers/initialize.js +1 -1
  23. package/dist/callbacks/handlers/log_stream.cjs +1 -1
  24. package/dist/callbacks/handlers/log_stream.d.ts +1 -1
  25. package/dist/callbacks/handlers/log_stream.js +1 -1
  26. package/dist/callbacks/handlers/run_collector.cjs +1 -1
  27. package/dist/callbacks/handlers/run_collector.d.ts +1 -1
  28. package/dist/callbacks/handlers/run_collector.js +1 -1
  29. package/dist/callbacks/handlers/tracer.cjs +1 -1
  30. package/dist/callbacks/handlers/tracer.d.ts +1 -1
  31. package/dist/callbacks/handlers/tracer.js +1 -1
  32. package/dist/callbacks/handlers/tracer_langchain.cjs +1 -1
  33. package/dist/callbacks/handlers/tracer_langchain.d.ts +1 -1
  34. package/dist/callbacks/handlers/tracer_langchain.js +1 -1
  35. package/dist/callbacks/handlers/tracer_langchain_v1.cjs +1 -1
  36. package/dist/callbacks/handlers/tracer_langchain_v1.d.ts +1 -1
  37. package/dist/callbacks/handlers/tracer_langchain_v1.js +1 -1
  38. package/dist/chains/openai_functions/structured_output.cjs +1 -1
  39. package/dist/chains/openai_functions/structured_output.d.ts +1 -1
  40. package/dist/chains/openai_functions/structured_output.js +1 -1
  41. package/dist/chat_models/anthropic.cjs +15 -348
  42. package/dist/chat_models/anthropic.d.ts +1 -156
  43. package/dist/chat_models/anthropic.js +1 -346
  44. package/dist/chat_models/base.cjs +1 -1
  45. package/dist/chat_models/base.d.ts +1 -1
  46. package/dist/chat_models/base.js +1 -1
  47. package/dist/chat_models/bedrock/web.cjs +21 -1
  48. package/dist/chat_models/bedrock/web.d.ts +1 -1
  49. package/dist/chat_models/bedrock/web.js +21 -1
  50. package/dist/document.cjs +2 -2
  51. package/dist/document.d.ts +1 -1
  52. package/dist/document.js +1 -1
  53. package/dist/document_loaders/web/azure_blob_storage_file.d.ts +1 -1
  54. package/dist/document_loaders/web/github.cjs +105 -0
  55. package/dist/document_loaders/web/github.d.ts +26 -0
  56. package/dist/document_loaders/web/github.js +105 -0
  57. package/dist/document_loaders/web/s3.d.ts +1 -1
  58. package/dist/embeddings/base.cjs +1 -1
  59. package/dist/embeddings/base.d.ts +1 -1
  60. package/dist/embeddings/base.js +1 -1
  61. package/dist/embeddings/cache_backed.cjs +1 -1
  62. package/dist/embeddings/cache_backed.js +1 -1
  63. package/dist/experimental/plan_and_execute/prompt.d.ts +1 -1
  64. package/dist/llms/base.cjs +1 -1
  65. package/dist/llms/base.d.ts +1 -1
  66. package/dist/llms/base.js +1 -1
  67. package/dist/llms/bedrock/web.cjs +21 -1
  68. package/dist/llms/bedrock/web.d.ts +1 -1
  69. package/dist/llms/bedrock/web.js +21 -1
  70. package/dist/memory/base.cjs +2 -2
  71. package/dist/memory/base.d.ts +2 -2
  72. package/dist/memory/base.js +2 -2
  73. package/dist/output_parsers/list.cjs +4 -122
  74. package/dist/output_parsers/list.d.ts +1 -57
  75. package/dist/output_parsers/list.js +1 -119
  76. package/dist/output_parsers/openai_functions.cjs +1 -1
  77. package/dist/output_parsers/openai_functions.d.ts +1 -1
  78. package/dist/output_parsers/openai_functions.js +1 -1
  79. package/dist/prompts/base.cjs +8 -8
  80. package/dist/prompts/base.d.ts +3 -3
  81. package/dist/prompts/base.js +3 -3
  82. package/dist/prompts/chat.cjs +13 -15
  83. package/dist/prompts/chat.d.ts +2 -1
  84. package/dist/prompts/chat.js +2 -1
  85. package/dist/prompts/few_shot.cjs +4 -15
  86. package/dist/prompts/few_shot.d.ts +1 -1
  87. package/dist/prompts/few_shot.js +1 -1
  88. package/dist/prompts/index.cjs +2 -2
  89. package/dist/prompts/index.d.ts +1 -1
  90. package/dist/prompts/index.js +1 -1
  91. package/dist/prompts/pipeline.cjs +3 -15
  92. package/dist/prompts/pipeline.d.ts +1 -1
  93. package/dist/prompts/pipeline.js +1 -1
  94. package/dist/prompts/prompt.cjs +3 -15
  95. package/dist/prompts/prompt.d.ts +1 -1
  96. package/dist/prompts/prompt.js +1 -1
  97. package/dist/prompts/selectors/LengthBasedExampleSelector.cjs +3 -15
  98. package/dist/prompts/selectors/LengthBasedExampleSelector.d.ts +1 -1
  99. package/dist/prompts/selectors/LengthBasedExampleSelector.js +1 -1
  100. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.cjs +1 -1
  101. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.d.ts +1 -1
  102. package/dist/prompts/selectors/SemanticSimilarityExampleSelector.js +1 -1
  103. package/dist/prompts/selectors/conditional.cjs +6 -15
  104. package/dist/prompts/selectors/conditional.d.ts +1 -1
  105. package/dist/prompts/selectors/conditional.js +1 -1
  106. package/dist/prompts/serde.cjs +0 -15
  107. package/dist/prompts/serde.d.ts +1 -1
  108. package/dist/prompts/serde.js +1 -1
  109. package/dist/prompts/template.cjs +9 -15
  110. package/dist/prompts/template.d.ts +1 -1
  111. package/dist/prompts/template.js +1 -1
  112. package/dist/schema/document.cjs +3 -3
  113. package/dist/schema/document.d.ts +1 -1
  114. package/dist/schema/document.js +1 -1
  115. package/dist/schema/index.cjs +12 -12
  116. package/dist/schema/index.d.ts +10 -10
  117. package/dist/schema/index.js +7 -7
  118. package/dist/schema/output_parser.cjs +1 -1
  119. package/dist/schema/output_parser.d.ts +1 -1
  120. package/dist/schema/output_parser.js +1 -1
  121. package/dist/schema/retriever.cjs +1 -1
  122. package/dist/schema/retriever.d.ts +1 -1
  123. package/dist/schema/retriever.js +1 -1
  124. package/dist/schema/storage.cjs +1 -1
  125. package/dist/schema/storage.d.ts +1 -1
  126. package/dist/schema/storage.js +1 -1
  127. package/dist/util/async_caller.cjs +1 -1
  128. package/dist/util/async_caller.d.ts +1 -1
  129. package/dist/util/async_caller.js +1 -1
  130. package/dist/vectorstores/momento_vector_index.cjs +39 -0
  131. package/dist/vectorstores/momento_vector_index.d.ts +17 -1
  132. package/dist/vectorstores/momento_vector_index.js +40 -1
  133. package/dist/vectorstores/mongodb_atlas.cjs +22 -2
  134. package/dist/vectorstores/mongodb_atlas.d.ts +13 -0
  135. package/dist/vectorstores/mongodb_atlas.js +22 -2
  136. package/package.json +9 -8
package/dist/chat_models/anthropic.cjs
@@ -1,350 +1,17 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ var desc = Object.getOwnPropertyDescriptor(m, k);
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+ desc = { enumerable: true, get: function() { return m[k]; } };
+ }
+ Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+ if (k2 === undefined) k2 = k;
+ o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+ for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.ChatAnthropic = exports.DEFAULT_STOP_SEQUENCES = void 0;
- const sdk_1 = require("@anthropic-ai/sdk");
- const index_js_1 = require("../schema/index.cjs");
- const env_js_1 = require("../util/env.cjs");
- const base_js_1 = require("./base.cjs");
- /**
- * Extracts the custom role of a generic chat message.
- * @param message The chat message from which to extract the custom role.
- * @returns The custom role of the chat message.
- */
- function extractGenericMessageCustomRole(message) {
- if (message.role !== sdk_1.AI_PROMPT &&
- message.role !== sdk_1.HUMAN_PROMPT &&
- message.role !== "") {
- console.warn(`Unknown message role: ${message.role}`);
- }
- return message.role;
- }
- /**
- * Gets the Anthropic prompt from a base message.
- * @param message The base message from which to get the Anthropic prompt.
- * @returns The Anthropic prompt from the base message.
- */
- function getAnthropicPromptFromMessage(message) {
- const type = message._getType();
- switch (type) {
- case "ai":
- return sdk_1.AI_PROMPT;
- case "human":
- return sdk_1.HUMAN_PROMPT;
- case "system":
- return "";
- case "generic": {
- if (!index_js_1.ChatMessage.isInstance(message))
- throw new Error("Invalid generic chat message");
- return extractGenericMessageCustomRole(message);
- }
- default:
- throw new Error(`Unknown message type: ${type}`);
- }
- }
- exports.DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
- /**
- * Wrapper around Anthropic large language models.
- *
- * To use you should have the `@anthropic-ai/sdk` package installed, with the
- * `ANTHROPIC_API_KEY` environment variable set.
- *
- * @remarks
- * Any parameters that are valid to be passed to {@link
- * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
- * even if not explicitly available on this class.
- * @example
- * ```typescript
- * const model = new ChatAnthropic({
- * temperature: 0.9,
- * anthropicApiKey: 'YOUR-API-KEY',
- * });
- * const res = await model.invoke({ input: 'Hello!' });
- * console.log(res);
- * ```
- */
- class ChatAnthropic extends base_js_1.BaseChatModel {
- static lc_name() {
- return "ChatAnthropic";
- }
- get lc_secrets() {
- return {
- anthropicApiKey: "ANTHROPIC_API_KEY",
- };
- }
- get lc_aliases() {
- return {
- modelName: "model",
- };
- }
- constructor(fields) {
- super(fields ?? {});
- Object.defineProperty(this, "lc_serializable", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: true
- });
- Object.defineProperty(this, "anthropicApiKey", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "apiUrl", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "temperature", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 1
- });
- Object.defineProperty(this, "topK", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: -1
- });
- Object.defineProperty(this, "topP", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: -1
- });
- Object.defineProperty(this, "maxTokensToSample", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: 2048
- });
- Object.defineProperty(this, "modelName", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: "claude-2"
- });
- Object.defineProperty(this, "invocationKwargs", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "stopSequences", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- Object.defineProperty(this, "streaming", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: false
- });
- Object.defineProperty(this, "clientOptions", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- // Used for non-streaming requests
- Object.defineProperty(this, "batchClient", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- // Used for streaming requests
- Object.defineProperty(this, "streamingClient", {
- enumerable: true,
- configurable: true,
- writable: true,
- value: void 0
- });
- this.anthropicApiKey =
- fields?.anthropicApiKey ?? (0, env_js_1.getEnvironmentVariable)("ANTHROPIC_API_KEY");
- if (!this.anthropicApiKey) {
- throw new Error("Anthropic API key not found");
- }
- // Support overriding the default API URL (i.e., https://api.anthropic.com)
- this.apiUrl = fields?.anthropicApiUrl;
- this.modelName = fields?.modelName ?? this.modelName;
- this.invocationKwargs = fields?.invocationKwargs ?? {};
- this.temperature = fields?.temperature ?? this.temperature;
- this.topK = fields?.topK ?? this.topK;
- this.topP = fields?.topP ?? this.topP;
- this.maxTokensToSample =
- fields?.maxTokensToSample ?? this.maxTokensToSample;
- this.stopSequences = fields?.stopSequences ?? this.stopSequences;
- this.streaming = fields?.streaming ?? false;
- this.clientOptions = fields?.clientOptions ?? {};
- }
- /**
- * Get the parameters used to invoke the model
- */
- invocationParams(options) {
- return {
- model: this.modelName,
- temperature: this.temperature,
- top_k: this.topK,
- top_p: this.topP,
- stop_sequences: options?.stop?.concat(exports.DEFAULT_STOP_SEQUENCES) ??
- this.stopSequences ??
- exports.DEFAULT_STOP_SEQUENCES,
- max_tokens_to_sample: this.maxTokensToSample,
- stream: this.streaming,
- ...this.invocationKwargs,
- };
- }
- /** @ignore */
- _identifyingParams() {
- return {
- model_name: this.modelName,
- ...this.invocationParams(),
- };
- }
- /**
- * Get the identifying parameters for the model
- */
- identifyingParams() {
- return {
- model_name: this.modelName,
- ...this.invocationParams(),
- };
- }
- async *_streamResponseChunks(messages, options, runManager) {
- const params = this.invocationParams(options);
- const stream = await this.createStreamWithRetry({
- ...params,
- prompt: this.formatMessagesAsPrompt(messages),
- });
- let modelSent = false;
- let stopReasonSent = false;
- for await (const data of stream) {
- if (options.signal?.aborted) {
- stream.controller.abort();
- throw new Error("AbortError: User aborted the request.");
- }
- const additional_kwargs = {};
- if (data.model && !modelSent) {
- additional_kwargs.model = data.model;
- modelSent = true;
- }
- else if (data.stop_reason && !stopReasonSent) {
- additional_kwargs.stop_reason = data.stop_reason;
- stopReasonSent = true;
- }
- const delta = data.completion ?? "";
- yield new index_js_1.ChatGenerationChunk({
- message: new index_js_1.AIMessageChunk({
- content: delta,
- additional_kwargs,
- }),
- text: delta,
- });
- await runManager?.handleLLMNewToken(delta);
- if (data.stop_reason) {
- break;
- }
- }
- }
- /**
- * Formats messages as a prompt for the model.
- * @param messages The base messages to format as a prompt.
- * @returns The formatted prompt.
- */
- formatMessagesAsPrompt(messages) {
- return (messages
- .map((message) => {
- const messagePrompt = getAnthropicPromptFromMessage(message);
- return `${messagePrompt} ${message.content}`;
- })
- .join("") + sdk_1.AI_PROMPT);
- }
- /** @ignore */
- async _generate(messages, options, runManager) {
- if (this.stopSequences && options.stop) {
- throw new Error(`"stopSequence" parameter found in input and default params`);
- }
- const params = this.invocationParams(options);
- let response;
- if (params.stream) {
- response = {
- completion: "",
- model: "",
- stop_reason: "",
- };
- const stream = await this._streamResponseChunks(messages, options, runManager);
- for await (const chunk of stream) {
- response.completion += chunk.message.content;
- response.model =
- chunk.message.additional_kwargs.model ?? response.model;
- response.stop_reason =
- chunk.message.additional_kwargs.stop_reason ??
- response.stop_reason;
- }
- }
- else {
- response = await this.completionWithRetry({
- ...params,
- prompt: this.formatMessagesAsPrompt(messages),
- }, { signal: options.signal });
- }
- const generations = (response.completion ?? "")
- .split(sdk_1.AI_PROMPT)
- .map((message) => ({
- text: message,
- message: new index_js_1.AIMessage(message),
- }));
- return {
- generations,
- };
- }
- /**
- * Creates a streaming request with retry.
- * @param request The parameters for creating a completion.
- * @returns A streaming request.
- */
- async createStreamWithRetry(request) {
- if (!this.streamingClient) {
- const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
- this.streamingClient = new sdk_1.Anthropic({
- ...this.clientOptions,
- ...options,
- apiKey: this.anthropicApiKey,
- maxRetries: 0,
- });
- }
- const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
- return this.caller.call(makeCompletionRequest);
- }
- /** @ignore */
- async completionWithRetry(request, options) {
- if (!this.anthropicApiKey) {
- throw new Error("Missing Anthropic API key.");
- }
- if (!this.batchClient) {
- const options = this.apiUrl ? { baseURL: this.apiUrl } : undefined;
- this.batchClient = new sdk_1.Anthropic({
- ...this.clientOptions,
- ...options,
- apiKey: this.anthropicApiKey,
- maxRetries: 0,
- });
- }
- const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
- return this.caller.callWithOptions({ signal: options.signal }, makeCompletionRequest);
- }
- _llmType() {
- return "anthropic";
- }
- /** @ignore */
- _combineLLMOutput() {
- return [];
- }
- }
- exports.ChatAnthropic = ChatAnthropic;
+ __exportStar(require("@langchain/anthropic"), exports);
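With this change, dist/chat_models/anthropic.cjs no longer ships its own ChatAnthropic implementation; the compiled module is reduced to the TSLib __createBinding/__exportStar helpers plus a single re-export of "@langchain/anthropic". A minimal consumer sketch of what that implies, assuming @langchain/anthropic is installed alongside this release candidate and keeps exporting a ChatAnthropic class with the same constructor options shown in the removed JSDoc example:

```typescript
// Sketch only: the old deep import path should keep resolving, now backed by
// the wildcard re-export of "@langchain/anthropic" shown in the hunk above.
import { ChatAnthropic } from "langchain/chat_models/anthropic";

const model = new ChatAnthropic({
  temperature: 0.9,
  anthropicApiKey: "YOUR-API-KEY", // placeholder; normally read from ANTHROPIC_API_KEY
});

const res = await model.invoke("Hello!");
console.log(res.content);
```

Whether options such as maxTokensToSample or stopSequences behave exactly as the removed code did now depends on @langchain/anthropic rather than on this package.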
package/dist/chat_models/anthropic.d.ts
@@ -1,156 +1 @@
- import { Anthropic, ClientOptions } from "@anthropic-ai/sdk";
- import type { CompletionCreateParams } from "@anthropic-ai/sdk/resources/completions";
- import type { Stream } from "@anthropic-ai/sdk/streaming";
- import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
- import { BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
- import { BaseChatModel, BaseChatModelParams } from "./base.js";
- import { BaseLanguageModelCallOptions } from "../base_language/index.js";
- export declare const DEFAULT_STOP_SEQUENCES: string[];
- /**
- * Input to AnthropicChat class.
- */
- export interface AnthropicInput {
- /** Amount of randomness injected into the response. Ranges
- * from 0 to 1. Use temp closer to 0 for analytical /
- * multiple choice, and temp closer to 1 for creative
- * and generative tasks.
- */
- temperature?: number;
- /** Only sample from the top K options for each subsequent
- * token. Used to remove "long tail" low probability
- * responses. Defaults to -1, which disables it.
- */
- topK?: number;
- /** Does nucleus sampling, in which we compute the
- * cumulative distribution over all the options for each
- * subsequent token in decreasing probability order and
- * cut it off once it reaches a particular probability
- * specified by top_p. Defaults to -1, which disables it.
- * Note that you should either alter temperature or top_p,
- * but not both.
- */
- topP?: number;
- /** A maximum number of tokens to generate before stopping. */
- maxTokensToSample: number;
- /** A list of strings upon which to stop generating.
- * You probably want `["\n\nHuman:"]`, as that's the cue for
- * the next turn in the dialog agent.
- */
- stopSequences?: string[];
- /** Whether to stream the results or not */
- streaming?: boolean;
- /** Anthropic API key */
- anthropicApiKey?: string;
- /** Anthropic API URL */
- anthropicApiUrl?: string;
- /** Model name to use */
- modelName: string;
- /** Overridable Anthropic ClientOptions */
- clientOptions: ClientOptions;
- /** Holds any additional parameters that are valid to pass to {@link
- * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} that are not explicitly specified on this class.
- */
- invocationKwargs?: Kwargs;
- }
- /**
- * A type representing additional parameters that can be passed to the
- * Anthropic API.
- */
- type Kwargs = Record<string, any>;
- /**
- * Wrapper around Anthropic large language models.
- *
- * To use you should have the `@anthropic-ai/sdk` package installed, with the
- * `ANTHROPIC_API_KEY` environment variable set.
- *
- * @remarks
- * Any parameters that are valid to be passed to {@link
- * https://console.anthropic.com/docs/api/reference |
- * `anthropic.complete`} can be passed through {@link invocationKwargs},
- * even if not explicitly available on this class.
- * @example
- * ```typescript
- * const model = new ChatAnthropic({
- * temperature: 0.9,
- * anthropicApiKey: 'YOUR-API-KEY',
- * });
- * const res = await model.invoke({ input: 'Hello!' });
- * console.log(res);
- * ```
- */
- export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseChatModel<CallOptions> implements AnthropicInput {
- static lc_name(): string;
- get lc_secrets(): {
- [key: string]: string;
- } | undefined;
- get lc_aliases(): Record<string, string>;
- lc_serializable: boolean;
- anthropicApiKey?: string;
- apiUrl?: string;
- temperature: number;
- topK: number;
- topP: number;
- maxTokensToSample: number;
- modelName: string;
- invocationKwargs?: Kwargs;
- stopSequences?: string[];
- streaming: boolean;
- clientOptions: ClientOptions;
- protected batchClient: Anthropic;
- protected streamingClient: Anthropic;
- constructor(fields?: Partial<AnthropicInput> & BaseChatModelParams);
- /**
- * Get the parameters used to invoke the model
- */
- invocationParams(options?: this["ParsedCallOptions"]): Omit<CompletionCreateParams, "prompt"> & Kwargs;
- /** @ignore */
- _identifyingParams(): {
- metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
- stream?: boolean | undefined;
- model: (string & {}) | "claude-2" | "claude-instant-1";
- temperature?: number | undefined;
- top_p?: number | undefined;
- top_k?: number | undefined;
- max_tokens_to_sample: number;
- stop_sequences?: string[] | undefined;
- model_name: string;
- };
- /**
- * Get the identifying parameters for the model
- */
- identifyingParams(): {
- metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
- stream?: boolean | undefined;
- model: (string & {}) | "claude-2" | "claude-instant-1";
- temperature?: number | undefined;
- top_p?: number | undefined;
- top_k?: number | undefined;
- max_tokens_to_sample: number;
- stop_sequences?: string[] | undefined;
- model_name: string;
- };
- _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
- /**
- * Formats messages as a prompt for the model.
- * @param messages The base messages to format as a prompt.
- * @returns The formatted prompt.
- */
- protected formatMessagesAsPrompt(messages: BaseMessage[]): string;
- /** @ignore */
- _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
- /**
- * Creates a streaming request with retry.
- * @param request The parameters for creating a completion.
- * @returns A streaming request.
- */
- protected createStreamWithRetry(request: CompletionCreateParams & Kwargs): Promise<Stream<Anthropic.Completions.Completion>>;
- /** @ignore */
- protected completionWithRetry(request: CompletionCreateParams & Kwargs, options: {
- signal?: AbortSignal;
- }): Promise<Anthropic.Completions.Completion>;
- _llmType(): string;
- /** @ignore */
- _combineLLMOutput(): never[];
- }
- export {};
+ export * from "@langchain/anthropic";
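The declaration file follows the same pattern: the local AnthropicInput interface and ChatAnthropic class declaration are removed, and types now flow through the wildcard export * from "@langchain/anthropic". A type-level sketch, assuming the upstream package still publishes an options type named AnthropicInput with the fields shown in the removed interface:

```typescript
// Type-only sketch; AnthropicInput is assumed to be re-exported through
// `export * from "@langchain/anthropic"` in the rewritten .d.ts above.
import { ChatAnthropic, type AnthropicInput } from "langchain/chat_models/anthropic";

const fields: Partial<AnthropicInput> = {
  temperature: 0,
  maxTokensToSample: 1024, // field name as declared in the removed interface; may differ upstream
};

const model = new ChatAnthropic({ ...fields, anthropicApiKey: "YOUR-API-KEY" });
```

If the upstream declarations rename or drop any of these fields, compilation against 0.0.197-rc.0 will surface that as a type error rather than a runtime change.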