langchain 0.0.137 → 0.0.139

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/chat_models/minimax.cjs +1 -0
  2. package/chat_models/minimax.d.ts +1 -0
  3. package/chat_models/minimax.js +1 -0
  4. package/dist/agents/initialize.cjs +11 -0
  5. package/dist/agents/initialize.d.ts +4 -0
  6. package/dist/agents/initialize.js +11 -0
  7. package/dist/agents/xml/index.cjs +119 -0
  8. package/dist/agents/xml/index.d.ts +51 -0
  9. package/dist/agents/xml/index.js +114 -0
  10. package/dist/agents/xml/prompt.cjs +23 -0
  11. package/dist/agents/xml/prompt.d.ts +1 -0
  12. package/dist/agents/xml/prompt.js +20 -0
  13. package/dist/callbacks/base.d.ts +12 -4
  14. package/dist/callbacks/handlers/run_collector.cjs +50 -0
  15. package/dist/callbacks/handlers/run_collector.d.ts +26 -0
  16. package/dist/callbacks/handlers/run_collector.js +46 -0
  17. package/dist/callbacks/handlers/tracer.cjs +33 -20
  18. package/dist/callbacks/handlers/tracer.d.ts +7 -3
  19. package/dist/callbacks/handlers/tracer.js +33 -20
  20. package/dist/callbacks/handlers/tracer_langchain.cjs +1 -0
  21. package/dist/callbacks/handlers/tracer_langchain.d.ts +2 -1
  22. package/dist/callbacks/handlers/tracer_langchain.js +1 -0
  23. package/dist/callbacks/index.cjs +3 -1
  24. package/dist/callbacks/index.d.ts +1 -0
  25. package/dist/callbacks/index.js +1 -0
  26. package/dist/callbacks/manager.cjs +29 -14
  27. package/dist/callbacks/manager.d.ts +9 -4
  28. package/dist/callbacks/manager.js +29 -14
  29. package/dist/chains/openai_functions/extraction.cjs +2 -2
  30. package/dist/chains/openai_functions/extraction.d.ts +5 -4
  31. package/dist/chains/openai_functions/extraction.js +2 -2
  32. package/dist/chains/openai_functions/openapi.d.ts +2 -1
  33. package/dist/chains/openai_functions/structured_output.d.ts +4 -3
  34. package/dist/chains/openai_functions/tagging.cjs +2 -2
  35. package/dist/chains/openai_functions/tagging.d.ts +5 -4
  36. package/dist/chains/openai_functions/tagging.js +2 -2
  37. package/dist/chat_models/anthropic.cjs +7 -5
  38. package/dist/chat_models/anthropic.d.ts +17 -12
  39. package/dist/chat_models/anthropic.js +4 -2
  40. package/dist/chat_models/minimax.cjs +547 -0
  41. package/dist/chat_models/minimax.d.ts +364 -0
  42. package/dist/chat_models/minimax.js +543 -0
  43. package/dist/chat_models/ollama.cjs +136 -0
  44. package/dist/chat_models/ollama.d.ts +34 -0
  45. package/dist/chat_models/ollama.js +136 -0
  46. package/dist/embeddings/minimax.cjs +152 -0
  47. package/dist/embeddings/minimax.d.ts +104 -0
  48. package/dist/embeddings/minimax.js +148 -0
  49. package/dist/experimental/chat_models/anthropic_functions.cjs +129 -0
  50. package/dist/experimental/chat_models/anthropic_functions.d.ts +20 -0
  51. package/dist/experimental/chat_models/anthropic_functions.js +125 -0
  52. package/dist/llms/ollama.cjs +136 -0
  53. package/dist/llms/ollama.d.ts +34 -0
  54. package/dist/llms/ollama.js +136 -0
  55. package/dist/load/import_constants.cjs +1 -0
  56. package/dist/load/import_constants.js +1 -0
  57. package/dist/load/import_map.cjs +4 -2
  58. package/dist/load/import_map.d.ts +2 -0
  59. package/dist/load/import_map.js +2 -0
  60. package/dist/schema/output_parser.cjs +1 -1
  61. package/dist/schema/output_parser.js +1 -1
  62. package/dist/schema/runnable.cjs +54 -15
  63. package/dist/schema/runnable.d.ts +9 -3
  64. package/dist/schema/runnable.js +55 -16
  65. package/dist/sql_db.cjs +3 -1
  66. package/dist/sql_db.js +3 -1
  67. package/dist/util/ollama.d.ts +34 -0
  68. package/dist/vectorstores/redis.cjs +17 -2
  69. package/dist/vectorstores/redis.d.ts +10 -1
  70. package/dist/vectorstores/redis.js +17 -2
  71. package/dist/vectorstores/zep.cjs +2 -1
  72. package/dist/vectorstores/zep.js +3 -2
  73. package/embeddings/minimax.cjs +1 -0
  74. package/embeddings/minimax.d.ts +1 -0
  75. package/embeddings/minimax.js +1 -0
  76. package/experimental/chat_models/anthropic_functions.cjs +1 -0
  77. package/experimental/chat_models/anthropic_functions.d.ts +1 -0
  78. package/experimental/chat_models/anthropic_functions.js +1 -0
  79. package/package.json +34 -5
@@ -180,11 +180,11 @@ export class CallbackManagerForChainRun extends BaseRunManager {
180
180
  }
181
181
  return manager;
182
182
  }
183
- async handleChainError(err) {
183
+ async handleChainError(err, _runId, _parentRunId, _tags, kwargs) {
184
184
  await Promise.all(this.handlers.map((handler) => consumeCallback(async () => {
185
185
  if (!handler.ignoreChain) {
186
186
  try {
187
- await handler.handleChainError?.(err, this.runId, this._parentRunId, this.tags);
187
+ await handler.handleChainError?.(err, this.runId, this._parentRunId, this.tags, kwargs);
188
188
  }
189
189
  catch (err) {
190
190
  console.error(`Error in handler ${handler.constructor.name}, handleChainError: ${err}`);
@@ -192,11 +192,11 @@ export class CallbackManagerForChainRun extends BaseRunManager {
192
192
  }
193
193
  }, handler.awaitHandlers)));
194
194
  }
195
- async handleChainEnd(output) {
195
+ async handleChainEnd(output, _runId, _parentRunId, _tags, kwargs) {
196
196
  await Promise.all(this.handlers.map((handler) => consumeCallback(async () => {
197
197
  if (!handler.ignoreChain) {
198
198
  try {
199
- await handler.handleChainEnd?.(output, this.runId, this._parentRunId, this.tags);
199
+ await handler.handleChainEnd?.(output, this.runId, this._parentRunId, this.tags, kwargs);
200
200
  }
201
201
  catch (err) {
202
202
  console.error(`Error in handler ${handler.constructor.name}, handleChainEnd: ${err}`);
@@ -561,40 +561,55 @@ export class TraceGroup {
561
561
  value: void 0
562
562
  });
563
563
  }
564
- async getTraceGroupCallbackManager(group_name, options) {
564
+ async getTraceGroupCallbackManager(group_name, inputs, options) {
565
565
  const cb = new LangChainTracer(options);
566
566
  const cm = await CallbackManager.configure([cb]);
567
567
  const runManager = await cm?.handleChainStart({
568
568
  lc: 1,
569
569
  type: "not_implemented",
570
570
  id: ["langchain", "callbacks", "groups", group_name],
571
- }, {});
571
+ }, inputs ?? {});
572
572
  if (!runManager) {
573
573
  throw new Error("Failed to create run group callback manager.");
574
574
  }
575
575
  return runManager;
576
576
  }
577
- async start() {
577
+ async start(inputs) {
578
578
  if (!this.runManager) {
579
- this.runManager = await this.getTraceGroupCallbackManager(this.groupName, this.options);
579
+ this.runManager = await this.getTraceGroupCallbackManager(this.groupName, inputs, this.options);
580
580
  }
581
581
  return this.runManager.getChild();
582
582
  }
583
- async end() {
583
+ async error(err) {
584
584
  if (this.runManager) {
585
- await this.runManager.handleChainEnd({});
585
+ await this.runManager.handleChainError(err);
586
586
  this.runManager = undefined;
587
587
  }
588
588
  }
589
+ async end(output) {
590
+ if (this.runManager) {
591
+ await this.runManager.handleChainEnd(output ?? {});
592
+ this.runManager = undefined;
593
+ }
594
+ }
595
+ }
596
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
597
+ function _coerceToDict(value, defaultKey) {
598
+ return value && !Array.isArray(value) && typeof value === "object"
599
+ ? value
600
+ : { [defaultKey]: value };
589
601
  }
590
602
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
591
603
  export async function traceAsGroup(groupOptions, enclosedCode, ...args) {
592
604
  const traceGroup = new TraceGroup(groupOptions.name, groupOptions);
593
- const callbackManager = await traceGroup.start();
605
+ const callbackManager = await traceGroup.start({ ...args });
594
606
  try {
595
- return await enclosedCode(callbackManager, ...args);
607
+ const result = await enclosedCode(callbackManager, ...args);
608
+ await traceGroup.end(_coerceToDict(result, "output"));
609
+ return result;
596
610
  }
597
- finally {
598
- await traceGroup.end();
611
+ catch (err) {
612
+ await traceGroup.error(err);
613
+ throw err;
599
614
  }
600
615
  }
@@ -42,7 +42,7 @@ Passage:
42
42
  * Function that creates an extraction chain using the provided JSON schema.
43
43
  * It sets up the necessary components, such as the prompt, output parser, and tags.
44
44
  * @param schema JSON schema of the function parameters.
45
- * @param llm Must be a ChatOpenAI model that supports function calling.
45
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
46
46
  * @returns A LLMChain instance configured to return data matching the schema.
47
47
  */
48
48
  function createExtractionChain(schema, llm) {
@@ -63,7 +63,7 @@ exports.createExtractionChain = createExtractionChain;
63
63
  * converts the Zod schema to a JSON schema using zod-to-json-schema
64
64
  * before creating the extraction chain.
65
65
  * @param schema The Zod schema which extracted data should match
66
- * @param llm Must be a ChatOpenAI model that supports function calling.
66
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
67
67
  * @returns A LLMChain instance configured to return data matching the schema.
68
68
  */
69
69
  function createExtractionChainFromZod(
@@ -2,20 +2,21 @@ import { z } from "zod";
2
2
  import { ChatOpenAI } from "../../chat_models/openai.js";
3
3
  import { FunctionParameters } from "../../output_parsers/openai_functions.js";
4
4
  import { LLMChain } from "../llm_chain.js";
5
+ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_functions.js";
5
6
  /**
6
7
  * Function that creates an extraction chain using the provided JSON schema.
7
8
  * It sets up the necessary components, such as the prompt, output parser, and tags.
8
9
  * @param schema JSON schema of the function parameters.
9
- * @param llm Must be a ChatOpenAI model that supports function calling.
10
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
10
11
  * @returns A LLMChain instance configured to return data matching the schema.
11
12
  */
12
- export declare function createExtractionChain(schema: FunctionParameters, llm: ChatOpenAI): LLMChain<object, ChatOpenAI>;
13
+ export declare function createExtractionChain(schema: FunctionParameters, llm: ChatOpenAI | AnthropicFunctions): LLMChain<object, ChatOpenAI | AnthropicFunctions>;
13
14
  /**
14
15
  * Function that creates an extraction chain from a Zod schema. It
15
16
  * converts the Zod schema to a JSON schema using zod-to-json-schema
16
17
  * before creating the extraction chain.
17
18
  * @param schema The Zod schema which extracted data should match
18
- * @param llm Must be a ChatOpenAI model that supports function calling.
19
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
19
20
  * @returns A LLMChain instance configured to return data matching the schema.
20
21
  */
21
- export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: ChatOpenAI): LLMChain<object, ChatOpenAI>;
22
+ export declare function createExtractionChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: ChatOpenAI | AnthropicFunctions): LLMChain<object, ChatOpenAI | AnthropicFunctions>;
@@ -39,7 +39,7 @@ Passage:
39
39
  * Function that creates an extraction chain using the provided JSON schema.
40
40
  * It sets up the necessary components, such as the prompt, output parser, and tags.
41
41
  * @param schema JSON schema of the function parameters.
42
- * @param llm Must be a ChatOpenAI model that supports function calling.
42
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
43
43
  * @returns A LLMChain instance configured to return data matching the schema.
44
44
  */
45
45
  export function createExtractionChain(schema, llm) {
@@ -59,7 +59,7 @@ export function createExtractionChain(schema, llm) {
59
59
  * converts the Zod schema to a JSON schema using zod-to-json-schema
60
60
  * before creating the extraction chain.
61
61
  * @param schema The Zod schema which extracted data should match
62
- * @param llm Must be a ChatOpenAI model that supports function calling.
62
+ * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.
63
63
  * @returns A LLMChain instance configured to return data matching the schema.
64
64
  */
65
65
  export function createExtractionChainFromZod(
@@ -4,11 +4,12 @@ import { LLMChainInput } from "../llm_chain.js";
4
4
  import { ChatOpenAI } from "../../chat_models/openai.js";
5
5
  import { BasePromptTemplate } from "../../prompts/base.js";
6
6
  import { SequentialChain } from "../sequential_chain.js";
7
+ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_functions.js";
7
8
  /**
8
9
  * Type representing the options for creating an OpenAPI chain.
9
10
  */
10
11
  export type OpenAPIChainOptions = {
11
- llm?: ChatOpenAI;
12
+ llm?: ChatOpenAI | AnthropicFunctions;
12
13
  prompt?: BasePromptTemplate;
13
14
  requestChain?: BaseChain;
14
15
  llmChainInputs?: LLMChainInput;
@@ -7,6 +7,7 @@ import { BasePromptTemplate } from "../../prompts/index.js";
7
7
  import { BaseLLMOutputParser } from "../../schema/output_parser.js";
8
8
  import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js";
9
9
  import { ChatGeneration } from "../../schema/index.js";
10
+ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_functions.js";
10
11
  /**
11
12
  * Type representing the input for creating a structured output chain. It
12
13
  * extends the LLMChainInput type and includes an additional
@@ -16,7 +17,7 @@ import { ChatGeneration } from "../../schema/index.js";
16
17
  export type StructuredOutputChainInput = Omit<LLMChainInput, "outputParser" | "llm"> & {
17
18
  outputSchema: JsonSchema7Type;
18
19
  prompt: BasePromptTemplate;
19
- llm?: ChatOpenAI;
20
+ llm?: ChatOpenAI | AnthropicFunctions;
20
21
  };
21
22
  /**
22
23
  * Class that extends the BaseLLMOutputParser class. It provides
@@ -44,5 +45,5 @@ export declare class FunctionCallStructuredOutputParser<T extends z.AnyZodObject
44
45
  * as well as an additional required "outputSchema" JSON Schema object.
45
46
  * @returns OpenAPIChain
46
47
  */
47
- export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any, ChatOpenAI>;
48
- export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any, ChatOpenAI>;
48
+ export declare function createStructuredOutputChain<T extends z.AnyZodObject = z.AnyZodObject>(input: StructuredOutputChainInput): LLMChain<any, ChatOpenAI | AnthropicFunctions>;
49
+ export declare function createStructuredOutputChainFromZod<T extends z.AnyZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput, "outputSchema">): LLMChain<any, ChatOpenAI | AnthropicFunctions>;
@@ -30,7 +30,7 @@ Passage:
30
30
  * LLM, and options. It constructs the LLM with the necessary
31
31
  * functions, prompt, output parser, and tags.
32
32
  * @param schema The schema defining the structure of function parameters.
33
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
33
+ * @param llm LLM to use in the chain. Must support function calling.
34
34
  * @param options Options for creating the tagging chain.
35
35
  * @returns A new instance of LLMChain configured for tagging.
36
36
  */
@@ -53,7 +53,7 @@ exports.createTaggingChain = createTaggingChain;
53
53
  * the Zod schema to a JSON schema using the zodToJsonSchema function and
54
54
  * then calls createTaggingChain with the converted schema.
55
55
  * @param schema The Zod schema which extracted data should match.
56
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
56
+ * @param llm LLM to use in the chain. Must support function calling.
57
57
  * @param options Options for creating the tagging chain.
58
58
  * @returns A new instance of LLMChain configured for tagging.
59
59
  */
@@ -3,6 +3,7 @@ import { ChatOpenAI } from "../../chat_models/openai.js";
3
3
  import { PromptTemplate } from "../../prompts/prompt.js";
4
4
  import { FunctionParameters } from "../../output_parsers/openai_functions.js";
5
5
  import { LLMChain, LLMChainInput } from "../llm_chain.js";
6
+ import { AnthropicFunctions } from "../../experimental/chat_models/anthropic_functions.js";
6
7
  /**
7
8
  * Type representing the options for creating a tagging chain.
8
9
  */
@@ -14,18 +15,18 @@ export type TaggingChainOptions = {
14
15
  * LLM, and options. It constructs the LLM with the necessary
15
16
  * functions, prompt, output parser, and tags.
16
17
  * @param schema The schema defining the structure of function parameters.
17
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
18
+ * @param llm LLM to use in the chain. Must support function calling.
18
19
  * @param options Options for creating the tagging chain.
19
20
  * @returns A new instance of LLMChain configured for tagging.
20
21
  */
21
- export declare function createTaggingChain(schema: FunctionParameters, llm: ChatOpenAI, options?: TaggingChainOptions): LLMChain<object, ChatOpenAI>;
22
+ export declare function createTaggingChain(schema: FunctionParameters, llm: ChatOpenAI | AnthropicFunctions, options?: TaggingChainOptions): LLMChain<object, ChatOpenAI | AnthropicFunctions>;
22
23
  /**
23
24
  * Function that creates a tagging chain from a Zod schema. It converts
24
25
  * the Zod schema to a JSON schema using the zodToJsonSchema function and
25
26
  * then calls createTaggingChain with the converted schema.
26
27
  * @param schema The Zod schema which extracted data should match.
27
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
28
+ * @param llm LLM to use in the chain. Must support function calling.
28
29
  * @param options Options for creating the tagging chain.
29
30
  * @returns A new instance of LLMChain configured for tagging.
30
31
  */
31
- export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: ChatOpenAI, options?: TaggingChainOptions): LLMChain<object, ChatOpenAI>;
32
+ export declare function createTaggingChainFromZod(schema: z.ZodObject<any, any, any, any>, llm: ChatOpenAI | AnthropicFunctions, options?: TaggingChainOptions): LLMChain<object, ChatOpenAI | AnthropicFunctions>;
@@ -27,7 +27,7 @@ Passage:
27
27
  * LLM, and options. It constructs the LLM with the necessary
28
28
  * functions, prompt, output parser, and tags.
29
29
  * @param schema The schema defining the structure of function parameters.
30
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
30
+ * @param llm LLM to use in the chain. Must support function calling.
31
31
  * @param options Options for creating the tagging chain.
32
32
  * @returns A new instance of LLMChain configured for tagging.
33
33
  */
@@ -49,7 +49,7 @@ export function createTaggingChain(schema, llm, options = {}) {
49
49
  * the Zod schema to a JSON schema using the zodToJsonSchema function and
50
50
  * then calls createTaggingChain with the converted schema.
51
51
  * @param schema The Zod schema which extracted data should match.
52
- * @param llm LLM to use in the chain. Must support OpenAI function calling.
52
+ * @param llm LLM to use in the chain. Must support function calling.
53
53
  * @param options Options for creating the tagging chain.
54
54
  * @returns A new instance of LLMChain configured for tagging.
55
55
  */
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ChatAnthropic = void 0;
3
+ exports.ChatAnthropic = exports.DEFAULT_STOP_SEQUENCES = void 0;
4
4
  const sdk_1 = require("@anthropic-ai/sdk");
5
5
  const index_js_1 = require("../schema/index.cjs");
6
6
  const env_js_1 = require("../util/env.cjs");
@@ -41,7 +41,7 @@ function getAnthropicPromptFromMessage(message) {
41
41
  throw new Error(`Unknown message type: ${type}`);
42
42
  }
43
43
  }
44
- const DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
44
+ exports.DEFAULT_STOP_SEQUENCES = [sdk_1.HUMAN_PROMPT];
45
45
  /**
46
46
  * Wrapper around Anthropic large language models.
47
47
  *
@@ -117,7 +117,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
117
117
  enumerable: true,
118
118
  configurable: true,
119
119
  writable: true,
120
- value: "claude-v1"
120
+ value: "claude-2"
121
121
  });
122
122
  Object.defineProperty(this, "invocationKwargs", {
123
123
  enumerable: true,
@@ -184,9 +184,9 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
184
184
  temperature: this.temperature,
185
185
  top_k: this.topK,
186
186
  top_p: this.topP,
187
- stop_sequences: options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
187
+ stop_sequences: options?.stop?.concat(exports.DEFAULT_STOP_SEQUENCES) ??
188
188
  this.stopSequences ??
189
- DEFAULT_STOP_SEQUENCES,
189
+ exports.DEFAULT_STOP_SEQUENCES,
190
190
  max_tokens_to_sample: this.maxTokensToSample,
191
191
  stream: this.streaming,
192
192
  ...this.invocationKwargs,
@@ -308,6 +308,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
308
308
  ...this.clientOptions,
309
309
  ...options,
310
310
  apiKey: this.anthropicApiKey,
311
+ maxRetries: 0,
311
312
  });
312
313
  }
313
314
  const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
@@ -324,6 +325,7 @@ class ChatAnthropic extends base_js_1.BaseChatModel {
324
325
  ...this.clientOptions,
325
326
  ...options,
326
327
  apiKey: this.anthropicApiKey,
328
+ maxRetries: 0,
327
329
  });
328
330
  }
329
331
  const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });
@@ -1,8 +1,11 @@
1
1
  import { Anthropic, ClientOptions } from "@anthropic-ai/sdk";
2
2
  import type { CompletionCreateParams } from "@anthropic-ai/sdk/resources/completions";
3
+ import type { Stream } from "@anthropic-ai/sdk/streaming";
3
4
  import { CallbackManagerForLLMRun } from "../callbacks/manager.js";
4
5
  import { BaseMessage, ChatGenerationChunk, ChatResult } from "../schema/index.js";
5
6
  import { BaseChatModel, BaseChatModelParams } from "./base.js";
7
+ import { BaseLanguageModelCallOptions } from "../base_language/index.js";
8
+ export declare const DEFAULT_STOP_SEQUENCES: string[];
6
9
  /**
7
10
  * Input to AnthropicChat class.
8
11
  */
@@ -68,7 +71,7 @@ type Kwargs = Record<string, any>;
68
71
  * even if not explicitly available on this class.
69
72
  *
70
73
  */
71
- export declare class ChatAnthropic extends BaseChatModel implements AnthropicInput {
74
+ export declare class ChatAnthropic<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseChatModel<CallOptions> implements AnthropicInput {
72
75
  static lc_name(): string;
73
76
  get lc_secrets(): {
74
77
  [key: string]: string;
@@ -86,8 +89,8 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
86
89
  stopSequences?: string[];
87
90
  streaming: boolean;
88
91
  clientOptions: ClientOptions;
89
- private batchClient;
90
- private streamingClient;
92
+ protected batchClient: Anthropic;
93
+ protected streamingClient: Anthropic;
91
94
  constructor(fields?: Partial<AnthropicInput> & BaseChatModelParams);
92
95
  /**
93
96
  * Get the parameters used to invoke the model
@@ -95,28 +98,28 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
95
98
  invocationParams(options?: this["ParsedCallOptions"]): Omit<CompletionCreateParams, "prompt"> & Kwargs;
96
99
  /** @ignore */
97
100
  _identifyingParams(): {
98
- metadata?: Anthropic.Completions.CompletionCreateParams.CompletionRequestNonStreaming.Metadata | Anthropic.Completions.CompletionCreateParams.CompletionRequestStreaming.Metadata | undefined;
101
+ metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
99
102
  stream?: boolean | undefined;
100
- model: (string & {}) | "claude-2" | "claude-instant-1";
103
+ model: "claude-2" | (string & {}) | "claude-instant-1";
101
104
  temperature?: number | undefined;
102
105
  top_p?: number | undefined;
103
- top_k?: number | undefined;
104
106
  max_tokens_to_sample: number;
105
107
  stop_sequences?: string[] | undefined;
108
+ top_k?: number | undefined;
106
109
  model_name: string;
107
110
  };
108
111
  /**
109
112
  * Get the identifying parameters for the model
110
113
  */
111
114
  identifyingParams(): {
112
- metadata?: Anthropic.Completions.CompletionCreateParams.CompletionRequestNonStreaming.Metadata | Anthropic.Completions.CompletionCreateParams.CompletionRequestStreaming.Metadata | undefined;
115
+ metadata?: Anthropic.Completions.CompletionCreateParams.Metadata | undefined;
113
116
  stream?: boolean | undefined;
114
- model: (string & {}) | "claude-2" | "claude-instant-1";
117
+ model: "claude-2" | (string & {}) | "claude-instant-1";
115
118
  temperature?: number | undefined;
116
119
  top_p?: number | undefined;
117
- top_k?: number | undefined;
118
120
  max_tokens_to_sample: number;
119
121
  stop_sequences?: string[] | undefined;
122
+ top_k?: number | undefined;
120
123
  model_name: string;
121
124
  };
122
125
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
@@ -125,7 +128,7 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
125
128
  * @param messages The base messages to format as a prompt.
126
129
  * @returns The formatted prompt.
127
130
  */
128
- private formatMessagesAsPrompt;
131
+ protected formatMessagesAsPrompt(messages: BaseMessage[]): string;
129
132
  /** @ignore */
130
133
  _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
131
134
  /**
@@ -133,9 +136,11 @@ export declare class ChatAnthropic extends BaseChatModel implements AnthropicInp
133
136
  * @param request The parameters for creating a completion.
134
137
  * @returns A streaming request.
135
138
  */
136
- private createStreamWithRetry;
139
+ protected createStreamWithRetry(request: CompletionCreateParams & Kwargs): Promise<Stream<Anthropic.Completions.Completion>>;
137
140
  /** @ignore */
138
- private completionWithRetry;
141
+ protected completionWithRetry(request: CompletionCreateParams & Kwargs, options: {
142
+ signal?: AbortSignal;
143
+ }): Promise<Anthropic.Completions.Completion>;
139
144
  _llmType(): string;
140
145
  /** @ignore */
141
146
  _combineLLMOutput(): never[];
@@ -38,7 +38,7 @@ function getAnthropicPromptFromMessage(message) {
38
38
  throw new Error(`Unknown message type: ${type}`);
39
39
  }
40
40
  }
41
- const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
41
+ export const DEFAULT_STOP_SEQUENCES = [HUMAN_PROMPT];
42
42
  /**
43
43
  * Wrapper around Anthropic large language models.
44
44
  *
@@ -114,7 +114,7 @@ export class ChatAnthropic extends BaseChatModel {
114
114
  enumerable: true,
115
115
  configurable: true,
116
116
  writable: true,
117
- value: "claude-v1"
117
+ value: "claude-2"
118
118
  });
119
119
  Object.defineProperty(this, "invocationKwargs", {
120
120
  enumerable: true,
@@ -305,6 +305,7 @@ export class ChatAnthropic extends BaseChatModel {
305
305
  ...this.clientOptions,
306
306
  ...options,
307
307
  apiKey: this.anthropicApiKey,
308
+ maxRetries: 0,
308
309
  });
309
310
  }
310
311
  const makeCompletionRequest = async () => this.streamingClient.completions.create({ ...request, stream: true }, { headers: request.headers });
@@ -321,6 +322,7 @@ export class ChatAnthropic extends BaseChatModel {
321
322
  ...this.clientOptions,
322
323
  ...options,
323
324
  apiKey: this.anthropicApiKey,
325
+ maxRetries: 0,
324
326
  });
325
327
  }
326
328
  const makeCompletionRequest = async () => this.batchClient.completions.create({ ...request, stream: false }, { headers: request.headers });