langchain 0.0.137 → 0.0.139

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/chat_models/minimax.cjs +1 -0
  2. package/chat_models/minimax.d.ts +1 -0
  3. package/chat_models/minimax.js +1 -0
  4. package/dist/agents/initialize.cjs +11 -0
  5. package/dist/agents/initialize.d.ts +4 -0
  6. package/dist/agents/initialize.js +11 -0
  7. package/dist/agents/xml/index.cjs +119 -0
  8. package/dist/agents/xml/index.d.ts +51 -0
  9. package/dist/agents/xml/index.js +114 -0
  10. package/dist/agents/xml/prompt.cjs +23 -0
  11. package/dist/agents/xml/prompt.d.ts +1 -0
  12. package/dist/agents/xml/prompt.js +20 -0
  13. package/dist/callbacks/base.d.ts +12 -4
  14. package/dist/callbacks/handlers/run_collector.cjs +50 -0
  15. package/dist/callbacks/handlers/run_collector.d.ts +26 -0
  16. package/dist/callbacks/handlers/run_collector.js +46 -0
  17. package/dist/callbacks/handlers/tracer.cjs +33 -20
  18. package/dist/callbacks/handlers/tracer.d.ts +7 -3
  19. package/dist/callbacks/handlers/tracer.js +33 -20
  20. package/dist/callbacks/handlers/tracer_langchain.cjs +1 -0
  21. package/dist/callbacks/handlers/tracer_langchain.d.ts +2 -1
  22. package/dist/callbacks/handlers/tracer_langchain.js +1 -0
  23. package/dist/callbacks/index.cjs +3 -1
  24. package/dist/callbacks/index.d.ts +1 -0
  25. package/dist/callbacks/index.js +1 -0
  26. package/dist/callbacks/manager.cjs +29 -14
  27. package/dist/callbacks/manager.d.ts +9 -4
  28. package/dist/callbacks/manager.js +29 -14
  29. package/dist/chains/openai_functions/extraction.cjs +2 -2
  30. package/dist/chains/openai_functions/extraction.d.ts +5 -4
  31. package/dist/chains/openai_functions/extraction.js +2 -2
  32. package/dist/chains/openai_functions/openapi.d.ts +2 -1
  33. package/dist/chains/openai_functions/structured_output.d.ts +4 -3
  34. package/dist/chains/openai_functions/tagging.cjs +2 -2
  35. package/dist/chains/openai_functions/tagging.d.ts +5 -4
  36. package/dist/chains/openai_functions/tagging.js +2 -2
  37. package/dist/chat_models/anthropic.cjs +7 -5
  38. package/dist/chat_models/anthropic.d.ts +17 -12
  39. package/dist/chat_models/anthropic.js +4 -2
  40. package/dist/chat_models/minimax.cjs +547 -0
  41. package/dist/chat_models/minimax.d.ts +364 -0
  42. package/dist/chat_models/minimax.js +543 -0
  43. package/dist/chat_models/ollama.cjs +136 -0
  44. package/dist/chat_models/ollama.d.ts +34 -0
  45. package/dist/chat_models/ollama.js +136 -0
  46. package/dist/embeddings/minimax.cjs +152 -0
  47. package/dist/embeddings/minimax.d.ts +104 -0
  48. package/dist/embeddings/minimax.js +148 -0
  49. package/dist/experimental/chat_models/anthropic_functions.cjs +129 -0
  50. package/dist/experimental/chat_models/anthropic_functions.d.ts +20 -0
  51. package/dist/experimental/chat_models/anthropic_functions.js +125 -0
  52. package/dist/llms/ollama.cjs +136 -0
  53. package/dist/llms/ollama.d.ts +34 -0
  54. package/dist/llms/ollama.js +136 -0
  55. package/dist/load/import_constants.cjs +1 -0
  56. package/dist/load/import_constants.js +1 -0
  57. package/dist/load/import_map.cjs +4 -2
  58. package/dist/load/import_map.d.ts +2 -0
  59. package/dist/load/import_map.js +2 -0
  60. package/dist/schema/output_parser.cjs +1 -1
  61. package/dist/schema/output_parser.js +1 -1
  62. package/dist/schema/runnable.cjs +54 -15
  63. package/dist/schema/runnable.d.ts +9 -3
  64. package/dist/schema/runnable.js +55 -16
  65. package/dist/sql_db.cjs +3 -1
  66. package/dist/sql_db.js +3 -1
  67. package/dist/util/ollama.d.ts +34 -0
  68. package/dist/vectorstores/redis.cjs +17 -2
  69. package/dist/vectorstores/redis.d.ts +10 -1
  70. package/dist/vectorstores/redis.js +17 -2
  71. package/dist/vectorstores/zep.cjs +2 -1
  72. package/dist/vectorstores/zep.js +3 -2
  73. package/embeddings/minimax.cjs +1 -0
  74. package/embeddings/minimax.d.ts +1 -0
  75. package/embeddings/minimax.js +1 -0
  76. package/experimental/chat_models/anthropic_functions.cjs +1 -0
  77. package/experimental/chat_models/anthropic_functions.d.ts +1 -0
  78. package/experimental/chat_models/anthropic_functions.js +1 -0
  79. package/package.json +34 -5
@@ -0,0 +1,104 @@
1
+ import { Embeddings, EmbeddingsParams } from "./base.js";
2
+ import { ConfigurationParameters } from "../chat_models/minimax.js";
3
+ /**
4
+ * Interface for MinimaxEmbeddings parameters. Extends EmbeddingsParams and
5
+ * defines additional parameters specific to the MinimaxEmbeddings class.
6
+ */
7
+ export interface MinimaxEmbeddingsParams extends EmbeddingsParams {
8
+ /** Model name to use */
9
+ modelName: string;
10
+ /**
11
+ * API key to use when making requests. Defaults to the value of
12
+ * `MINIMAX_GROUP_ID` environment variable.
13
+ */
14
+ minimaxGroupId?: string;
15
+ /**
16
+ * Secret key to use when making requests. Defaults to the value of
17
+ * `MINIMAX_API_KEY` environment variable.
18
+ */
19
+ minimaxApiKey?: string;
20
+ /**
21
+ * The maximum number of documents to embed in a single request. This is
22
+ * limited by the Minimax API to a maximum of 4096.
23
+ */
24
+ batchSize?: number;
25
+ /**
26
+ * Whether to strip new lines from the input text. This is recommended by
27
+ * Minimax, but may not be suitable for all use cases.
28
+ */
29
+ stripNewLines?: boolean;
30
+ /**
31
+ * The target use-case after generating the vector.
32
+ * When using embeddings, the vector of the target content is first generated through the db and stored in the vector database,
33
+ * and then the vector of the retrieval text is generated through the query.
34
+ * Note: For the parameters of the partial algorithm, we adopted a separate algorithm plan for query and db.
35
+ * Therefore, for a paragraph of text, if it is to be used as a retrieval text, it should use the db,
36
+ * and if it is used as a retrieval text, it should use the query.
37
+ */
38
+ type?: "db" | "query";
39
+ }
40
+ export interface CreateMinimaxEmbeddingRequest {
41
+ /**
42
+ * @type {string}
43
+ * @memberof CreateMinimaxEmbeddingRequest
44
+ */
45
+ model: string;
46
+ /**
47
+ * Text to generate vector expectation
48
+ * @type {CreateEmbeddingRequestInput}
49
+ * @memberof CreateMinimaxEmbeddingRequest
50
+ */
51
+ texts: string[];
52
+ /**
53
+ * The target use-case after generating the vector. When using embeddings,
54
+ * first generate the vector of the target content through the db and store it in the vector database,
55
+ * and then generate the vector of the retrieval text through the query.
56
+ * Note: For the parameter of the algorithm, we use the algorithm scheme of query and db separation,
57
+ * so a text, if it is to be retrieved as a text, should use the db,
58
+ * if it is used as a retrieval text, should use the query.
59
+ * @type {string}
60
+ * @memberof CreateMinimaxEmbeddingRequest
61
+ */
62
+ type: "db" | "query";
63
+ }
64
+ /**
65
+ * Class for generating embeddings using the Minimax API. Extends the
66
+ * Embeddings class and implements MinimaxEmbeddingsParams
67
+ */
68
+ export declare class MinimaxEmbeddings extends Embeddings implements MinimaxEmbeddingsParams {
69
+ modelName: string;
70
+ batchSize: number;
71
+ stripNewLines: boolean;
72
+ minimaxGroupId?: string;
73
+ minimaxApiKey?: string;
74
+ type: "db" | "query";
75
+ apiUrl: string;
76
+ basePath?: string;
77
+ headers?: Record<string, string>;
78
+ constructor(fields?: Partial<MinimaxEmbeddingsParams> & {
79
+ configuration?: ConfigurationParameters;
80
+ });
81
+ /**
82
+ * Method to generate embeddings for an array of documents. Splits the
83
+ * documents into batches and makes requests to the Minimax API to generate
84
+ * embeddings.
85
+ * @param texts Array of documents to generate embeddings for.
86
+ * @returns Promise that resolves to a 2D array of embeddings for each document.
87
+ */
88
+ embedDocuments(texts: string[]): Promise<number[][]>;
89
+ /**
90
+ * Method to generate an embedding for a single document. Calls the
91
+ * embeddingWithRetry method with the document as the input.
92
+ * @param text Document to generate an embedding for.
93
+ * @returns Promise that resolves to an embedding for the document.
94
+ */
95
+ embedQuery(text: string): Promise<number[]>;
96
+ /**
97
+ * Private method to make a request to the Minimax API to generate
98
+ * embeddings. Handles the retry logic and returns the response from the
99
+ * API.
100
+ * @param request Request to send to the Minimax API.
101
+ * @returns Promise that resolves to the response from the API.
102
+ */
103
+ private embeddingWithRetry;
104
+ }
@@ -0,0 +1,148 @@
import { getEnvironmentVariable } from "../util/env.js";
import { chunkArray } from "../util/chunk.js";
import { Embeddings } from "./base.js";
/**
 * Class for generating embeddings using the Minimax API. Extends the
 * Embeddings class and implements MinimaxEmbeddingsParams.
 *
 * Requires a Minimax group ID and API key, taken from the constructor
 * fields or from the `MINIMAX_GROUP_ID` / `MINIMAX_API_KEY` environment
 * variables; throws if either is missing.
 */
export class MinimaxEmbeddings extends Embeddings {
    constructor(fields) {
        const fieldsWithDefaults = { maxConcurrency: 2, ...fields };
        super(fieldsWithDefaults);
        // Defaults; each may be overridden via `fields` below.
        this.modelName = "embo-01";
        this.batchSize = 512;
        this.stripNewLines = true;
        this.minimaxGroupId = undefined;
        this.minimaxApiKey = undefined;
        this.type = "db";
        this.apiUrl = undefined;
        this.basePath = "https://api.minimax.chat/v1";
        this.headers = undefined;
        this.minimaxGroupId =
            fields?.minimaxGroupId ?? getEnvironmentVariable("MINIMAX_GROUP_ID");
        if (!this.minimaxGroupId) {
            throw new Error("Minimax GroupID not found");
        }
        this.minimaxApiKey =
            fields?.minimaxApiKey ?? getEnvironmentVariable("MINIMAX_API_KEY");
        if (!this.minimaxApiKey) {
            throw new Error("Minimax ApiKey not found");
        }
        this.modelName = fieldsWithDefaults?.modelName ?? this.modelName;
        this.batchSize = fieldsWithDefaults?.batchSize ?? this.batchSize;
        this.type = fieldsWithDefaults?.type ?? this.type;
        this.stripNewLines =
            fieldsWithDefaults?.stripNewLines ?? this.stripNewLines;
        // BUGFIX: resolve any configured basePath BEFORE deriving apiUrl from
        // it. Previously apiUrl was computed first, so a user-supplied
        // `configuration.basePath` was silently ignored and requests always
        // went to the default host.
        this.basePath = fields?.configuration?.basePath ?? this.basePath;
        this.headers = fields?.configuration?.headers ?? this.headers;
        this.apiUrl = `${this.basePath}/embeddings`;
    }
    /**
     * Method to generate embeddings for an array of documents. Splits the
     * documents into batches and makes requests to the Minimax API to generate
     * embeddings.
     * @param texts Array of documents to generate embeddings for.
     * @returns Promise that resolves to a 2D array of embeddings for each document.
     */
    async embedDocuments(texts) {
        const batches = chunkArray(this.stripNewLines ? texts.map((t) => t.replace(/\n/g, " ")) : texts, this.batchSize);
        // Fire one request per batch; concurrency is bounded by the caller
        // configured in the Embeddings base class (maxConcurrency default 2).
        const batchRequests = batches.map((batch) => this.embeddingWithRetry({
            model: this.modelName,
            texts: batch,
            type: this.type,
        }));
        const batchResponses = await Promise.all(batchRequests);
        const embeddings = [];
        for (let i = 0; i < batchResponses.length; i += 1) {
            const batch = batches[i];
            const { vectors: batchResponse } = batchResponses[i];
            for (let j = 0; j < batch.length; j += 1) {
                embeddings.push(batchResponse[j]);
            }
        }
        return embeddings;
    }
    /**
     * Method to generate an embedding for a single document. Calls the
     * embeddingWithRetry method with the document as the input.
     * @param text Document to generate an embedding for.
     * @returns Promise that resolves to an embedding for the document.
     */
    async embedQuery(text) {
        const { vectors } = await this.embeddingWithRetry({
            model: this.modelName,
            texts: [this.stripNewLines ? text.replace(/\n/g, " ") : text],
            type: this.type,
        });
        return vectors[0];
    }
    /**
     * Private method to make a request to the Minimax API to generate
     * embeddings. Handles the retry logic and returns the response from the
     * API.
     * @param request Request to send to the Minimax API.
     * @returns Promise that resolves to the response from the API.
     */
    async embeddingWithRetry(request) {
        const makeCompletionRequest = async () => {
            const url = `${this.apiUrl}?GroupId=${this.minimaxGroupId}`;
            const response = await fetch(url, {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    Authorization: `Bearer ${this.minimaxApiKey}`,
                    ...this.headers,
                },
                body: JSON.stringify(request),
            });
            // Surface HTTP failures explicitly. Without this check an error
            // payload (which has no `vectors` field) would propagate and later
            // destructuring would yield undefined embeddings.
            if (!response.ok) {
                throw new Error(`Minimax embeddings request failed with status ${response.status}`);
            }
            const json = await response.json();
            return json;
        };
        // The caller (from the Embeddings base class) applies retry/backoff.
        return this.caller.call(makeCompletionRequest);
    }
}
@@ -0,0 +1,129 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.AnthropicFunctions = void 0;
const fast_xml_parser_1 = require("fast-xml-parser");
const index_js_1 = require("../../schema/index.cjs");
const anthropic_js_1 = require("../../chat_models/anthropic.cjs");
const prompt_js_1 = require("../../prompts/prompt.cjs");
const convert_to_openai_js_1 = require("../../tools/convert_to_openai.cjs");
// System prompt injected ahead of the conversation whenever functions/tools
// are supplied; instructs the model to emit XML <tool>/<tool_input> tags.
const TOOL_SYSTEM_PROMPT =
/* #__PURE__ */
prompt_js_1.PromptTemplate.fromTemplate(`In addition to responding, you can use tools.
You have access to the following tools.

{tools}

In order to use a tool, you can use <tool></tool> to specify the name,
and the <tool_input></tool_input> tags to specify the parameters.
Each parameter should be passed in as <$param_name>$value</$param_name>,
Where $param_name is the name of the specific parameter, and $value
is the value for that parameter.

You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that accepts a single
parameter 'query' that could run a google search, in order to search
for the weather in SF you would respond:

<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>`);
/**
 * Experimental wrapper that emulates OpenAI-style function calling on top of
 * ChatAnthropic by prompting for, and parsing, XML tool-invocation tags.
 */
class AnthropicFunctions extends anthropic_js_1.ChatAnthropic {
    static lc_name() {
        return "AnthropicFunctions";
    }
    constructor(fields) {
        super(fields ?? {});
    }
    async _generate(messages, options, runManager) {
        let finalMessages = messages;
        // Name of the function the caller forced via `function_call`, if any.
        let forcedFunctionName;
        if (options.tools) {
            // Merge structured tools (converted to OpenAI function schemas)
            // into any explicitly provided functions.
            // eslint-disable-next-line no-param-reassign
            options.functions = (options.functions ?? []).concat(options.tools.map(convert_to_openai_js_1.formatToOpenAIFunction));
        }
        if (options.functions !== undefined && options.functions.length > 0) {
            const systemContent = await TOOL_SYSTEM_PROMPT.format({
                tools: JSON.stringify(options.functions, null, 2),
            });
            finalMessages = [
                new index_js_1.SystemMessage({ content: systemContent }),
                ...finalMessages,
            ];
            // Stop generation at the closing tool_input tag so we can complete
            // and parse the XML ourselves.
            const stopSequences = options?.stop?.concat(anthropic_js_1.DEFAULT_STOP_SEQUENCES) ??
                this.stopSequences ??
                anthropic_js_1.DEFAULT_STOP_SEQUENCES;
            // eslint-disable-next-line no-param-reassign
            options.stop = stopSequences.concat(["</tool_input>"]);
            if (options.function_call) {
                forcedFunctionName =
                    typeof options.function_call === "string"
                        ? JSON.parse(options.function_call).name
                        : options.function_call.name;
                const matchingFunction = options.functions.find((fn) => fn.name === forcedFunctionName);
                if (!matchingFunction) {
                    throw new Error(`No matching function found for passed "function_call"`);
                }
                // Pre-seed the assistant turn with the tool tag so the model
                // continues with the tool input for the forced function.
                finalMessages = [
                    ...finalMessages,
                    new index_js_1.AIMessage({
                        content: `<tool>${forcedFunctionName}</tool>`,
                    }),
                ];
                // eslint-disable-next-line no-param-reassign
                delete options.function_call;
            }
            // eslint-disable-next-line no-param-reassign
            delete options.functions;
        }
        else if (options.function_call !== undefined) {
            throw new Error(`If "function_call" is provided, "functions" must also be.`);
        }
        const chatResult = await super._generate(finalMessages, options, runManager);
        const generatedText = chatResult.generations[0].message.content;
        // Builds a ChatResult whose message carries an OpenAI-style
        // `function_call` in additional_kwargs.
        const toFunctionCallResult = (content, name, toolInput) => ({
            generations: [
                {
                    message: new index_js_1.AIMessage({
                        content,
                        additional_kwargs: {
                            function_call: {
                                name,
                                arguments: toolInput ? JSON.stringify(toolInput) : "",
                            },
                        },
                    }),
                    text: "",
                },
            ],
        });
        if (forcedFunctionName !== undefined) {
            // Generation stopped before the closing tag; append it so the XML
            // parses cleanly.
            const parsed = new fast_xml_parser_1.XMLParser().parse(`${generatedText}</tool_input>`);
            return toFunctionCallResult("", forcedFunctionName, parsed.tool_input);
        }
        if (generatedText.includes("<tool>")) {
            const parsed = new fast_xml_parser_1.XMLParser().parse(`${generatedText}</tool_input>`);
            return toFunctionCallResult(generatedText.split("<tool>")[0], parsed.tool, parsed.tool_input);
        }
        return chatResult;
    }
    _llmType() {
        return "anthropic_functions";
    }
    /** @ignore */
    _combineLLMOutput() {
        return [];
    }
}
exports.AnthropicFunctions = AnthropicFunctions;
@@ -0,0 +1,20 @@
1
+ import type { ChatCompletionFunctions, CreateChatCompletionRequestFunctionCall } from "openai";
2
+ import { BaseChatModelParams } from "../../chat_models/base.js";
3
+ import { CallbackManagerForLLMRun } from "../../callbacks/manager.js";
4
+ import { BaseMessage, ChatResult } from "../../schema/index.js";
5
+ import { ChatAnthropic, type AnthropicInput } from "../../chat_models/anthropic.js";
6
+ import { BaseLanguageModelCallOptions } from "../../base_language/index.js";
7
+ import { StructuredTool } from "../../tools/base.js";
8
+ export interface ChatAnthropicFunctionsCallOptions extends BaseLanguageModelCallOptions {
9
+ function_call?: CreateChatCompletionRequestFunctionCall;
10
+ functions?: ChatCompletionFunctions[];
11
+ tools?: StructuredTool[];
12
+ }
13
+ export declare class AnthropicFunctions extends ChatAnthropic<ChatAnthropicFunctionsCallOptions> {
14
+ static lc_name(): string;
15
+ constructor(fields?: Partial<AnthropicInput> & BaseChatModelParams);
16
+ _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun | undefined): Promise<ChatResult>;
17
+ _llmType(): string;
18
+ /** @ignore */
19
+ _combineLLMOutput(): never[];
20
+ }
@@ -0,0 +1,125 @@
import { XMLParser } from "fast-xml-parser";
import { AIMessage, SystemMessage, } from "../../schema/index.js";
import { ChatAnthropic, DEFAULT_STOP_SEQUENCES, } from "../../chat_models/anthropic.js";
import { PromptTemplate } from "../../prompts/prompt.js";
import { formatToOpenAIFunction } from "../../tools/convert_to_openai.js";
// System prompt injected ahead of the conversation whenever functions/tools
// are supplied; instructs the model to emit XML <tool>/<tool_input> tags.
const TOOL_SYSTEM_PROMPT =
/* #__PURE__ */
PromptTemplate.fromTemplate(`In addition to responding, you can use tools.
You have access to the following tools.

{tools}

In order to use a tool, you can use <tool></tool> to specify the name,
and the <tool_input></tool_input> tags to specify the parameters.
Each parameter should be passed in as <$param_name>$value</$param_name>,
Where $param_name is the name of the specific parameter, and $value
is the value for that parameter.

You will then get back a response in the form <observation></observation>
For example, if you have a tool called 'search' that accepts a single
parameter 'query' that could run a google search, in order to search
for the weather in SF you would respond:

<tool>search</tool><tool_input><query>weather in SF</query></tool_input>
<observation>64 degrees</observation>`);
/**
 * Experimental wrapper that emulates OpenAI-style function calling on top of
 * ChatAnthropic by prompting for, and parsing, XML tool-invocation tags.
 */
export class AnthropicFunctions extends ChatAnthropic {
    static lc_name() {
        return "AnthropicFunctions";
    }
    constructor(fields) {
        super(fields ?? {});
    }
    async _generate(messages, options, runManager) {
        let finalMessages = messages;
        // Name of the function the caller forced via `function_call`, if any.
        let forcedFunctionName;
        if (options.tools) {
            // Merge structured tools (converted to OpenAI function schemas)
            // into any explicitly provided functions.
            // eslint-disable-next-line no-param-reassign
            options.functions = (options.functions ?? []).concat(options.tools.map(formatToOpenAIFunction));
        }
        if (options.functions !== undefined && options.functions.length > 0) {
            const systemContent = await TOOL_SYSTEM_PROMPT.format({
                tools: JSON.stringify(options.functions, null, 2),
            });
            finalMessages = [
                new SystemMessage({ content: systemContent }),
                ...finalMessages,
            ];
            // Stop generation at the closing tool_input tag so we can complete
            // and parse the XML ourselves.
            const stopSequences = options?.stop?.concat(DEFAULT_STOP_SEQUENCES) ??
                this.stopSequences ??
                DEFAULT_STOP_SEQUENCES;
            // eslint-disable-next-line no-param-reassign
            options.stop = stopSequences.concat(["</tool_input>"]);
            if (options.function_call) {
                forcedFunctionName =
                    typeof options.function_call === "string"
                        ? JSON.parse(options.function_call).name
                        : options.function_call.name;
                const matchingFunction = options.functions.find((fn) => fn.name === forcedFunctionName);
                if (!matchingFunction) {
                    throw new Error(`No matching function found for passed "function_call"`);
                }
                // Pre-seed the assistant turn with the tool tag so the model
                // continues with the tool input for the forced function.
                finalMessages = [
                    ...finalMessages,
                    new AIMessage({
                        content: `<tool>${forcedFunctionName}</tool>`,
                    }),
                ];
                // eslint-disable-next-line no-param-reassign
                delete options.function_call;
            }
            // eslint-disable-next-line no-param-reassign
            delete options.functions;
        }
        else if (options.function_call !== undefined) {
            throw new Error(`If "function_call" is provided, "functions" must also be.`);
        }
        const chatResult = await super._generate(finalMessages, options, runManager);
        const generatedText = chatResult.generations[0].message.content;
        // Builds a ChatResult whose message carries an OpenAI-style
        // `function_call` in additional_kwargs.
        const toFunctionCallResult = (content, name, toolInput) => ({
            generations: [
                {
                    message: new AIMessage({
                        content,
                        additional_kwargs: {
                            function_call: {
                                name,
                                arguments: toolInput ? JSON.stringify(toolInput) : "",
                            },
                        },
                    }),
                    text: "",
                },
            ],
        });
        if (forcedFunctionName !== undefined) {
            // Generation stopped before the closing tag; append it so the XML
            // parses cleanly.
            const parsed = new XMLParser().parse(`${generatedText}</tool_input>`);
            return toFunctionCallResult("", forcedFunctionName, parsed.tool_input);
        }
        if (generatedText.includes("<tool>")) {
            const parsed = new XMLParser().parse(`${generatedText}</tool_input>`);
            return toFunctionCallResult(generatedText.split("<tool>")[0], parsed.tool, parsed.tool_input);
        }
        return chatResult;
    }
    _llmType() {
        return "anthropic_functions";
    }
    /** @ignore */
    _combineLLMOutput() {
        return [];
    }
}