langchain 0.0.212 → 0.0.213
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/chains/combine_documents.cjs +1 -0
- package/chains/combine_documents.d.ts +1 -0
- package/chains/combine_documents.js +1 -0
- package/chains/history_aware_retriever.cjs +1 -0
- package/chains/history_aware_retriever.d.ts +1 -0
- package/chains/history_aware_retriever.js +1 -0
- package/chains/retrieval.cjs +1 -0
- package/chains/retrieval.d.ts +1 -0
- package/chains/retrieval.js +1 -0
- package/dist/agents/agent.cjs +1 -0
- package/dist/agents/agent.js +1 -0
- package/dist/agents/executor.cjs +21 -3
- package/dist/agents/executor.d.ts +1 -0
- package/dist/agents/executor.js +21 -3
- package/dist/agents/format_scratchpad/openai_functions.cjs +22 -1
- package/dist/agents/format_scratchpad/openai_functions.d.ts +10 -0
- package/dist/agents/format_scratchpad/openai_functions.js +21 -1
- package/dist/agents/index.cjs +11 -4
- package/dist/agents/index.d.ts +6 -3
- package/dist/agents/index.js +5 -3
- package/dist/agents/initialize.cjs +1 -1
- package/dist/agents/initialize.d.ts +1 -1
- package/dist/agents/initialize.js +1 -1
- package/dist/agents/openai/output_parser.cjs +20 -196
- package/dist/agents/openai/output_parser.d.ts +2 -111
- package/dist/agents/openai/output_parser.js +6 -193
- package/dist/agents/{openai → openai_functions}/index.cjs +78 -2
- package/dist/agents/{openai → openai_functions}/index.d.ts +75 -3
- package/dist/agents/{openai → openai_functions}/index.js +76 -1
- package/dist/agents/openai_functions/output_parser.cjs +102 -0
- package/dist/agents/openai_functions/output_parser.d.ts +56 -0
- package/dist/agents/openai_functions/output_parser.js +98 -0
- package/dist/agents/openai_tools/index.cjs +81 -0
- package/dist/agents/openai_tools/index.d.ts +80 -0
- package/dist/agents/openai_tools/index.js +77 -0
- package/dist/agents/openai_tools/output_parser.cjs +102 -0
- package/dist/agents/openai_tools/output_parser.d.ts +57 -0
- package/dist/agents/openai_tools/output_parser.js +98 -0
- package/dist/agents/react/index.cjs +75 -0
- package/dist/agents/react/index.d.ts +60 -0
- package/dist/agents/react/index.js +71 -0
- package/dist/agents/react/output_parser.cjs +0 -1
- package/dist/agents/react/output_parser.d.ts +0 -1
- package/dist/agents/react/output_parser.js +0 -1
- package/dist/agents/structured_chat/index.cjs +85 -1
- package/dist/agents/structured_chat/index.d.ts +71 -0
- package/dist/agents/structured_chat/index.js +83 -0
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.cjs +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.js +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.d.ts +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -0
- package/dist/agents/toolkits/json/json.cjs +2 -0
- package/dist/agents/toolkits/json/json.d.ts +2 -0
- package/dist/agents/toolkits/json/json.js +2 -0
- package/dist/agents/toolkits/openapi/openapi.cjs +2 -0
- package/dist/agents/toolkits/openapi/openapi.d.ts +2 -0
- package/dist/agents/toolkits/openapi/openapi.js +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.cjs +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.js +2 -0
- package/dist/agents/xml/index.cjs +75 -1
- package/dist/agents/xml/index.d.ts +65 -0
- package/dist/agents/xml/index.js +73 -0
- package/dist/chains/combine_documents/base.cjs +16 -0
- package/dist/chains/combine_documents/base.d.ts +13 -0
- package/dist/chains/combine_documents/base.js +12 -0
- package/dist/chains/combine_documents/index.cjs +5 -0
- package/dist/chains/combine_documents/index.d.ts +1 -0
- package/dist/chains/combine_documents/index.js +1 -0
- package/dist/chains/combine_documents/reduce.cjs +5 -2
- package/dist/chains/combine_documents/reduce.js +4 -1
- package/dist/chains/combine_documents/stuff.cjs +42 -0
- package/dist/chains/combine_documents/stuff.d.ts +28 -0
- package/dist/chains/combine_documents/stuff.js +38 -0
- package/dist/chains/conversational_retrieval_chain.cjs +3 -3
- package/dist/chains/conversational_retrieval_chain.js +1 -1
- package/dist/chains/history_aware_retriever.cjs +55 -0
- package/dist/chains/history_aware_retriever.d.ts +55 -0
- package/dist/chains/history_aware_retriever.js +51 -0
- package/dist/chains/retrieval.cjs +60 -0
- package/dist/chains/retrieval.d.ts +65 -0
- package/dist/chains/retrieval.js +56 -0
- package/dist/load/import_map.cjs +7 -3
- package/dist/load/import_map.d.ts +4 -0
- package/dist/load/import_map.js +4 -0
- package/dist/output_parsers/json.cjs +2 -78
- package/dist/output_parsers/json.d.ts +1 -1
- package/dist/output_parsers/json.js +1 -77
- package/dist/output_parsers/openai_functions.d.ts +1 -1
- package/dist/tools/retriever.cjs +17 -0
- package/dist/tools/retriever.d.ts +10 -0
- package/dist/tools/retriever.js +13 -0
- package/package.json +36 -4
- package/tools/retriever.cjs +1 -0
- package/tools/retriever.d.ts +1 -0
- package/tools/retriever.js +1 -0
- /package/dist/agents/{openai → openai_functions}/prompt.cjs +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.d.ts +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.js +0 -0
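The listing above also adds new public entrypoints under `package/chains/*` (`combine_documents`, `history_aware_retriever`, `retrieval`) and `package/tools/retriever`. A minimal sketch of how those chain helpers are typically wired together, assuming the new modules export `createStuffDocumentsChain` and `createRetrievalChain` with roughly the option shapes documented in later langchain releases (those exported names are not shown verbatim in the hunks below):

```typescript
// Hedged sketch, not part of the diff: composing the new chains entrypoints.
// The createStuffDocumentsChain / createRetrievalChain names and option shapes
// are assumptions; the retriever is any BaseRetrieverInterface implementation.
import { ChatOpenAI } from "@langchain/openai";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

declare const retriever: BaseRetrieverInterface; // e.g. vectorStore.asRetriever()

const llm = new ChatOpenAI({ temperature: 0 });

// The combine-documents prompt needs a {context} slot for the stuffed docs.
const prompt = ChatPromptTemplate.fromTemplate(
  "Answer the question using only the context below.\n\n{context}\n\nQuestion: {input}"
);

// "Stuff" all retrieved documents into a single prompt and call the model.
const combineDocsChain = await createStuffDocumentsChain({ llm, prompt });

// Retrieve documents for the input, then pass them to the combine chain.
const retrievalChain = await createRetrievalChain({ retriever, combineDocsChain });

const result = await retrievalChain.invoke({ input: "what is LangChain?" });
```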
package/dist/agents/react/index.cjs
@@ -0,0 +1,75 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createReactAgent = void 0;
+const runnables_1 = require("@langchain/core/runnables");
+const render_js_1 = require("../../tools/render.cjs");
+const log_js_1 = require("../format_scratchpad/log.cjs");
+const output_parser_js_1 = require("./output_parser.cjs");
+/**
+ * Create an agent that uses ReAct prompting.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createReactAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { OpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<PromptTemplate>("hwchase17/react");
+ *
+ * const llm = new OpenAI({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createReactAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ * ```
+ */
+async function createReactAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const toolNames = tools.map((tool) => tool.name);
+    const partialedPrompt = await prompt.partial({
+        tools: (0, render_js_1.renderTextDescription)(tools),
+        tool_names: toolNames.join(", "),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["\nObservation:"],
+    });
+    const agent = runnables_1.RunnableSequence.from([
+        runnables_1.RunnablePassthrough.assign({
+            agent_scratchpad: (input) => (0, log_js_1.formatLogToString)(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        new output_parser_js_1.ReActSingleInputOutputParser({
+            toolNames,
+        }),
+    ]);
+    return agent;
+}
+exports.createReactAgent = createReactAgent;
package/dist/agents/react/index.d.ts
@@ -0,0 +1,60 @@
+import type { ToolInterface } from "@langchain/core/tools";
+import { BasePromptTemplate } from "@langchain/core/prompts";
+import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
+import { RunnableSequence } from "@langchain/core/runnables";
+/**
+ * Params used by the createXmlAgent function.
+ */
+export type CreateReactAgentParams = {
+    /** LLM to use for the agent. */
+    llm: BaseLanguageModelInterface;
+    /** Tools this agent has access to. */
+    tools: ToolInterface[];
+    /**
+     * The prompt to use. Must have input keys for
+     * `tools`, `tool_names`, and `agent_scratchpad`.
+     */
+    prompt: BasePromptTemplate;
+};
+/**
+ * Create an agent that uses ReAct prompting.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createReactAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { OpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<PromptTemplate>("hwchase17/react");
+ *
+ * const llm = new OpenAI({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createReactAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ * ```
+ */
+export declare function createReactAgent({ llm, tools, prompt, }: CreateReactAgentParams): Promise<RunnableSequence<Record<string, unknown>, import("@langchain/core/agents").AgentAction | import("@langchain/core/agents").AgentFinish>>;
package/dist/agents/react/index.js
@@ -0,0 +1,71 @@
+import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
+import { renderTextDescription } from "../../tools/render.js";
+import { formatLogToString } from "../format_scratchpad/log.js";
+import { ReActSingleInputOutputParser } from "./output_parser.js";
+/**
+ * Create an agent that uses ReAct prompting.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createReactAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { PromptTemplate } from "@langchain/core/prompts";
+ *
+ * import { OpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<PromptTemplate>("hwchase17/react");
+ *
+ * const llm = new OpenAI({
+ *   temperature: 0,
+ * });
+ *
+ * const agent = await createReactAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ * ```
+ */
+export async function createReactAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const toolNames = tools.map((tool) => tool.name);
+    const partialedPrompt = await prompt.partial({
+        tools: renderTextDescription(tools),
+        tool_names: toolNames.join(", "),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["\nObservation:"],
+    });
+    const agent = RunnableSequence.from([
+        RunnablePassthrough.assign({
+            agent_scratchpad: (input) => formatLogToString(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        new ReActSingleInputOutputParser({
+            toolNames,
+        }),
+    ]);
+    return agent;
+}
package/dist/agents/react/output_parser.cjs
@@ -43,7 +43,6 @@ const FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "Parsing LLM output produ
  * const result = await agent.invoke({
  *   input: "whats the weather in pomfret?",
  * });
- *
  * ```
  */
 class ReActSingleInputOutputParser extends types_js_1.AgentActionOutputParser {
package/dist/agents/react/output_parser.d.ts
@@ -37,7 +37,6 @@ import { AgentAction, AgentFinish } from "../../schema/index.js";
  * const result = await agent.invoke({
  *   input: "whats the weather in pomfret?",
  * });
- *
  * ```
  */
 export declare class ReActSingleInputOutputParser extends AgentActionOutputParser {
package/dist/agents/react/output_parser.js
@@ -40,7 +40,6 @@ const FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "Parsing LLM output produ
  * const result = await agent.invoke({
  *   input: "whats the weather in pomfret?",
  * });
- *
  * ```
  */
 export class ReActSingleInputOutputParser extends AgentActionOutputParser {
package/dist/agents/structured_chat/index.cjs
@@ -1,13 +1,16 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.StructuredChatAgent = void 0;
+exports.createStructuredChatAgent = exports.StructuredChatAgent = void 0;
 const zod_to_json_schema_1 = require("zod-to-json-schema");
+const runnables_1 = require("@langchain/core/runnables");
 const llm_chain_js_1 = require("../../chains/llm_chain.cjs");
 const prompt_js_1 = require("../../prompts/prompt.cjs");
 const chat_js_1 = require("../../prompts/chat.cjs");
 const agent_js_1 = require("../agent.cjs");
 const outputParser_js_1 = require("./outputParser.cjs");
 const prompt_js_2 = require("./prompt.cjs");
+const render_js_1 = require("../../tools/render.cjs");
+const log_js_1 = require("../format_scratchpad/log.cjs");
 /**
  * Agent that interoperates with Structured Tools using React logic.
  * @augments Agent
@@ -151,3 +154,84 @@ class StructuredChatAgent extends agent_js_1.Agent {
     }
 }
 exports.StructuredChatAgent = StructuredChatAgent;
+/**
+ * Create an agent aimed at supporting tools with multiple inputs.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/structured-chat-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createStructuredChatAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+async function createStructuredChatAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const toolNames = tools.map((tool) => tool.name);
+    const partialedPrompt = await prompt.partial({
+        tools: (0, render_js_1.renderTextDescriptionAndArgs)(tools),
+        tool_names: toolNames.join(", "),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["Observation"],
+    });
+    const agent = runnables_1.RunnableSequence.from([
+        runnables_1.RunnablePassthrough.assign({
+            agent_scratchpad: (input) => (0, log_js_1.formatLogToString)(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        outputParser_js_1.StructuredChatOutputParserWithRetries.fromLLM(llm, {
+            toolNames,
+        }),
+    ]);
+    return agent;
+}
+exports.createStructuredChatAgent = createStructuredChatAgent;
package/dist/agents/structured_chat/index.d.ts
@@ -1,5 +1,7 @@
 import type { StructuredToolInterface } from "@langchain/core/tools";
 import type { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
+import { RunnableSequence } from "@langchain/core/runnables";
+import type { BasePromptTemplate } from "@langchain/core/prompts";
 import { BaseMessagePromptTemplate, ChatPromptTemplate } from "../../prompts/chat.js";
 import { AgentStep } from "../../schema/index.js";
 import { Optional } from "../../types/type-utils.js";
@@ -90,3 +92,72 @@ export declare class StructuredChatAgent extends Agent {
      */
     static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;
 }
+/**
+ * Params used by the createStructuredChatAgent function.
+ */
+export type CreateStructuredChatAgentParams = {
+    /** LLM to use as the agent. */
+    llm: BaseLanguageModelInterface;
+    /** Tools this agent has access to. */
+    tools: StructuredToolInterface[];
+    /**
+     * The prompt to use. Must have input keys for
+     * `tools`, `tool_names`, and `agent_scratchpad`.
+     */
+    prompt: BasePromptTemplate;
+};
+/**
+ * Create an agent aimed at supporting tools with multiple inputs.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/structured-chat-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createStructuredChatAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+export declare function createStructuredChatAgent({ llm, tools, prompt, }: CreateStructuredChatAgentParams): Promise<RunnableSequence<Record<string, unknown>, import("../../schema/index.js").AgentAction | import("../../schema/index.js").AgentFinish>>;
package/dist/agents/structured_chat/index.js
@@ -1,10 +1,13 @@
 import { zodToJsonSchema } from "zod-to-json-schema";
+import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
 import { LLMChain } from "../../chains/llm_chain.js";
 import { PromptTemplate } from "../../prompts/prompt.js";
 import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "../../prompts/chat.js";
 import { Agent } from "../agent.js";
 import { StructuredChatOutputParserWithRetries } from "./outputParser.js";
 import { FORMAT_INSTRUCTIONS, PREFIX, SUFFIX } from "./prompt.js";
+import { renderTextDescriptionAndArgs } from "../../tools/render.js";
+import { formatLogToString } from "../format_scratchpad/log.js";
 /**
  * Agent that interoperates with Structured Tools using React logic.
  * @augments Agent
@@ -147,3 +150,83 @@ export class StructuredChatAgent extends Agent {
         });
     }
 }
+/**
+ * Create an agent aimed at supporting tools with multiple inputs.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/structured-chat-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createStructuredChatAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+export async function createStructuredChatAgent({ llm, tools, prompt, }) {
+    const missingVariables = ["tools", "tool_names", "agent_scratchpad"].filter((v) => !prompt.inputVariables.includes(v));
+    if (missingVariables.length > 0) {
+        throw new Error(`Provided prompt is missing required input variables: ${JSON.stringify(missingVariables)}`);
+    }
+    const toolNames = tools.map((tool) => tool.name);
+    const partialedPrompt = await prompt.partial({
+        tools: renderTextDescriptionAndArgs(tools),
+        tool_names: toolNames.join(", "),
+    });
+    // TODO: Add .bind to core runnable interface.
+    const llmWithStop = llm.bind({
+        stop: ["Observation"],
+    });
+    const agent = RunnableSequence.from([
+        RunnablePassthrough.assign({
+            agent_scratchpad: (input) => formatLogToString(input.steps),
+        }),
+        partialedPrompt,
+        llmWithStop,
+        StructuredChatOutputParserWithRetries.fromLLM(llm, {
+            toolNames,
+        }),
+    ]);
+    return agent;
+}
package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.cjs
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIAgentTokenBufferMemory = void 0;
 const base_js_1 = require("../../../memory/base.cjs");
 const chat_memory_js_1 = require("../../../memory/chat_memory.cjs");
-const index_js_1 = require("../../
+const index_js_1 = require("../../openai_functions/index.cjs");
 /**
  * Memory used to save agent output and intermediate steps.
  */
package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.js
@@ -1,6 +1,6 @@
 import { getBufferString, getInputValue, getOutputValue, } from "../../../memory/base.js";
 import { BaseChatMemory, } from "../../../memory/chat_memory.js";
-import { _formatIntermediateSteps } from "../../
+import { _formatIntermediateSteps } from "../../openai_functions/index.js";
 /**
  * Memory used to save agent output and intermediate steps.
  */
package/dist/agents/toolkits/conversational_retrieval/tool.cjs
@@ -4,6 +4,7 @@ exports.createRetrieverTool = void 0;
 const zod_1 = require("zod");
 const dynamic_js_1 = require("../../../tools/dynamic.cjs");
 const document_js_1 = require("../../../util/document.cjs");
+/** @deprecated Use "langchain/tools/retriever" instead. */
 function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
package/dist/agents/toolkits/conversational_retrieval/tool.d.ts
@@ -1,6 +1,7 @@
 import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
 import { z } from "zod";
 import { DynamicStructuredTool, DynamicStructuredToolInput } from "../../../tools/dynamic.js";
+/** @deprecated Use "langchain/tools/retriever" instead. */
 export declare function createRetrieverTool(retriever: BaseRetrieverInterface, input: Omit<DynamicStructuredToolInput, "func" | "schema">): DynamicStructuredTool<z.ZodObject<{
     input: z.ZodString;
 }, "strip", z.ZodTypeAny, {
package/dist/agents/toolkits/conversational_retrieval/tool.js
@@ -1,6 +1,7 @@
 import { z } from "zod";
 import { DynamicStructuredTool, } from "../../../tools/dynamic.js";
 import { formatDocumentsAsString } from "../../../util/document.js";
+/** @deprecated Use "langchain/tools/retriever" instead. */
 export function createRetrieverTool(retriever, input) {
     const func = async ({ input }, runManager) => {
         const docs = await retriever.getRelevantDocuments(input, runManager?.getChild("retriever"));
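The three `@deprecated` notes above point at the new `langchain/tools/retriever` entrypoint added by this release (see `package/tools/retriever.*` in the listing). A short hedged sketch of the replacement call, assuming the new module exports a `createRetrieverTool(retriever, { name, description })` helper that mirrors the deprecated toolkit version:

```typescript
// Hedged sketch of the replacement for the deprecated toolkit helper above.
// The export name and signature are assumed to mirror the deprecated version.
import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { createRetrieverTool } from "langchain/tools/retriever";

declare const retriever: BaseRetrieverInterface; // e.g. vectorStore.asRetriever()

// Wrap the retriever as a regular agent tool with a name and description.
const searchTool = createRetrieverTool(retriever, {
  name: "search_docs",
  description: "Searches the indexed documents and returns relevant passages.",
});
```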
package/dist/agents/toolkits/json/json.cjs
@@ -42,6 +42,8 @@ class JsonToolkit extends base_js_1.Toolkit {
 }
 exports.JsonToolkit = JsonToolkit;
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/json/json.d.ts
@@ -22,6 +22,8 @@ export declare class JsonToolkit extends Toolkit {
     constructor(jsonSpec: JsonSpec);
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/json/json.js
@@ -38,6 +38,8 @@ export class JsonToolkit extends Toolkit {
     }
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates a JSON agent using a language model, a JSON toolkit, and
  * optional prompt arguments. It creates a prompt for the agent using the
  * JSON tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/openapi/openapi.cjs
@@ -69,6 +69,8 @@ class OpenApiToolkit extends RequestsToolkit {
 }
 exports.OpenApiToolkit = OpenApiToolkit;
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/openapi/openapi.d.ts
@@ -41,6 +41,8 @@ export declare class OpenApiToolkit extends RequestsToolkit {
     constructor(jsonSpec: JsonSpec, llm: BaseLanguageModelInterface, headers?: Headers);
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/openapi/openapi.js
@@ -64,6 +64,8 @@ export class OpenApiToolkit extends RequestsToolkit {
     }
 }
 /**
+ * @deprecated Create a specific agent with a custom tool instead.
+ *
  * Creates an OpenAPI agent using a language model, an OpenAPI toolkit,
  * and optional prompt arguments. It creates a prompt for the agent using
  * the OpenAPI tools and the provided prefix and suffix. It then creates a
package/dist/agents/toolkits/vectorstore/vectorstore.cjs
@@ -93,6 +93,7 @@ class VectorStoreRouterToolkit extends base_js_1.Toolkit {
     }
 }
 exports.VectorStoreRouterToolkit = VectorStoreRouterToolkit;
+/** @deprecated Create a specific agent with a custom tool instead. */
 function createVectorStoreAgent(llm, toolkit, args) {
     const { prefix = prompt_js_1.VECTOR_PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
@@ -113,6 +114,7 @@ function createVectorStoreAgent(llm, toolkit, args) {
     });
 }
 exports.createVectorStoreAgent = createVectorStoreAgent;
+/** @deprecated Create a specific agent with a custom tool instead. */
 function createVectorStoreRouterAgent(llm, toolkit, args) {
     const { prefix = prompt_js_1.VECTOR_ROUTER_PREFIX, suffix = prompt_js_2.SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
package/dist/agents/toolkits/vectorstore/vectorstore.d.ts
@@ -50,5 +50,7 @@ export declare class VectorStoreRouterToolkit extends Toolkit {
     llm: BaseLanguageModelInterface;
     constructor(vectorStoreInfos: VectorStoreInfo[], llm: BaseLanguageModelInterface);
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export declare function createVectorStoreAgent(llm: BaseLanguageModelInterface, toolkit: VectorStoreToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
+/** @deprecated Create a specific agent with a custom tool instead. */
 export declare function createVectorStoreRouterAgent(llm: BaseLanguageModelInterface, toolkit: VectorStoreRouterToolkit, args?: ZeroShotCreatePromptArgs): AgentExecutor;
package/dist/agents/toolkits/vectorstore/vectorstore.js
@@ -88,6 +88,7 @@ export class VectorStoreRouterToolkit extends Toolkit {
         });
     }
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export function createVectorStoreAgent(llm, toolkit, args) {
     const { prefix = VECTOR_PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
@@ -107,6 +108,7 @@ export function createVectorStoreAgent(llm, toolkit, args) {
         returnIntermediateSteps: true,
     });
 }
+/** @deprecated Create a specific agent with a custom tool instead. */
 export function createVectorStoreRouterAgent(llm, toolkit, args) {
     const { prefix = VECTOR_ROUTER_PREFIX, suffix = SUFFIX, inputVariables = ["input", "agent_scratchpad"], } = args ?? {};
     const { tools } = toolkit;
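The vector store toolkit helpers above are deprecated in favor of building a specific agent with a custom tool. A rough sketch of that pattern, composed from APIs introduced elsewhere in this diff (`createStructuredChatAgent` and the `langchain/tools/retriever` entrypoint); the tool name, description, and retriever are illustrative assumptions:

```typescript
// Hedged sketch of the "specific agent with a custom tool" pattern the
// deprecation notices suggest. Hub prompt id and model name come from the
// createStructuredChatAgent example in this diff; the tool details are assumed.
import { AgentExecutor, createStructuredChatAgent } from "langchain/agents";
import { createRetrieverTool } from "langchain/tools/retriever";
import { pull } from "langchain/hub";
import type { ChatPromptTemplate } from "@langchain/core/prompts";
import type { BaseRetrieverInterface } from "@langchain/core/retrievers";
import { ChatOpenAI } from "@langchain/openai";

declare const retriever: BaseRetrieverInterface; // e.g. vectorStore.asRetriever()

// Expose the vector store as an ordinary tool instead of a dedicated toolkit agent.
const tools = [
  createRetrieverTool(retriever, {
    name: "document_search",
    description: "Searches the indexed documents and returns relevant passages.",
  }),
];

const prompt = await pull<ChatPromptTemplate>("hwchase17/structured-chat-agent");
const llm = new ChatOpenAI({ temperature: 0, modelName: "gpt-3.5-turbo-1106" });

const agent = await createStructuredChatAgent({ llm, tools, prompt });
const agentExecutor = new AgentExecutor({ agent, tools });

const result = await agentExecutor.invoke({ input: "what is LangChain?" });
```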