langchain 0.0.212 → 0.0.213
This diff shows the changes between two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
- package/chains/combine_documents.cjs +1 -0
- package/chains/combine_documents.d.ts +1 -0
- package/chains/combine_documents.js +1 -0
- package/chains/history_aware_retriever.cjs +1 -0
- package/chains/history_aware_retriever.d.ts +1 -0
- package/chains/history_aware_retriever.js +1 -0
- package/chains/retrieval.cjs +1 -0
- package/chains/retrieval.d.ts +1 -0
- package/chains/retrieval.js +1 -0
- package/dist/agents/agent.cjs +1 -0
- package/dist/agents/agent.js +1 -0
- package/dist/agents/executor.cjs +21 -3
- package/dist/agents/executor.d.ts +1 -0
- package/dist/agents/executor.js +21 -3
- package/dist/agents/format_scratchpad/openai_functions.cjs +22 -1
- package/dist/agents/format_scratchpad/openai_functions.d.ts +10 -0
- package/dist/agents/format_scratchpad/openai_functions.js +21 -1
- package/dist/agents/index.cjs +11 -4
- package/dist/agents/index.d.ts +6 -3
- package/dist/agents/index.js +5 -3
- package/dist/agents/initialize.cjs +1 -1
- package/dist/agents/initialize.d.ts +1 -1
- package/dist/agents/initialize.js +1 -1
- package/dist/agents/openai/output_parser.cjs +20 -196
- package/dist/agents/openai/output_parser.d.ts +2 -111
- package/dist/agents/openai/output_parser.js +6 -193
- package/dist/agents/{openai → openai_functions}/index.cjs +78 -2
- package/dist/agents/{openai → openai_functions}/index.d.ts +75 -3
- package/dist/agents/{openai → openai_functions}/index.js +76 -1
- package/dist/agents/openai_functions/output_parser.cjs +102 -0
- package/dist/agents/openai_functions/output_parser.d.ts +56 -0
- package/dist/agents/openai_functions/output_parser.js +98 -0
- package/dist/agents/openai_tools/index.cjs +81 -0
- package/dist/agents/openai_tools/index.d.ts +80 -0
- package/dist/agents/openai_tools/index.js +77 -0
- package/dist/agents/openai_tools/output_parser.cjs +102 -0
- package/dist/agents/openai_tools/output_parser.d.ts +57 -0
- package/dist/agents/openai_tools/output_parser.js +98 -0
- package/dist/agents/react/index.cjs +75 -0
- package/dist/agents/react/index.d.ts +60 -0
- package/dist/agents/react/index.js +71 -0
- package/dist/agents/react/output_parser.cjs +0 -1
- package/dist/agents/react/output_parser.d.ts +0 -1
- package/dist/agents/react/output_parser.js +0 -1
- package/dist/agents/structured_chat/index.cjs +85 -1
- package/dist/agents/structured_chat/index.d.ts +71 -0
- package/dist/agents/structured_chat/index.js +83 -0
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.cjs +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.js +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.cjs +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.d.ts +1 -0
- package/dist/agents/toolkits/conversational_retrieval/tool.js +1 -0
- package/dist/agents/toolkits/json/json.cjs +2 -0
- package/dist/agents/toolkits/json/json.d.ts +2 -0
- package/dist/agents/toolkits/json/json.js +2 -0
- package/dist/agents/toolkits/openapi/openapi.cjs +2 -0
- package/dist/agents/toolkits/openapi/openapi.d.ts +2 -0
- package/dist/agents/toolkits/openapi/openapi.js +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.cjs +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.d.ts +2 -0
- package/dist/agents/toolkits/vectorstore/vectorstore.js +2 -0
- package/dist/agents/xml/index.cjs +75 -1
- package/dist/agents/xml/index.d.ts +65 -0
- package/dist/agents/xml/index.js +73 -0
- package/dist/chains/combine_documents/base.cjs +16 -0
- package/dist/chains/combine_documents/base.d.ts +13 -0
- package/dist/chains/combine_documents/base.js +12 -0
- package/dist/chains/combine_documents/index.cjs +5 -0
- package/dist/chains/combine_documents/index.d.ts +1 -0
- package/dist/chains/combine_documents/index.js +1 -0
- package/dist/chains/combine_documents/reduce.cjs +5 -2
- package/dist/chains/combine_documents/reduce.js +4 -1
- package/dist/chains/combine_documents/stuff.cjs +42 -0
- package/dist/chains/combine_documents/stuff.d.ts +28 -0
- package/dist/chains/combine_documents/stuff.js +38 -0
- package/dist/chains/conversational_retrieval_chain.cjs +3 -3
- package/dist/chains/conversational_retrieval_chain.js +1 -1
- package/dist/chains/history_aware_retriever.cjs +55 -0
- package/dist/chains/history_aware_retriever.d.ts +55 -0
- package/dist/chains/history_aware_retriever.js +51 -0
- package/dist/chains/retrieval.cjs +60 -0
- package/dist/chains/retrieval.d.ts +65 -0
- package/dist/chains/retrieval.js +56 -0
- package/dist/load/import_map.cjs +7 -3
- package/dist/load/import_map.d.ts +4 -0
- package/dist/load/import_map.js +4 -0
- package/dist/output_parsers/json.cjs +2 -78
- package/dist/output_parsers/json.d.ts +1 -1
- package/dist/output_parsers/json.js +1 -77
- package/dist/output_parsers/openai_functions.d.ts +1 -1
- package/dist/tools/retriever.cjs +17 -0
- package/dist/tools/retriever.d.ts +10 -0
- package/dist/tools/retriever.js +13 -0
- package/package.json +36 -4
- package/tools/retriever.cjs +1 -0
- package/tools/retriever.d.ts +1 -0
- package/tools/retriever.js +1 -0
- /package/dist/agents/{openai → openai_functions}/prompt.cjs +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.d.ts +0 -0
- /package/dist/agents/{openai → openai_functions}/prompt.js +0 -0
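The new top-level entrypoints listed above (`package/chains/combine_documents`, `package/chains/retrieval`, `package/chains/history_aware_retriever`, and `package/tools/retriever`) are not expanded in the hunks below. As a rough orientation only, the following sketch shows how entrypoints of this shape are typically wired together; the export names `createStuffDocumentsChain` and `createRetrievalChain` are assumptions inferred from the file paths, not taken from this diff.

```typescript
// Hedged sketch: assumes `langchain/chains/combine_documents` exports
// `createStuffDocumentsChain` and `langchain/chains/retrieval` exports
// `createRetrievalChain`; neither export is shown in this diff.
import { ChatOpenAI } from "langchain/chat_models/openai";
import { OpenAIEmbeddings } from "langchain/embeddings/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { ChatPromptTemplate } from "langchain/prompts";
import { createStuffDocumentsChain } from "langchain/chains/combine_documents";
import { createRetrievalChain } from "langchain/chains/retrieval";

// Build a tiny in-memory retriever so the example is self-contained.
const vectorstore = await MemoryVectorStore.fromTexts(
  ["LangChain is a framework for building LLM-powered applications."],
  [{ source: "docs" }],
  new OpenAIEmbeddings()
);
const retriever = vectorstore.asRetriever();

// The combine-documents chain stuffs retrieved docs into `{context}`.
const prompt = ChatPromptTemplate.fromMessages([
  ["system", "Answer the question using only this context:\n\n{context}"],
  ["human", "{input}"],
]);
const combineDocsChain = await createStuffDocumentsChain({
  llm: new ChatOpenAI({ temperature: 0 }),
  prompt,
});

// The retrieval chain feeds `{input}` to the retriever, then to the chain above.
const retrievalChain = await createRetrievalChain({
  retriever,
  combineDocsChain,
});

const result = await retrievalChain.invoke({ input: "What is LangChain?" });
console.log(result);
```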

package/dist/agents/openai_functions/output_parser.d.ts

@@ -0,0 +1,56 @@
+import { AgentAction, AgentFinish, BaseMessage, ChatGeneration } from "../../schema/index.js";
+import { AgentActionOutputParser } from "../types.js";
+/**
+ * Type that represents an agent action with an optional message log.
+ */
+export type FunctionsAgentAction = AgentAction & {
+    messageLog?: BaseMessage[];
+};
+/**
+ * @example
+ * ```typescript
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["ai", "You are a helpful assistant"],
+ *   ["human", "{input}"],
+ *   new MessagesPlaceholder("agent_scratchpad"),
+ * ]);
+ *
+ * const modelWithFunctions = new ChatOpenAI({
+ *   modelName: "gpt-4",
+ *   temperature: 0,
+ * }).bind({
+ *   functions: tools.map((tool) => formatToOpenAIFunction(tool)),
+ * });
+ *
+ * const runnableAgent = RunnableSequence.from([
+ *   {
+ *     input: (i) => i.input,
+ *     agent_scratchpad: (i) => formatAgentSteps(i.steps),
+ *   },
+ *   prompt,
+ *   modelWithFunctions,
+ *   new OpenAIFunctionsAgentOutputParser(),
+ * ]);
+ *
+ * const result = await runnableAgent.invoke({
+ *   input: "What is the weather in New York?",
+ *   steps: agentSteps,
+ * });
+ *
+ * ```
+ */
+export declare class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
+    lc_namespace: string[];
+    static lc_name(): string;
+    parse(text: string): Promise<AgentAction | AgentFinish>;
+    parseResult(generations: ChatGeneration[]): Promise<AgentFinish | FunctionsAgentAction>;
+    /**
+     * Parses the output message into a FunctionsAgentAction or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A FunctionsAgentAction or AgentFinish object.
+     */
+    parseAIMessage(message: BaseMessage): FunctionsAgentAction | AgentFinish;
+    getFormatInstructions(): string;
+}

package/dist/agents/openai_functions/output_parser.js

@@ -0,0 +1,98 @@
+import { isBaseMessage, } from "../../schema/index.js";
+import { AgentActionOutputParser } from "../types.js";
+import { OutputParserException } from "../../schema/output_parser.js";
+/**
+ * @example
+ * ```typescript
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["ai", "You are a helpful assistant"],
+ *   ["human", "{input}"],
+ *   new MessagesPlaceholder("agent_scratchpad"),
+ * ]);
+ *
+ * const modelWithFunctions = new ChatOpenAI({
+ *   modelName: "gpt-4",
+ *   temperature: 0,
+ * }).bind({
+ *   functions: tools.map((tool) => formatToOpenAIFunction(tool)),
+ * });
+ *
+ * const runnableAgent = RunnableSequence.from([
+ *   {
+ *     input: (i) => i.input,
+ *     agent_scratchpad: (i) => formatAgentSteps(i.steps),
+ *   },
+ *   prompt,
+ *   modelWithFunctions,
+ *   new OpenAIFunctionsAgentOutputParser(),
+ * ]);
+ *
+ * const result = await runnableAgent.invoke({
+ *   input: "What is the weather in New York?",
+ *   steps: agentSteps,
+ * });
+ *
+ * ```
+ */
+export class OpenAIFunctionsAgentOutputParser extends AgentActionOutputParser {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "agents", "openai"]
+        });
+    }
+    static lc_name() {
+        return "OpenAIFunctionsAgentOutputParser";
+    }
+    async parse(text) {
+        throw new Error(`OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+    }
+    async parseResult(generations) {
+        if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
+            return this.parseAIMessage(generations[0].message);
+        }
+        throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
+    }
+    /**
+     * Parses the output message into a FunctionsAgentAction or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A FunctionsAgentAction or AgentFinish object.
+     */
+    parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
+        if (message.additional_kwargs.function_call) {
+            // eslint-disable-next-line prefer-destructuring
+            const function_call = message.additional_kwargs.function_call;
+            try {
+                const toolInput = function_call.arguments
+                    ? JSON.parse(function_call.arguments)
+                    : {};
+                return {
+                    tool: function_call.name,
+                    toolInput,
+                    log: `Invoking "${function_call.name}" with ${function_call.arguments ?? "{}"}\n${message.content}`,
+                    messageLog: [message],
+                };
+            }
+            catch (error) {
+                throw new OutputParserException(`Failed to parse function arguments from chat model response. Text: "${function_call.arguments}". ${error}`);
+            }
+        }
+        else {
+            return {
+                returnValues: { output: message.content },
+                log: message.content,
+            };
+        }
+    }
+    getFormatInstructions() {
+        throw new Error("getFormatInstructions not implemented inside OpenAIFunctionsAgentOutputParser.");
+    }
+}
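To make `parseAIMessage` above concrete, here is a minimal sketch of its input/output mapping. The message contents are invented for illustration, and the `langchain/agents/openai/output_parser` entrypoint path is an assumption (the class lives in `dist/agents/openai/output_parser` per the hunks in this diff).

```typescript
// Illustration only: the message below is a hand-written stand-in for an
// OpenAI function-calling response; the entrypoint path is assumed.
import { AIMessage } from "langchain/schema";
import { OpenAIFunctionsAgentOutputParser } from "langchain/agents/openai/output_parser";

const message = new AIMessage({
  content: "",
  additional_kwargs: {
    function_call: {
      name: "get_current_weather",
      arguments: JSON.stringify({ city: "New York" }),
    },
  },
});

const action = new OpenAIFunctionsAgentOutputParser().parseAIMessage(message);
// action is a FunctionsAgentAction:
// {
//   tool: "get_current_weather",
//   toolInput: { city: "New York" },
//   log: 'Invoking "get_current_weather" with {"city":"New York"}\n',
//   messageLog: [message],
// }
// A message without a function_call instead yields an AgentFinish:
// { returnValues: { output: message.content }, log: message.content }
```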

package/dist/agents/openai_tools/index.cjs

@@ -0,0 +1,81 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.createOpenAIToolsAgent = exports.OpenAIToolsAgentOutputParser = void 0;
+const runnables_1 = require("@langchain/core/runnables");
+const openai_tools_js_1 = require("../format_scratchpad/openai_tools.cjs");
+const convert_to_openai_js_1 = require("../../tools/convert_to_openai.cjs");
+const output_parser_js_1 = require("../openai/output_parser.cjs");
+Object.defineProperty(exports, "OpenAIToolsAgentOutputParser", { enumerable: true, get: function () { return output_parser_js_1.OpenAIToolsAgentOutputParser; } });
+/**
+ * Create an agent that uses OpenAI-style tool calling.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/openai-tools-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createOpenAIToolsAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+async function createOpenAIToolsAgent({ llm, tools, prompt, }) {
+    if (!prompt.inputVariables.includes("agent_scratchpad")) {
+        throw new Error([
+            `Prompt must have an input variable named "agent_scratchpad".`,
+            `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
+        ].join("\n"));
+    }
+    const modelWithTools = llm.bind({ tools: tools.map(convert_to_openai_js_1.formatToOpenAITool) });
+    const agent = runnables_1.RunnableSequence.from([
+        runnables_1.RunnablePassthrough.assign({
+            agent_scratchpad: (input) => (0, openai_tools_js_1.formatToOpenAIToolMessages)(input.steps),
+        }),
+        prompt,
+        modelWithTools,
+        new output_parser_js_1.OpenAIToolsAgentOutputParser(),
+    ]);
+    return agent;
+}
+exports.createOpenAIToolsAgent = createOpenAIToolsAgent;

package/dist/agents/openai_tools/index.d.ts

@@ -0,0 +1,80 @@
+import type { StructuredToolInterface } from "@langchain/core/tools";
+import type { BaseChatModel, BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
+import { ChatPromptTemplate } from "@langchain/core/prompts";
+import { RunnableSequence } from "@langchain/core/runnables";
+import { OpenAIClient } from "@langchain/openai";
+import { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from "../openai/output_parser.js";
+export { OpenAIToolsAgentOutputParser, type ToolsAgentStep };
+/**
+ * Params used by the createOpenAIToolsAgent function.
+ */
+export type CreateOpenAIToolsAgentParams = {
+    /**
+     * LLM to use as the agent. Should work with OpenAI tool calling,
+     * so must either be an OpenAI model that supports that or a wrapper of
+     * a different model that adds in equivalent support.
+     */
+    llm: BaseChatModel<BaseChatModelCallOptions & {
+        tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[];
+        tool_choice?: OpenAIClient.ChatCompletionToolChoiceOption;
+    }>;
+    /** Tools this agent has access to. */
+    tools: StructuredToolInterface[];
+    /** The prompt to use, must have an input key of `agent_scratchpad`. */
+    prompt: ChatPromptTemplate;
+};
+/**
+ * Create an agent that uses OpenAI-style tool calling.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/openai-tools-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createOpenAIToolsAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+export declare function createOpenAIToolsAgent({ llm, tools, prompt, }: CreateOpenAIToolsAgentParams): Promise<RunnableSequence<Record<string, unknown>, import("@langchain/core/agents").AgentFinish | import("@langchain/core/agents").AgentAction[]>>;
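The JSDoc example above leaves `const tools = [...]` elided. For completeness, a minimal concrete tool that `formatToOpenAITool` can convert might look like the following sketch; the tool itself is a made-up stub.

```typescript
import { z } from "zod";
import { DynamicStructuredTool } from "langchain/tools";

// A stub tool standing in for the elided `const tools = [...]` above.
const tools = [
  new DynamicStructuredTool({
    name: "get_current_weather",
    description: "Returns the current temperature for a given city.",
    schema: z.object({
      city: z.string().describe("The city to look up"),
    }),
    func: async ({ city }) => `It is currently 20°C in ${city}.`,
  }),
];
```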

package/dist/agents/openai_tools/index.js

@@ -0,0 +1,77 @@
+import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
+import { formatToOpenAIToolMessages } from "../format_scratchpad/openai_tools.js";
+import { formatToOpenAITool } from "../../tools/convert_to_openai.js";
+import { OpenAIToolsAgentOutputParser, } from "../openai/output_parser.js";
+export { OpenAIToolsAgentOutputParser };
+/**
+ * Create an agent that uses OpenAI-style tool calling.
+ * @param params Params required to create the agent. Includes an LLM, tools, and prompt.
+ * @returns A runnable sequence representing an agent. It takes as input all the same input
+ *     variables as the prompt passed in does. It returns as output either an
+ *     AgentAction or AgentFinish.
+ *
+ * @example
+ * ```typescript
+ * import { AgentExecutor, createOpenAIToolsAgent } from "langchain/agents";
+ * import { pull } from "langchain/hub";
+ * import type { ChatPromptTemplate } from "@langchain/core/prompts";
+ * import { AIMessage, HumanMessage } from "@langchain/core/messages";
+ *
+ * import { ChatOpenAI } from "@langchain/openai";
+ *
+ * // Define the tools the agent will have access to.
+ * const tools = [...];
+ *
+ * // Get the prompt to use - you can modify this!
+ * const prompt = await pull<ChatPromptTemplate>(
+ *   "hwchase17/openai-tools-agent"
+ * );
+ *
+ * const llm = new ChatOpenAI({
+ *   temperature: 0,
+ *   modelName: "gpt-3.5-turbo-1106",
+ * });
+ *
+ * const agent = await createOpenAIToolsAgent({
+ *   llm,
+ *   tools,
+ *   prompt,
+ * });
+ *
+ * const agentExecutor = new AgentExecutor({
+ *   agent,
+ *   tools,
+ * });
+ *
+ * const result = await agentExecutor.invoke({
+ *   input: "what is LangChain?",
+ * });
+ *
+ * // With chat history
+ * const result2 = await agentExecutor.invoke({
+ *   input: "what's my name?",
+ *   chat_history: [
+ *     new HumanMessage("hi! my name is cob"),
+ *     new AIMessage("Hello Cob! How can I assist you today?"),
+ *   ],
+ * });
+ * ```
+ */
+export async function createOpenAIToolsAgent({ llm, tools, prompt, }) {
+    if (!prompt.inputVariables.includes("agent_scratchpad")) {
+        throw new Error([
+            `Prompt must have an input variable named "agent_scratchpad".`,
+            `Found ${JSON.stringify(prompt.inputVariables)} instead.`,
+        ].join("\n"));
+    }
+    const modelWithTools = llm.bind({ tools: tools.map(formatToOpenAITool) });
+    const agent = RunnableSequence.from([
+        RunnablePassthrough.assign({
+            agent_scratchpad: (input) => formatToOpenAIToolMessages(input.steps),
+        }),
+        prompt,
+        modelWithTools,
+        new OpenAIToolsAgentOutputParser(),
+    ]);
+    return agent;
+}

package/dist/agents/openai_tools/output_parser.cjs

@@ -0,0 +1,102 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAIToolsAgentOutputParser = void 0;
+const index_js_1 = require("../../schema/index.cjs");
+const types_js_1 = require("../types.cjs");
+const output_parser_js_1 = require("../../schema/output_parser.cjs");
+/**
+ * @example
+ * ```typescript
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["ai", "You are a helpful assistant"],
+ *   ["human", "{input}"],
+ *   new MessagesPlaceholder("agent_scratchpad"),
+ * ]);
+ *
+ * const runnableAgent = RunnableSequence.from([
+ *   {
+ *     input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
+ *     agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) =>
+ *       formatToOpenAIToolMessages(i.steps),
+ *   },
+ *   prompt,
+ *   new ChatOpenAI({
+ *     modelName: "gpt-3.5-turbo-1106",
+ *     temperature: 0,
+ *   }).bind({ tools: tools.map(formatToOpenAITool) }),
+ *   new OpenAIToolsAgentOutputParser(),
+ * ]).withConfig({ runName: "OpenAIToolsAgent" });
+ *
+ * const result = await runnableAgent.invoke({
+ *   input:
+ *     "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
+ * });
+ *
+ * ```
+ */
+class OpenAIToolsAgentOutputParser extends types_js_1.AgentMultiActionOutputParser {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "agents", "openai"]
+        });
+    }
+    static lc_name() {
+        return "OpenAIToolsAgentOutputParser";
+    }
+    async parse(text) {
+        throw new Error(`OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+    }
+    async parseResult(generations) {
+        if ("message" in generations[0] && (0, index_js_1.isBaseMessage)(generations[0].message)) {
+            return this.parseAIMessage(generations[0].message);
+        }
+        throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
+    }
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
+        if (message.additional_kwargs.tool_calls) {
+            const toolCalls = message.additional_kwargs.tool_calls;
+            try {
+                return toolCalls.map((toolCall, i) => {
+                    const toolInput = toolCall.function.arguments
+                        ? JSON.parse(toolCall.function.arguments)
+                        : {};
+                    const messageLog = i === 0 ? [message] : [];
+                    return {
+                        tool: toolCall.function.name,
+                        toolInput,
+                        toolCallId: toolCall.id,
+                        log: `Invoking "${toolCall.function.name}" with ${toolCall.function.arguments ?? "{}"}\n${message.content}`,
+                        messageLog,
+                    };
+                });
+            }
+            catch (error) {
+                throw new output_parser_js_1.OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${error}`);
+            }
+        }
+        else {
+            return {
+                returnValues: { output: message.content },
+                log: message.content,
+            };
+        }
+    }
+    getFormatInstructions() {
+        throw new Error("getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser.");
+    }
+}
+exports.OpenAIToolsAgentOutputParser = OpenAIToolsAgentOutputParser;

package/dist/agents/openai_tools/output_parser.d.ts

@@ -0,0 +1,57 @@
+import { AgentAction, AgentFinish, AgentStep, BaseMessage, ChatGeneration } from "../../schema/index.js";
+import { AgentMultiActionOutputParser } from "../types.js";
+/**
+ * Type that represents an agent action with an optional message log.
+ */
+export type ToolsAgentAction = AgentAction & {
+    toolCallId: string;
+    messageLog?: BaseMessage[];
+};
+export type ToolsAgentStep = AgentStep & {
+    action: ToolsAgentAction;
+};
+/**
+ * @example
+ * ```typescript
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["ai", "You are a helpful assistant"],
+ *   ["human", "{input}"],
+ *   new MessagesPlaceholder("agent_scratchpad"),
+ * ]);
+ *
+ * const runnableAgent = RunnableSequence.from([
+ *   {
+ *     input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
+ *     agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) =>
+ *       formatToOpenAIToolMessages(i.steps),
+ *   },
+ *   prompt,
+ *   new ChatOpenAI({
+ *     modelName: "gpt-3.5-turbo-1106",
+ *     temperature: 0,
+ *   }).bind({ tools: tools.map(formatToOpenAITool) }),
+ *   new OpenAIToolsAgentOutputParser(),
+ * ]).withConfig({ runName: "OpenAIToolsAgent" });
+ *
+ * const result = await runnableAgent.invoke({
+ *   input:
+ *     "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
+ * });
+ *
+ * ```
+ */
+export declare class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {
+    lc_namespace: string[];
+    static lc_name(): string;
+    parse(text: string): Promise<AgentAction[] | AgentFinish>;
+    parseResult(generations: ChatGeneration[]): Promise<AgentFinish | ToolsAgentAction[]>;
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message: BaseMessage): ToolsAgentAction[] | AgentFinish;
+    getFormatInstructions(): string;
+}

package/dist/agents/openai_tools/output_parser.js

@@ -0,0 +1,98 @@
+import { isBaseMessage, } from "../../schema/index.js";
+import { AgentMultiActionOutputParser } from "../types.js";
+import { OutputParserException } from "../../schema/output_parser.js";
+/**
+ * @example
+ * ```typescript
+ *
+ * const prompt = ChatPromptTemplate.fromMessages([
+ *   ["ai", "You are a helpful assistant"],
+ *   ["human", "{input}"],
+ *   new MessagesPlaceholder("agent_scratchpad"),
+ * ]);
+ *
+ * const runnableAgent = RunnableSequence.from([
+ *   {
+ *     input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
+ *     agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) =>
+ *       formatToOpenAIToolMessages(i.steps),
+ *   },
+ *   prompt,
+ *   new ChatOpenAI({
+ *     modelName: "gpt-3.5-turbo-1106",
+ *     temperature: 0,
+ *   }).bind({ tools: tools.map(formatToOpenAITool) }),
+ *   new OpenAIToolsAgentOutputParser(),
+ * ]).withConfig({ runName: "OpenAIToolsAgent" });
+ *
+ * const result = await runnableAgent.invoke({
+ *   input:
+ *     "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
+ * });
+ *
+ * ```
+ */
+export class OpenAIToolsAgentOutputParser extends AgentMultiActionOutputParser {
+    constructor() {
+        super(...arguments);
+        Object.defineProperty(this, "lc_namespace", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: ["langchain", "agents", "openai"]
+        });
+    }
+    static lc_name() {
+        return "OpenAIToolsAgentOutputParser";
+    }
+    async parse(text) {
+        throw new Error(`OpenAIFunctionsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
+    }
+    async parseResult(generations) {
+        if ("message" in generations[0] && isBaseMessage(generations[0].message)) {
+            return this.parseAIMessage(generations[0].message);
+        }
+        throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
+    }
+    /**
+     * Parses the output message into a ToolsAgentAction[] or AgentFinish
+     * object.
+     * @param message The BaseMessage to parse.
+     * @returns A ToolsAgentAction[] or AgentFinish object.
+     */
+    parseAIMessage(message) {
+        if (message.content && typeof message.content !== "string") {
+            throw new Error("This agent cannot parse non-string model responses.");
+        }
+        if (message.additional_kwargs.tool_calls) {
+            const toolCalls = message.additional_kwargs.tool_calls;
+            try {
+                return toolCalls.map((toolCall, i) => {
+                    const toolInput = toolCall.function.arguments
+                        ? JSON.parse(toolCall.function.arguments)
+                        : {};
+                    const messageLog = i === 0 ? [message] : [];
+                    return {
+                        tool: toolCall.function.name,
+                        toolInput,
+                        toolCallId: toolCall.id,
+                        log: `Invoking "${toolCall.function.name}" with ${toolCall.function.arguments ?? "{}"}\n${message.content}`,
+                        messageLog,
+                    };
+                });
+            }
+            catch (error) {
+                throw new OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${error}`);
+            }
+        }
+        else {
+            return {
+                returnValues: { output: message.content },
+                log: message.content,
+            };
+        }
+    }
+    getFormatInstructions() {
+        throw new Error("getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser.");
+    }
+}
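Unlike the functions parser, this parser returns one action per tool call, with the raw message attached only to the first action's `messageLog`. A minimal illustration follows; the `tool_calls` payload is hand-written and the entrypoint path is assumed.

```typescript
// Illustration only: hand-written tool_calls payload, assumed entrypoint path.
import { AIMessage } from "langchain/schema";
import { OpenAIToolsAgentOutputParser } from "langchain/agents/openai/output_parser";

const message = new AIMessage({
  content: "",
  additional_kwargs: {
    tool_calls: [
      {
        id: "call_1",
        type: "function",
        function: { name: "get_current_weather", arguments: '{"city":"Tokyo"}' },
      },
      {
        id: "call_2",
        type: "function",
        function: { name: "get_current_weather", arguments: '{"city":"Paris"}' },
      },
    ],
  },
});

const parsed = new OpenAIToolsAgentOutputParser().parseAIMessage(message);
// parsed is a ToolsAgentAction[]:
// [
//   { tool: "get_current_weather", toolInput: { city: "Tokyo" },
//     toolCallId: "call_1", messageLog: [message], log: "..." },
//   { tool: "get_current_weather", toolInput: { city: "Paris" },
//     toolCallId: "call_2", messageLog: [], log: "..." },
// ]
```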