@langchain/classic 1.0.8 → 1.0.9
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/CHANGELOG.md +8 -0
- package/dist/agents/agent.cjs.map +1 -1
- package/dist/agents/agent.js.map +1 -1
- package/dist/agents/chat/outputParser.d.cts +2 -2
- package/dist/agents/chat/outputParser.d.cts.map +1 -1
- package/dist/agents/helpers.cjs.map +1 -1
- package/dist/agents/helpers.js.map +1 -1
- package/dist/agents/openai_functions/index.cjs.map +1 -1
- package/dist/agents/openai_functions/index.js.map +1 -1
- package/dist/agents/react/index.d.cts +2 -2
- package/dist/agents/react/index.d.cts.map +1 -1
- package/dist/agents/react/index.d.ts +2 -2
- package/dist/agents/react/index.d.ts.map +1 -1
- package/dist/agents/structured_chat/index.d.cts +2 -2
- package/dist/agents/structured_chat/index.d.cts.map +1 -1
- package/dist/agents/structured_chat/index.d.ts +2 -2
- package/dist/agents/structured_chat/index.d.ts.map +1 -1
- package/dist/agents/tool_calling/index.d.cts +2 -2
- package/dist/agents/tool_calling/index.d.cts.map +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts.map +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +1 -1
- package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts.map +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.d.cts +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.d.cts.map +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.d.ts +1 -1
- package/dist/agents/toolkits/conversational_retrieval/tool.d.ts.map +1 -1
- package/dist/agents/types.cjs.map +1 -1
- package/dist/agents/types.js.map +1 -1
- package/dist/chains/base.cjs.map +1 -1
- package/dist/chains/base.js.map +1 -1
- package/dist/chains/combine_docs_chain.cjs.map +1 -1
- package/dist/chains/combine_docs_chain.js.map +1 -1
- package/dist/chains/llm_chain.cjs.map +1 -1
- package/dist/chains/llm_chain.js.map +1 -1
- package/dist/chains/openai_functions/base.cjs.map +1 -1
- package/dist/chains/openai_functions/base.js.map +1 -1
- package/dist/chains/openai_functions/openapi.cjs.map +1 -1
- package/dist/chains/openai_functions/openapi.js.map +1 -1
- package/dist/chains/query_constructor/index.cjs.map +1 -1
- package/dist/chains/query_constructor/index.js.map +1 -1
- package/dist/chains/query_constructor/parser.cjs.map +1 -1
- package/dist/chains/query_constructor/parser.js.map +1 -1
- package/dist/chains/router/multi_prompt.cjs.map +1 -1
- package/dist/chains/router/multi_prompt.js.map +1 -1
- package/dist/chains/transform.cjs.map +1 -1
- package/dist/chains/transform.js.map +1 -1
- package/dist/chat_models/universal.cjs.map +1 -1
- package/dist/chat_models/universal.js.map +1 -1
- package/dist/document_loaders/fs/json.cjs.map +1 -1
- package/dist/document_loaders/fs/json.js.map +1 -1
- package/dist/embeddings/cache_backed.cjs.map +1 -1
- package/dist/embeddings/cache_backed.js.map +1 -1
- package/dist/evaluation/base.cjs.map +1 -1
- package/dist/evaluation/base.js.map +1 -1
- package/dist/evaluation/comparison/pairwise.d.ts +3 -3
- package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
- package/dist/evaluation/criteria/criteria.d.ts +3 -3
- package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
- package/dist/experimental/autogpt/prompt.d.cts +2 -2
- package/dist/experimental/autogpt/prompt.d.cts.map +1 -1
- package/dist/experimental/autogpt/prompt.d.ts +2 -2
- package/dist/experimental/autogpt/prompt.d.ts.map +1 -1
- package/dist/experimental/masking/regex_masking_transformer.cjs.map +1 -1
- package/dist/experimental/masking/regex_masking_transformer.js.map +1 -1
- package/dist/experimental/openai_assistant/index.cjs.map +1 -1
- package/dist/experimental/openai_assistant/index.js.map +1 -1
- package/dist/experimental/plan_and_execute/agent_executor.cjs.map +1 -1
- package/dist/experimental/plan_and_execute/agent_executor.js.map +1 -1
- package/dist/experimental/prompts/custom_format.cjs.map +1 -1
- package/dist/experimental/prompts/custom_format.js.map +1 -1
- package/dist/experimental/prompts/handlebars.cjs.map +1 -1
- package/dist/experimental/prompts/handlebars.js.map +1 -1
- package/dist/load/import_constants.cjs +24 -24
- package/dist/load/import_constants.cjs.map +1 -1
- package/dist/load/import_constants.js +24 -24
- package/dist/load/import_constants.js.map +1 -1
- package/dist/load/import_map.cjs +44 -44
- package/dist/load/import_map.cjs.map +1 -1
- package/dist/load/import_map.js +52 -52
- package/dist/load/import_map.js.map +1 -1
- package/dist/output_parsers/openai_functions.cjs.map +1 -1
- package/dist/output_parsers/openai_functions.js.map +1 -1
- package/dist/output_parsers/regex.cjs.map +1 -1
- package/dist/output_parsers/regex.js.map +1 -1
- package/dist/output_parsers/router.cjs.map +1 -1
- package/dist/output_parsers/router.js.map +1 -1
- package/dist/output_parsers/structured.cjs.map +1 -1
- package/dist/output_parsers/structured.js.map +1 -1
- package/dist/retrievers/document_compressors/index.cjs.map +1 -1
- package/dist/retrievers/document_compressors/index.js.map +1 -1
- package/dist/retrievers/hyde.cjs +1 -1
- package/dist/retrievers/hyde.cjs.map +1 -1
- package/dist/retrievers/hyde.js +1 -1
- package/dist/retrievers/hyde.js.map +1 -1
- package/dist/retrievers/matryoshka_retriever.cjs +1 -1
- package/dist/retrievers/matryoshka_retriever.cjs.map +1 -1
- package/dist/retrievers/matryoshka_retriever.js +1 -1
- package/dist/retrievers/matryoshka_retriever.js.map +1 -1
- package/dist/retrievers/score_threshold.cjs.map +1 -1
- package/dist/retrievers/score_threshold.js.map +1 -1
- package/dist/retrievers/self_query/index.cjs +1 -1
- package/dist/retrievers/self_query/index.d.ts +1 -1
- package/dist/retrievers/self_query/index.js +1 -1
- package/dist/smith/config.cjs.map +1 -1
- package/dist/smith/config.js.map +1 -1
- package/dist/smith/runner_utils.cjs.map +1 -1
- package/dist/smith/runner_utils.js.map +1 -1
- package/dist/tools/retriever.d.cts +1 -1
- package/dist/tools/retriever.d.cts.map +1 -1
- package/dist/tools/retriever.d.ts +1 -1
- package/dist/tools/retriever.d.ts.map +1 -1
- package/dist/util/sql_utils.cjs.map +1 -1
- package/dist/util/sql_utils.js.map +1 -1
- package/dist/vectorstores/memory.cjs +1 -1
- package/dist/vectorstores/memory.js +1 -1
- package/package.json +17 -18
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,13 @@
 # @langchain/classic
 
+## 1.0.9
+
+### Patch Changes
+
+- Updated dependencies [[`3efe79c`](https://github.com/langchain-ai/langchainjs/commit/3efe79c62ff2ffe0ada562f7eecd85be074b649a)]:
+  - @langchain/openai@1.2.2
+  - @langchain/textsplitters@1.0.1
+
 ## 1.0.8
 
 ### Patch Changes
package/dist/agents/agent.cjs.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"agent.cjs","names":["msg: string","output: string","Serializable","earlyStoppingMethod: StoppingMethod","_steps: AgentStep[]","_inputs: ChainValues","_callbackManager?: CallbackManager","_returnValues: AgentFinish[\"returnValues\"]","input: unknown","x: BaseAgent","RunnableSequence","config: { singleAction: boolean; streamRunnable?: boolean; name?: string }","x: Runnable","fields: RunnableSingleActionAgentInput","steps: AgentStep[]","inputs: ChainValues","callbackManager?: CallbackManager","config?: RunnableConfig","finalOutput: AgentAction | AgentFinish | undefined","fields: RunnableMultiActionAgentInput","finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined","input: LLMSingleActionAgentInput","input: AgentInput","_fields?: OutputParserArgs","_tools: StructuredToolInterface[]","_fields?: Record<string, any>","_llm: BaseLanguageModelInterface","_args?: AgentArgs","suffix?: string","newInputs: ChainValues","data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }"],"sources":["../../src/agents/agent.ts"],"sourcesContent":["import type {\n StructuredToolInterface,\n ToolInterface,\n} from \"@langchain/core/tools\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentAction, AgentFinish, AgentStep } from \"@langchain/core/agents\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { Serializable } from \"@langchain/core/load/serializable\";\nimport {\n Runnable,\n patchConfig,\n type RunnableConfig,\n RunnableSequence,\n RunnableLike,\n} from \"@langchain/core/runnables\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type {\n AgentActionOutputParser,\n AgentInput,\n RunnableMultiActionAgentInput,\n RunnableSingleActionAgentInput,\n SerializedAgent,\n StoppingMethod,\n} from \"./types.js\";\n\n/**\n * Record type for arguments passed to output parsers.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type OutputParserArgs = Record<string, any>;\n\n/**\n * Error class for parse errors in LangChain. Contains information about\n * the error message and the output that caused the error.\n */\nclass ParseError extends Error {\n output: string;\n\n constructor(msg: string, output: string) {\n super(msg);\n this.output = output;\n }\n}\n\n/**\n * Abstract base class for agents in LangChain. 
Provides common\n * functionality for agents, such as handling inputs and outputs.\n */\nexport abstract class BaseAgent extends Serializable {\n declare ToolType: StructuredToolInterface;\n\n abstract get inputKeys(): string[];\n\n get returnValues(): string[] {\n return [\"output\"];\n }\n\n get allowedTools(): string[] | undefined {\n return undefined;\n }\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n _agentType(): string {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Return the string type key uniquely identifying multi or single action agents.\n */\n abstract _agentActionType(): string;\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n _steps: AgentStep[],\n _inputs: ChainValues,\n _callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return Promise.resolve({\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n });\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Prepare the agent for output, if needed\n */\n async prepareForOutput(\n _returnValues: AgentFinish[\"returnValues\"],\n _steps: AgentStep[]\n ): Promise<AgentFinish[\"returnValues\"]> {\n return {};\n }\n}\n\n/**\n * Abstract base class for single action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * single action agents.\n */\nexport abstract class BaseSingleActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"single\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish>;\n}\n\n/**\n * Abstract base class for multi-action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * multi-action agents.\n */\nexport abstract class BaseMultiActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"multi\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Actions specifying what tools to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish>;\n}\n\nfunction isAgentAction(input: unknown): input is AgentAction {\n return !Array.isArray(input) && (input as AgentAction)?.tool !== undefined;\n}\n\nexport function isRunnableAgent(x: BaseAgent) {\n return (\n (x as RunnableMultiActionAgent | RunnableSingleActionAgent).runnable !==\n undefined\n );\n}\n\n// TODO: Remove in the future. 
Only for backwards compatibility.\n// Allows for the creation of runnables with properties that will\n// be passed to the agent executor constructor.\nexport class AgentRunnableSequence<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunInput = any,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any\n> extends RunnableSequence<RunInput, RunOutput> {\n streamRunnable?: boolean;\n\n singleAction: boolean;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static fromRunnables<RunInput = any, RunOutput = any>(\n [first, ...runnables]: [\n RunnableLike<RunInput>,\n ...RunnableLike[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunnableLike<any, RunOutput>\n ],\n config: { singleAction: boolean; streamRunnable?: boolean; name?: string }\n ): AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>> {\n const sequence = RunnableSequence.from(\n [first, ...runnables],\n config.name\n ) as AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>>;\n sequence.singleAction = config.singleAction;\n sequence.streamRunnable = config.streamRunnable;\n return sequence;\n }\n\n static isAgentRunnableSequence(x: Runnable): x is AgentRunnableSequence {\n return typeof (x as AgentRunnableSequence).singleAction === \"boolean\";\n }\n}\n\n/**\n * Class representing a single-action agent powered by runnables.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction | AgentFinish\n >;\n\n get inputKeys(): string[] {\n return [];\n }\n\n /**\n * Whether to stream from the runnable or not.\n * If true, the underlying LLM is invoked in a streaming fashion to make it\n * possible to get access to the individual LLM tokens when using\n * `streamLog` with the Agent Executor. If false then LLM is invoked in a\n * non-streaming fashion and individual LLM tokens will not be available\n * in `streamLog`.\n *\n * Note that the runnable should still only stream a single action or\n * finish chunk.\n */\n streamRunnable = true;\n\n defaultRunName = \"RunnableAgent\";\n\n constructor(fields: RunnableSingleActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n return finalOutput;\n } else {\n return this.runnable.invoke(combinedInput, combinedConfig);\n }\n }\n}\n\n/**\n * Class representing a multi-action agent powered by runnables.\n * Extends the BaseMultiActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableMultiActionAgent extends BaseMultiActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n // TODO: Rename input to \"intermediate_steps\"\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction[] | AgentAction | AgentFinish\n >;\n\n defaultRunName = \"RunnableAgent\";\n\n stop?: string[];\n\n streamRunnable = true;\n\n get inputKeys(): string[] {\n return [];\n }\n\n constructor(fields: RunnableMultiActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.stop = fields.stop;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n let output;\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n output = finalOutput;\n } else {\n output = await this.runnable.invoke(combinedInput, combinedConfig);\n }\n\n if (isAgentAction(output)) {\n return [output];\n }\n\n return output;\n }\n}\n\nexport class RunnableAgent extends RunnableMultiActionAgent {}\n\n/**\n * Interface for input data for creating a LLMSingleActionAgent.\n */\nexport interface LLMSingleActionAgentInput {\n llmChain: LLMChain;\n outputParser: AgentActionOutputParser;\n stop?: string[];\n}\n\n/**\n * Class representing a single action agent using a LLMChain in LangChain.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions based on LLMChain outputs.\n * @example\n * ```typescript\n * const customPromptTemplate = new CustomPromptTemplate({\n * tools: [new Calculator()],\n * inputVariables: [\"input\", \"agent_scratchpad\"],\n * });\n * const customOutputParser = new CustomOutputParser();\n * const agent = new LLMSingleActionAgent({\n * llmChain: new LLMChain({\n * prompt: customPromptTemplate,\n * llm: new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 }),\n * }),\n * outputParser: customOutputParser,\n * stop: [\"\\nObservation\"],\n * });\n * const executor = new AgentExecutor({\n * agent,\n * tools: [new Calculator()],\n * });\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport class LLMSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\"];\n\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser;\n\n stop?: string[];\n\n constructor(input: LLMSingleActionAgentInput) {\n super(input);\n this.stop = input.stop;\n this.llmChain = input.llmChain;\n this.outputParser = input.outputParser;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys;\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const output = await this.llmChain.call(\n {\n intermediate_steps: steps,\n stop: this.stop,\n ...inputs,\n },\n callbackManager\n );\n return this.outputParser.parse(\n output[this.llmChain.outputKey],\n callbackManager\n );\n }\n}\n\n/**\n * Interface for arguments used to create an agent in LangChain.\n */\nexport interface AgentArgs {\n outputParser?: AgentActionOutputParser;\n\n callbacks?: Callbacks;\n\n /**\n * @deprecated Use `callbacks` instead.\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Class responsible for calling a language model and deciding an action.\n *\n * @remarks This is driven by an LLMChain. The prompt in the LLMChain *must*\n * include a variable called \"agent_scratchpad\" where the agent can put its\n * intermediary work.\n */\nexport abstract class Agent extends BaseSingleActionAgent {\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser | undefined;\n\n private _allowedTools?: string[] = undefined;\n\n get allowedTools(): string[] | undefined {\n return this._allowedTools;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys.filter((k) => k !== \"agent_scratchpad\");\n }\n\n constructor(input: AgentInput) {\n super(input);\n\n this.llmChain = input.llmChain;\n this._allowedTools = input.allowedTools;\n this.outputParser = input.outputParser;\n }\n\n /**\n * Prefix to append the observation with.\n */\n abstract observationPrefix(): string;\n\n /**\n * Prefix to append the LLM call with.\n */\n abstract llmPrefix(): string;\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n abstract _agentType(): string;\n\n /**\n * Get the default output parser for this agent.\n */\n static getDefaultOutputParser(\n _fields?: OutputParserArgs\n ): AgentActionOutputParser {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Create a prompt for this class\n *\n * @param _tools - List of tools the agent will have access to, used to format the prompt.\n * @param _fields - Additional fields used to format the prompt.\n *\n * @returns A PromptTemplate assembled from the given tools and fields.\n * */\n static createPrompt(\n _tools: StructuredToolInterface[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _fields?: Record<string, any>\n ): BasePromptTemplate {\n throw new Error(\"Not implemented\");\n }\n\n /** Construct an agent from an LLM and a list of tools */\n static fromLLMAndTools(\n _llm: BaseLanguageModelInterface,\n _tools: StructuredToolInterface[],\n\n _args?: AgentArgs\n ): Agent {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Validate that 
appropriate tools are passed in\n */\n static validateTools(_tools: StructuredToolInterface[]): void {}\n\n _stop(): string[] {\n return [`\\n${this.observationPrefix()}`];\n }\n\n /**\n * Name of tool to use to terminate the chain.\n */\n finishToolName(): string {\n return \"Final Answer\";\n }\n\n /**\n * Construct a scratchpad to let the agent continue its thought process\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return steps.reduce(\n (thoughts, { action, observation }) =>\n thoughts +\n [\n action.log,\n `${this.observationPrefix()}${observation}`,\n this.llmPrefix(),\n ].join(\"\\n\"),\n \"\"\n );\n }\n\n private async _plan(\n steps: AgentStep[],\n inputs: ChainValues,\n suffix?: string,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: suffix ? `${thoughts}${suffix}` : thoughts,\n };\n\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n const output = await this.llmChain.predict(newInputs, callbackManager);\n if (!this.outputParser) {\n throw new Error(\"Output parser not set\");\n }\n return this.outputParser.parse(output, callbackManager);\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager to use for this call.\n *\n * @returns Action specifying what tool to use.\n */\n plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n return this._plan(steps, inputs, undefined, callbackManager);\n }\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n async returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return {\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n };\n }\n\n if (earlyStoppingMethod === \"generate\") {\n try {\n const action = await this._plan(\n steps,\n inputs,\n \"\\n\\nI now need to return a final answer based on the previous steps:\",\n callbackManager\n );\n if (\"returnValues\" in action) {\n return action;\n }\n\n return { returnValues: { output: action.log }, log: action.log };\n } catch (err) {\n // fine to use instanceof because we're in the same module\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (!(err instanceof ParseError)) {\n throw err;\n }\n return { returnValues: { output: err.output }, log: err.output };\n }\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Load an agent from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }\n ): Promise<Agent> {\n switch (data._type) {\n case \"zero-shot-react-description\": {\n const { ZeroShotAgent } = await import(\"./mrkl/index.js\");\n return ZeroShotAgent.deserialize(data);\n }\n default:\n throw new Error(\"Unknown agent type\");\n }\n 
}\n}\n"],"mappings":";;;;;;;;;AAsCA,IAAM,aAAN,cAAyB,MAAM;CAC7B;CAEA,YAAYA,KAAaC,QAAgB;EACvC,MAAM,IAAI;EACV,KAAK,SAAS;CACf;AACF;;;;;AAMD,IAAsB,YAAtB,cAAwCC,gDAAa;CAKnD,IAAI,eAAyB;AAC3B,SAAO,CAAC,QAAS;CAClB;CAED,IAAI,eAAqC;AACvC,SAAO;CACR;;;;CAKD,aAAqB;AACnB,QAAM,IAAI,MAAM;CACjB;;;;CAUD,sBACEC,qBACAC,QACAC,SACAC,kBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO,QAAQ,QAAQ;GACrB,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN,EAAC;AAGJ,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,MAAM,iBACJC,eACAH,QACsC;AACtC,SAAO,CAAE;CACV;AACF;;;;;;AAOD,IAAsB,wBAAtB,cAAoD,UAAU;CAC5D,mBAA2B;AACzB,SAAO;CACR;AAiBF;;;;;;AAOD,IAAsB,uBAAtB,cAAmD,UAAU;CAC3D,mBAA2B;AACzB,SAAO;CACR;AAiBF;AAED,SAAS,cAAcI,OAAsC;AAC3D,QAAO,CAAC,MAAM,QAAQ,MAAM,IAAK,OAAuB,SAAS;AAClE;AAED,SAAgB,gBAAgBC,GAAc;AAC5C,QACG,EAA2D,aAC5D;AAEH;AAKD,IAAa,wBAAb,cAKUC,4CAAsC;CAC9C;CAEA;CAGA,OAAO,cACL,CAAC,OAAO,GAAG,UAKV,EACDC,QAC4D;EAC5D,MAAM,WAAWD,4CAAiB,KAChC,CAAC,OAAO,GAAG,SAAU,GACrB,OAAO,KACR;EACD,SAAS,eAAe,OAAO;EAC/B,SAAS,iBAAiB,OAAO;AACjC,SAAO;CACR;CAED,OAAO,wBAAwBE,GAAyC;AACtE,SAAO,OAAQ,EAA4B,iBAAiB;CAC7D;AACF;;;;;;AAOD,IAAa,4BAAb,cAA+C,sBAAsB;CACnE,eAAe;EAAC;EAAa;EAAU;CAAW;CAElD;CAKA,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;;;;;;;;;;;;CAaD,iBAAiB;CAEjB,iBAAiB;CAEjB,YAAYC,QAAwC;EAClD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJC,OACAC,QACAC,iBACAC,QACoC;EACpC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,6DAA6B,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;AACF,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIC;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAGhB,UAAO;EACR,MACC,QAAO,KAAK,SAAS,OAAO,eAAe,eAAe;CAE7D;AACF;;;;;;AAOD,IAAa,2BAAb,cAA8C,qBAAqB;CACjE,eAAe;EAAC;EAAa;EAAU;CAAW;CAGlD;CAKA,iBAAiB;CAEjB;CAEA,iBAAiB;CAEjB,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;CAED,YAAYC,QAAuC;EACjD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,OAAO,OAAO;EACnB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJL,OACAC,QACAC,iBACAC,QACsC;EACtC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,6DAA6B,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;EACF,IAAI;AACJ,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIG;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;GAGhB,SAAS;EACV,OACC,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;AAGpE,MAAI,cAAc,OAAO,CACvB,QAAO,CAAC,MAAO;AAGjB,SAAO;CACR;AACF;AAED,IAAa,gBAAb,cAAmC,yBAAyB,CAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwC9D,IAAa,uBAAb,cAA0C,sBAAsB;CAC9D,eAAe,CAAC,aAAa,QAAS;CAEtC;CAEA;CAEA;CAEA,YAAYC,OAAkC;EAC5C,MAAM,MAAM;EACZ,KAAK,OAAO,MAAM;EAClB,KAAK,WAAW,MAAM;EACtB,KAAK,eAAe,MAAM;CAC3B;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS;CACtB;;;;;;;;;;CAWD,MAAM,KACJP,OACAC,QACAC,iBACoC;EACpC,MAAM,SAAS,MAAM,KAAK,SAAS,KACjC;GACE,oBAAoB;GACpB,MAAM,KAAK;GACX,GAAG;EACJ,GACD,gBACD;AACD,SAAO,KAAK,aAAa,MACvB,OAAO,KAAK,SAAS,YACrB,gBACD;CACF;AACF;;;;;;;;AAuBD,IAAsB,QAAtB,cAAoC,sBAAsB;CACxD;CAEA;CAEA,AAAQ,gBAA2B;CAEnC,IAAI,eAAqC;AACvC,SAAO,KAAK;CACb;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS,UAAU,OAAO,CAAC,MAAM,MAAM,mBAAmB;CACvE;CAED,YAAYM,OAAmB;EAC7B,MAAM,MAAM;EAEZ,KAAK,WAAW,MAAM;EACtB,KAAK,gBAAgB,MAAM;EAC3B,KAAK,eAAe,MAAM;CAC3B;;;;CAoBD,OAAO,uBACLC,SACyB;AACzB,QAAM,IAAI,MAAM;CACjB;;;;;;;;;CAUD,OAAO,aAC
LC,QAEAC,SACoB;AACpB,QAAM,IAAI,MAAM;CACjB;;CAGD,OAAO,gBACLC,MACAF,QAEAG,OACO;AACP,QAAM,IAAI,MAAM;CACjB;;;;CAKD,OAAO,cAAcH,QAAyC,CAAE;CAEhE,QAAkB;AAChB,SAAO,CAAC,CAAC,EAAE,EAAE,KAAK,mBAAmB,EAAE,AAAC;CACzC;;;;CAKD,iBAAyB;AACvB,SAAO;CACR;;;;CAKD,MAAM,oBACJV,OACiC;AACjC,SAAO,MAAM,OACX,CAAC,UAAU,EAAE,QAAQ,aAAa,KAChC,WACA;GACE,OAAO;GACP,GAAG,KAAK,mBAAmB,GAAG,aAAa;GAC3C,KAAK,WAAW;EACjB,EAAC,KAAK,KAAK,EACd,GACD;CACF;CAED,MAAc,MACZA,OACAC,QACAa,QACAZ,iBACoC;EACpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMa,YAAyB;GAC7B,GAAG;GACH,kBAAkB,SAAS,GAAG,WAAW,QAAQ,GAAG;EACrD;AAED,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAG/B,MAAM,SAAS,MAAM,KAAK,SAAS,QAAQ,WAAW,gBAAgB;AACtE,MAAI,CAAC,KAAK,aACR,OAAM,IAAI,MAAM;AAElB,SAAO,KAAK,aAAa,MAAM,QAAQ,gBAAgB;CACxD;;;;;;;;;;CAWD,KACEf,OACAC,QACAC,iBACoC;AACpC,SAAO,KAAK,MAAM,OAAO,QAAQ,QAAW,gBAAgB;CAC7D;;;;CAKD,MAAM,sBACJb,qBACAW,OACAC,QACAC,iBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO;GACL,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN;AAGH,MAAI,wBAAwB,WAC1B,KAAI;GACF,MAAM,SAAS,MAAM,KAAK,MACxB,OACA,QACA,wEACA,gBACD;AACD,OAAI,kBAAkB,OACpB,QAAO;AAGT,UAAO;IAAE,cAAc,EAAE,QAAQ,OAAO,IAAK;IAAE,KAAK,OAAO;GAAK;EACjE,SAAQ,KAAK;AAGZ,OAAI,EAAE,eAAe,YACnB,OAAM;AAER,UAAO;IAAE,cAAc,EAAE,QAAQ,IAAI,OAAQ;IAAE,KAAK,IAAI;GAAQ;EACjE;AAGH,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,aAAa,YACXc,MAIgB;AAChB,UAAQ,KAAK,OAAb;GACE,KAAK,+BAA+B;IAClC,MAAM,EAAE,eAAe,GAAG,2CAAM;AAChC,WAAO,cAAc,YAAY,KAAK;GACvC;GACD,QACE,OAAM,IAAI,MAAM;EACnB;CACF;AACF"}
+
{"version":3,"file":"agent.cjs","names":["msg: string","output: string","Serializable","earlyStoppingMethod: StoppingMethod","_steps: AgentStep[]","_inputs: ChainValues","_callbackManager?: CallbackManager","_returnValues: AgentFinish[\"returnValues\"]","input: unknown","x: BaseAgent","RunnableSequence","config: { singleAction: boolean; streamRunnable?: boolean; name?: string }","x: Runnable","fields: RunnableSingleActionAgentInput","steps: AgentStep[]","inputs: ChainValues","callbackManager?: CallbackManager","config?: RunnableConfig","finalOutput: AgentAction | AgentFinish | undefined","fields: RunnableMultiActionAgentInput","finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined","input: LLMSingleActionAgentInput","input: AgentInput","_fields?: OutputParserArgs","_tools: StructuredToolInterface[]","_fields?: Record<string, any>","_llm: BaseLanguageModelInterface","_args?: AgentArgs","suffix?: string","newInputs: ChainValues","data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }"],"sources":["../../src/agents/agent.ts"],"sourcesContent":["import type {\n StructuredToolInterface,\n ToolInterface,\n} from \"@langchain/core/tools\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentAction, AgentFinish, AgentStep } from \"@langchain/core/agents\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { Serializable } from \"@langchain/core/load/serializable\";\nimport {\n Runnable,\n patchConfig,\n type RunnableConfig,\n RunnableSequence,\n RunnableLike,\n} from \"@langchain/core/runnables\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type {\n AgentActionOutputParser,\n AgentInput,\n RunnableMultiActionAgentInput,\n RunnableSingleActionAgentInput,\n SerializedAgent,\n StoppingMethod,\n} from \"./types.js\";\n\n/**\n * Record type for arguments passed to output parsers.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type OutputParserArgs = Record<string, any>;\n\n/**\n * Error class for parse errors in LangChain. Contains information about\n * the error message and the output that caused the error.\n */\nclass ParseError extends Error {\n output: string;\n\n constructor(msg: string, output: string) {\n super(msg);\n this.output = output;\n }\n}\n\n/**\n * Abstract base class for agents in LangChain. 
Provides common\n * functionality for agents, such as handling inputs and outputs.\n */\nexport abstract class BaseAgent extends Serializable {\n declare ToolType: StructuredToolInterface;\n\n abstract get inputKeys(): string[];\n\n get returnValues(): string[] {\n return [\"output\"];\n }\n\n get allowedTools(): string[] | undefined {\n return undefined;\n }\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n _agentType(): string {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Return the string type key uniquely identifying multi or single action agents.\n */\n abstract _agentActionType(): string;\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n _steps: AgentStep[],\n _inputs: ChainValues,\n _callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return Promise.resolve({\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n });\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Prepare the agent for output, if needed\n */\n async prepareForOutput(\n _returnValues: AgentFinish[\"returnValues\"],\n _steps: AgentStep[]\n ): Promise<AgentFinish[\"returnValues\"]> {\n return {};\n }\n}\n\n/**\n * Abstract base class for single action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * single action agents.\n */\nexport abstract class BaseSingleActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"single\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish>;\n}\n\n/**\n * Abstract base class for multi-action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * multi-action agents.\n */\nexport abstract class BaseMultiActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"multi\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Actions specifying what tools to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish>;\n}\n\nfunction isAgentAction(input: unknown): input is AgentAction {\n return !Array.isArray(input) && (input as AgentAction)?.tool !== undefined;\n}\n\nexport function isRunnableAgent(x: BaseAgent) {\n return (\n (x as RunnableMultiActionAgent | RunnableSingleActionAgent).runnable !==\n undefined\n );\n}\n\n// TODO: Remove in the future. 
Only for backwards compatibility.\n// Allows for the creation of runnables with properties that will\n// be passed to the agent executor constructor.\nexport class AgentRunnableSequence<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunInput = any,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n> extends RunnableSequence<RunInput, RunOutput> {\n streamRunnable?: boolean;\n\n singleAction: boolean;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static fromRunnables<RunInput = any, RunOutput = any>(\n [first, ...runnables]: [\n RunnableLike<RunInput>,\n ...RunnableLike[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunnableLike<any, RunOutput>,\n ],\n config: { singleAction: boolean; streamRunnable?: boolean; name?: string }\n ): AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>> {\n const sequence = RunnableSequence.from(\n [first, ...runnables],\n config.name\n ) as AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>>;\n sequence.singleAction = config.singleAction;\n sequence.streamRunnable = config.streamRunnable;\n return sequence;\n }\n\n static isAgentRunnableSequence(x: Runnable): x is AgentRunnableSequence {\n return typeof (x as AgentRunnableSequence).singleAction === \"boolean\";\n }\n}\n\n/**\n * Class representing a single-action agent powered by runnables.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction | AgentFinish\n >;\n\n get inputKeys(): string[] {\n return [];\n }\n\n /**\n * Whether to stream from the runnable or not.\n * If true, the underlying LLM is invoked in a streaming fashion to make it\n * possible to get access to the individual LLM tokens when using\n * `streamLog` with the Agent Executor. If false then LLM is invoked in a\n * non-streaming fashion and individual LLM tokens will not be available\n * in `streamLog`.\n *\n * Note that the runnable should still only stream a single action or\n * finish chunk.\n */\n streamRunnable = true;\n\n defaultRunName = \"RunnableAgent\";\n\n constructor(fields: RunnableSingleActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n return finalOutput;\n } else {\n return this.runnable.invoke(combinedInput, combinedConfig);\n }\n }\n}\n\n/**\n * Class representing a multi-action agent powered by runnables.\n * Extends the BaseMultiActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableMultiActionAgent extends BaseMultiActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n // TODO: Rename input to \"intermediate_steps\"\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction[] | AgentAction | AgentFinish\n >;\n\n defaultRunName = \"RunnableAgent\";\n\n stop?: string[];\n\n streamRunnable = true;\n\n get inputKeys(): string[] {\n return [];\n }\n\n constructor(fields: RunnableMultiActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.stop = fields.stop;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n let output;\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n output = finalOutput;\n } else {\n output = await this.runnable.invoke(combinedInput, combinedConfig);\n }\n\n if (isAgentAction(output)) {\n return [output];\n }\n\n return output;\n }\n}\n\nexport class RunnableAgent extends RunnableMultiActionAgent {}\n\n/**\n * Interface for input data for creating a LLMSingleActionAgent.\n */\nexport interface LLMSingleActionAgentInput {\n llmChain: LLMChain;\n outputParser: AgentActionOutputParser;\n stop?: string[];\n}\n\n/**\n * Class representing a single action agent using a LLMChain in LangChain.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions based on LLMChain outputs.\n * @example\n * ```typescript\n * const customPromptTemplate = new CustomPromptTemplate({\n * tools: [new Calculator()],\n * inputVariables: [\"input\", \"agent_scratchpad\"],\n * });\n * const customOutputParser = new CustomOutputParser();\n * const agent = new LLMSingleActionAgent({\n * llmChain: new LLMChain({\n * prompt: customPromptTemplate,\n * llm: new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 }),\n * }),\n * outputParser: customOutputParser,\n * stop: [\"\\nObservation\"],\n * });\n * const executor = new AgentExecutor({\n * agent,\n * tools: [new Calculator()],\n * });\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport class LLMSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\"];\n\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser;\n\n stop?: string[];\n\n constructor(input: LLMSingleActionAgentInput) {\n super(input);\n this.stop = input.stop;\n this.llmChain = input.llmChain;\n this.outputParser = input.outputParser;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys;\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const output = await this.llmChain.call(\n {\n intermediate_steps: steps,\n stop: this.stop,\n ...inputs,\n },\n callbackManager\n );\n return this.outputParser.parse(\n output[this.llmChain.outputKey],\n callbackManager\n );\n }\n}\n\n/**\n * Interface for arguments used to create an agent in LangChain.\n */\nexport interface AgentArgs {\n outputParser?: AgentActionOutputParser;\n\n callbacks?: Callbacks;\n\n /**\n * @deprecated Use `callbacks` instead.\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Class responsible for calling a language model and deciding an action.\n *\n * @remarks This is driven by an LLMChain. The prompt in the LLMChain *must*\n * include a variable called \"agent_scratchpad\" where the agent can put its\n * intermediary work.\n */\nexport abstract class Agent extends BaseSingleActionAgent {\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser | undefined;\n\n private _allowedTools?: string[] = undefined;\n\n get allowedTools(): string[] | undefined {\n return this._allowedTools;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys.filter((k) => k !== \"agent_scratchpad\");\n }\n\n constructor(input: AgentInput) {\n super(input);\n\n this.llmChain = input.llmChain;\n this._allowedTools = input.allowedTools;\n this.outputParser = input.outputParser;\n }\n\n /**\n * Prefix to append the observation with.\n */\n abstract observationPrefix(): string;\n\n /**\n * Prefix to append the LLM call with.\n */\n abstract llmPrefix(): string;\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n abstract _agentType(): string;\n\n /**\n * Get the default output parser for this agent.\n */\n static getDefaultOutputParser(\n _fields?: OutputParserArgs\n ): AgentActionOutputParser {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Create a prompt for this class\n *\n * @param _tools - List of tools the agent will have access to, used to format the prompt.\n * @param _fields - Additional fields used to format the prompt.\n *\n * @returns A PromptTemplate assembled from the given tools and fields.\n * */\n static createPrompt(\n _tools: StructuredToolInterface[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _fields?: Record<string, any>\n ): BasePromptTemplate {\n throw new Error(\"Not implemented\");\n }\n\n /** Construct an agent from an LLM and a list of tools */\n static fromLLMAndTools(\n _llm: BaseLanguageModelInterface,\n _tools: StructuredToolInterface[],\n\n _args?: AgentArgs\n ): Agent {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Validate that 
appropriate tools are passed in\n */\n static validateTools(_tools: StructuredToolInterface[]): void {}\n\n _stop(): string[] {\n return [`\\n${this.observationPrefix()}`];\n }\n\n /**\n * Name of tool to use to terminate the chain.\n */\n finishToolName(): string {\n return \"Final Answer\";\n }\n\n /**\n * Construct a scratchpad to let the agent continue its thought process\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return steps.reduce(\n (thoughts, { action, observation }) =>\n thoughts +\n [\n action.log,\n `${this.observationPrefix()}${observation}`,\n this.llmPrefix(),\n ].join(\"\\n\"),\n \"\"\n );\n }\n\n private async _plan(\n steps: AgentStep[],\n inputs: ChainValues,\n suffix?: string,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: suffix ? `${thoughts}${suffix}` : thoughts,\n };\n\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n const output = await this.llmChain.predict(newInputs, callbackManager);\n if (!this.outputParser) {\n throw new Error(\"Output parser not set\");\n }\n return this.outputParser.parse(output, callbackManager);\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager to use for this call.\n *\n * @returns Action specifying what tool to use.\n */\n plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n return this._plan(steps, inputs, undefined, callbackManager);\n }\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n async returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return {\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n };\n }\n\n if (earlyStoppingMethod === \"generate\") {\n try {\n const action = await this._plan(\n steps,\n inputs,\n \"\\n\\nI now need to return a final answer based on the previous steps:\",\n callbackManager\n );\n if (\"returnValues\" in action) {\n return action;\n }\n\n return { returnValues: { output: action.log }, log: action.log };\n } catch (err) {\n // fine to use instanceof because we're in the same module\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (!(err instanceof ParseError)) {\n throw err;\n }\n return { returnValues: { output: err.output }, log: err.output };\n }\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Load an agent from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }\n ): Promise<Agent> {\n switch (data._type) {\n case \"zero-shot-react-description\": {\n const { ZeroShotAgent } = await import(\"./mrkl/index.js\");\n return ZeroShotAgent.deserialize(data);\n }\n default:\n throw new Error(\"Unknown agent type\");\n }\n 
}\n}\n"],"mappings":";;;;;;;;;AAsCA,IAAM,aAAN,cAAyB,MAAM;CAC7B;CAEA,YAAYA,KAAaC,QAAgB;EACvC,MAAM,IAAI;EACV,KAAK,SAAS;CACf;AACF;;;;;AAMD,IAAsB,YAAtB,cAAwCC,gDAAa;CAKnD,IAAI,eAAyB;AAC3B,SAAO,CAAC,QAAS;CAClB;CAED,IAAI,eAAqC;AACvC,SAAO;CACR;;;;CAKD,aAAqB;AACnB,QAAM,IAAI,MAAM;CACjB;;;;CAUD,sBACEC,qBACAC,QACAC,SACAC,kBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO,QAAQ,QAAQ;GACrB,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN,EAAC;AAGJ,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,MAAM,iBACJC,eACAH,QACsC;AACtC,SAAO,CAAE;CACV;AACF;;;;;;AAOD,IAAsB,wBAAtB,cAAoD,UAAU;CAC5D,mBAA2B;AACzB,SAAO;CACR;AAiBF;;;;;;AAOD,IAAsB,uBAAtB,cAAmD,UAAU;CAC3D,mBAA2B;AACzB,SAAO;CACR;AAiBF;AAED,SAAS,cAAcI,OAAsC;AAC3D,QAAO,CAAC,MAAM,QAAQ,MAAM,IAAK,OAAuB,SAAS;AAClE;AAED,SAAgB,gBAAgBC,GAAc;AAC5C,QACG,EAA2D,aAC5D;AAEH;AAKD,IAAa,wBAAb,cAKUC,4CAAsC;CAC9C;CAEA;CAGA,OAAO,cACL,CAAC,OAAO,GAAG,UAKV,EACDC,QAC4D;EAC5D,MAAM,WAAWD,4CAAiB,KAChC,CAAC,OAAO,GAAG,SAAU,GACrB,OAAO,KACR;EACD,SAAS,eAAe,OAAO;EAC/B,SAAS,iBAAiB,OAAO;AACjC,SAAO;CACR;CAED,OAAO,wBAAwBE,GAAyC;AACtE,SAAO,OAAQ,EAA4B,iBAAiB;CAC7D;AACF;;;;;;AAOD,IAAa,4BAAb,cAA+C,sBAAsB;CACnE,eAAe;EAAC;EAAa;EAAU;CAAW;CAElD;CAKA,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;;;;;;;;;;;;CAaD,iBAAiB;CAEjB,iBAAiB;CAEjB,YAAYC,QAAwC;EAClD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJC,OACAC,QACAC,iBACAC,QACoC;EACpC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,6DAA6B,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;AACF,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIC;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAGhB,UAAO;EACR,MACC,QAAO,KAAK,SAAS,OAAO,eAAe,eAAe;CAE7D;AACF;;;;;;AAOD,IAAa,2BAAb,cAA8C,qBAAqB;CACjE,eAAe;EAAC;EAAa;EAAU;CAAW;CAGlD;CAKA,iBAAiB;CAEjB;CAEA,iBAAiB;CAEjB,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;CAED,YAAYC,QAAuC;EACjD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,OAAO,OAAO;EACnB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJL,OACAC,QACAC,iBACAC,QACsC;EACtC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,6DAA6B,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;EACF,IAAI;AACJ,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIG;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;GAGhB,SAAS;EACV,OACC,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;AAGpE,MAAI,cAAc,OAAO,CACvB,QAAO,CAAC,MAAO;AAGjB,SAAO;CACR;AACF;AAED,IAAa,gBAAb,cAAmC,yBAAyB,CAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwC9D,IAAa,uBAAb,cAA0C,sBAAsB;CAC9D,eAAe,CAAC,aAAa,QAAS;CAEtC;CAEA;CAEA;CAEA,YAAYC,OAAkC;EAC5C,MAAM,MAAM;EACZ,KAAK,OAAO,MAAM;EAClB,KAAK,WAAW,MAAM;EACtB,KAAK,eAAe,MAAM;CAC3B;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS;CACtB;;;;;;;;;;CAWD,MAAM,KACJP,OACAC,QACAC,iBACoC;EACpC,MAAM,SAAS,MAAM,KAAK,SAAS,KACjC;GACE,oBAAoB;GACpB,MAAM,KAAK;GACX,GAAG;EACJ,GACD,gBACD;AACD,SAAO,KAAK,aAAa,MACvB,OAAO,KAAK,SAAS,YACrB,gBACD;CACF;AACF;;;;;;;;AAuBD,IAAsB,QAAtB,cAAoC,sBAAsB;CACxD;CAEA;CAEA,AAAQ,gBAA2B;CAEnC,IAAI,eAAqC;AACvC,SAAO,KAAK;CACb;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS,UAAU,OAAO,CAAC,MAAM,MAAM,mBAAmB;CACvE;CAED,YAAYM,OAAmB;EAC7B,MAAM,MAAM;EAEZ,KAAK,WAAW,MAAM;EACtB,KAAK,gBAAgB,MAAM;EAC3B,KAAK,eAAe,MAAM;CAC3B;;;;CAoBD,OAAO,uBACLC,SACyB;AACzB,QAAM,IAAI,MAAM;CACjB;;;;;;;;;CAUD,OAAO,aAC
LC,QAEAC,SACoB;AACpB,QAAM,IAAI,MAAM;CACjB;;CAGD,OAAO,gBACLC,MACAF,QAEAG,OACO;AACP,QAAM,IAAI,MAAM;CACjB;;;;CAKD,OAAO,cAAcH,QAAyC,CAAE;CAEhE,QAAkB;AAChB,SAAO,CAAC,CAAC,EAAE,EAAE,KAAK,mBAAmB,EAAE,AAAC;CACzC;;;;CAKD,iBAAyB;AACvB,SAAO;CACR;;;;CAKD,MAAM,oBACJV,OACiC;AACjC,SAAO,MAAM,OACX,CAAC,UAAU,EAAE,QAAQ,aAAa,KAChC,WACA;GACE,OAAO;GACP,GAAG,KAAK,mBAAmB,GAAG,aAAa;GAC3C,KAAK,WAAW;EACjB,EAAC,KAAK,KAAK,EACd,GACD;CACF;CAED,MAAc,MACZA,OACAC,QACAa,QACAZ,iBACoC;EACpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMa,YAAyB;GAC7B,GAAG;GACH,kBAAkB,SAAS,GAAG,WAAW,QAAQ,GAAG;EACrD;AAED,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAG/B,MAAM,SAAS,MAAM,KAAK,SAAS,QAAQ,WAAW,gBAAgB;AACtE,MAAI,CAAC,KAAK,aACR,OAAM,IAAI,MAAM;AAElB,SAAO,KAAK,aAAa,MAAM,QAAQ,gBAAgB;CACxD;;;;;;;;;;CAWD,KACEf,OACAC,QACAC,iBACoC;AACpC,SAAO,KAAK,MAAM,OAAO,QAAQ,QAAW,gBAAgB;CAC7D;;;;CAKD,MAAM,sBACJb,qBACAW,OACAC,QACAC,iBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO;GACL,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN;AAGH,MAAI,wBAAwB,WAC1B,KAAI;GACF,MAAM,SAAS,MAAM,KAAK,MACxB,OACA,QACA,wEACA,gBACD;AACD,OAAI,kBAAkB,OACpB,QAAO;AAGT,UAAO;IAAE,cAAc,EAAE,QAAQ,OAAO,IAAK;IAAE,KAAK,OAAO;GAAK;EACjE,SAAQ,KAAK;AAGZ,OAAI,EAAE,eAAe,YACnB,OAAM;AAER,UAAO;IAAE,cAAc,EAAE,QAAQ,IAAI,OAAQ;IAAE,KAAK,IAAI;GAAQ;EACjE;AAGH,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,aAAa,YACXc,MAIgB;AAChB,UAAQ,KAAK,OAAb;GACE,KAAK,+BAA+B;IAClC,MAAM,EAAE,eAAe,GAAG,2CAAM;AAChC,WAAO,cAAc,YAAY,KAAK;GACvC;GACD,QACE,OAAM,IAAI,MAAM;EACnB;CACF;AACF"}
|
package/dist/agents/agent.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"agent.js","names":["msg: string","output: string","earlyStoppingMethod: StoppingMethod","_steps: AgentStep[]","_inputs: ChainValues","_callbackManager?: CallbackManager","_returnValues: AgentFinish[\"returnValues\"]","input: unknown","x: BaseAgent","config: { singleAction: boolean; streamRunnable?: boolean; name?: string }","x: Runnable","fields: RunnableSingleActionAgentInput","steps: AgentStep[]","inputs: ChainValues","callbackManager?: CallbackManager","config?: RunnableConfig","finalOutput: AgentAction | AgentFinish | undefined","fields: RunnableMultiActionAgentInput","finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined","input: LLMSingleActionAgentInput","input: AgentInput","_fields?: OutputParserArgs","_tools: StructuredToolInterface[]","_fields?: Record<string, any>","_llm: BaseLanguageModelInterface","_args?: AgentArgs","suffix?: string","newInputs: ChainValues","data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }"],"sources":["../../src/agents/agent.ts"],"sourcesContent":["import type {\n StructuredToolInterface,\n ToolInterface,\n} from \"@langchain/core/tools\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentAction, AgentFinish, AgentStep } from \"@langchain/core/agents\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { Serializable } from \"@langchain/core/load/serializable\";\nimport {\n Runnable,\n patchConfig,\n type RunnableConfig,\n RunnableSequence,\n RunnableLike,\n} from \"@langchain/core/runnables\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type {\n AgentActionOutputParser,\n AgentInput,\n RunnableMultiActionAgentInput,\n RunnableSingleActionAgentInput,\n SerializedAgent,\n StoppingMethod,\n} from \"./types.js\";\n\n/**\n * Record type for arguments passed to output parsers.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type OutputParserArgs = Record<string, any>;\n\n/**\n * Error class for parse errors in LangChain. Contains information about\n * the error message and the output that caused the error.\n */\nclass ParseError extends Error {\n output: string;\n\n constructor(msg: string, output: string) {\n super(msg);\n this.output = output;\n }\n}\n\n/**\n * Abstract base class for agents in LangChain. 
Provides common\n * functionality for agents, such as handling inputs and outputs.\n */\nexport abstract class BaseAgent extends Serializable {\n declare ToolType: StructuredToolInterface;\n\n abstract get inputKeys(): string[];\n\n get returnValues(): string[] {\n return [\"output\"];\n }\n\n get allowedTools(): string[] | undefined {\n return undefined;\n }\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n _agentType(): string {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Return the string type key uniquely identifying multi or single action agents.\n */\n abstract _agentActionType(): string;\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n _steps: AgentStep[],\n _inputs: ChainValues,\n _callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return Promise.resolve({\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n });\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Prepare the agent for output, if needed\n */\n async prepareForOutput(\n _returnValues: AgentFinish[\"returnValues\"],\n _steps: AgentStep[]\n ): Promise<AgentFinish[\"returnValues\"]> {\n return {};\n }\n}\n\n/**\n * Abstract base class for single action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * single action agents.\n */\nexport abstract class BaseSingleActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"single\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish>;\n}\n\n/**\n * Abstract base class for multi-action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * multi-action agents.\n */\nexport abstract class BaseMultiActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"multi\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Actions specifying what tools to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish>;\n}\n\nfunction isAgentAction(input: unknown): input is AgentAction {\n return !Array.isArray(input) && (input as AgentAction)?.tool !== undefined;\n}\n\nexport function isRunnableAgent(x: BaseAgent) {\n return (\n (x as RunnableMultiActionAgent | RunnableSingleActionAgent).runnable !==\n undefined\n );\n}\n\n// TODO: Remove in the future. 
Only for backwards compatibility.\n// Allows for the creation of runnables with properties that will\n// be passed to the agent executor constructor.\nexport class AgentRunnableSequence<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunInput = any,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any\n> extends RunnableSequence<RunInput, RunOutput> {\n streamRunnable?: boolean;\n\n singleAction: boolean;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static fromRunnables<RunInput = any, RunOutput = any>(\n [first, ...runnables]: [\n RunnableLike<RunInput>,\n ...RunnableLike[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunnableLike<any, RunOutput>\n ],\n config: { singleAction: boolean; streamRunnable?: boolean; name?: string }\n ): AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>> {\n const sequence = RunnableSequence.from(\n [first, ...runnables],\n config.name\n ) as AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>>;\n sequence.singleAction = config.singleAction;\n sequence.streamRunnable = config.streamRunnable;\n return sequence;\n }\n\n static isAgentRunnableSequence(x: Runnable): x is AgentRunnableSequence {\n return typeof (x as AgentRunnableSequence).singleAction === \"boolean\";\n }\n}\n\n/**\n * Class representing a single-action agent powered by runnables.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction | AgentFinish\n >;\n\n get inputKeys(): string[] {\n return [];\n }\n\n /**\n * Whether to stream from the runnable or not.\n * If true, the underlying LLM is invoked in a streaming fashion to make it\n * possible to get access to the individual LLM tokens when using\n * `streamLog` with the Agent Executor. If false then LLM is invoked in a\n * non-streaming fashion and individual LLM tokens will not be available\n * in `streamLog`.\n *\n * Note that the runnable should still only stream a single action or\n * finish chunk.\n */\n streamRunnable = true;\n\n defaultRunName = \"RunnableAgent\";\n\n constructor(fields: RunnableSingleActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n return finalOutput;\n } else {\n return this.runnable.invoke(combinedInput, combinedConfig);\n }\n }\n}\n\n/**\n * Class representing a multi-action agent powered by runnables.\n * Extends the BaseMultiActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableMultiActionAgent extends BaseMultiActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n // TODO: Rename input to \"intermediate_steps\"\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction[] | AgentAction | AgentFinish\n >;\n\n defaultRunName = \"RunnableAgent\";\n\n stop?: string[];\n\n streamRunnable = true;\n\n get inputKeys(): string[] {\n return [];\n }\n\n constructor(fields: RunnableMultiActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.stop = fields.stop;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n let output;\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n output = finalOutput;\n } else {\n output = await this.runnable.invoke(combinedInput, combinedConfig);\n }\n\n if (isAgentAction(output)) {\n return [output];\n }\n\n return output;\n }\n}\n\nexport class RunnableAgent extends RunnableMultiActionAgent {}\n\n/**\n * Interface for input data for creating a LLMSingleActionAgent.\n */\nexport interface LLMSingleActionAgentInput {\n llmChain: LLMChain;\n outputParser: AgentActionOutputParser;\n stop?: string[];\n}\n\n/**\n * Class representing a single action agent using a LLMChain in LangChain.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions based on LLMChain outputs.\n * @example\n * ```typescript\n * const customPromptTemplate = new CustomPromptTemplate({\n * tools: [new Calculator()],\n * inputVariables: [\"input\", \"agent_scratchpad\"],\n * });\n * const customOutputParser = new CustomOutputParser();\n * const agent = new LLMSingleActionAgent({\n * llmChain: new LLMChain({\n * prompt: customPromptTemplate,\n * llm: new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 }),\n * }),\n * outputParser: customOutputParser,\n * stop: [\"\\nObservation\"],\n * });\n * const executor = new AgentExecutor({\n * agent,\n * tools: [new Calculator()],\n * });\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport class LLMSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\"];\n\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser;\n\n stop?: string[];\n\n constructor(input: LLMSingleActionAgentInput) {\n super(input);\n this.stop = input.stop;\n this.llmChain = input.llmChain;\n this.outputParser = input.outputParser;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys;\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const output = await this.llmChain.call(\n {\n intermediate_steps: steps,\n stop: this.stop,\n ...inputs,\n },\n callbackManager\n );\n return this.outputParser.parse(\n output[this.llmChain.outputKey],\n callbackManager\n );\n }\n}\n\n/**\n * Interface for arguments used to create an agent in LangChain.\n */\nexport interface AgentArgs {\n outputParser?: AgentActionOutputParser;\n\n callbacks?: Callbacks;\n\n /**\n * @deprecated Use `callbacks` instead.\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Class responsible for calling a language model and deciding an action.\n *\n * @remarks This is driven by an LLMChain. The prompt in the LLMChain *must*\n * include a variable called \"agent_scratchpad\" where the agent can put its\n * intermediary work.\n */\nexport abstract class Agent extends BaseSingleActionAgent {\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser | undefined;\n\n private _allowedTools?: string[] = undefined;\n\n get allowedTools(): string[] | undefined {\n return this._allowedTools;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys.filter((k) => k !== \"agent_scratchpad\");\n }\n\n constructor(input: AgentInput) {\n super(input);\n\n this.llmChain = input.llmChain;\n this._allowedTools = input.allowedTools;\n this.outputParser = input.outputParser;\n }\n\n /**\n * Prefix to append the observation with.\n */\n abstract observationPrefix(): string;\n\n /**\n * Prefix to append the LLM call with.\n */\n abstract llmPrefix(): string;\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n abstract _agentType(): string;\n\n /**\n * Get the default output parser for this agent.\n */\n static getDefaultOutputParser(\n _fields?: OutputParserArgs\n ): AgentActionOutputParser {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Create a prompt for this class\n *\n * @param _tools - List of tools the agent will have access to, used to format the prompt.\n * @param _fields - Additional fields used to format the prompt.\n *\n * @returns A PromptTemplate assembled from the given tools and fields.\n * */\n static createPrompt(\n _tools: StructuredToolInterface[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _fields?: Record<string, any>\n ): BasePromptTemplate {\n throw new Error(\"Not implemented\");\n }\n\n /** Construct an agent from an LLM and a list of tools */\n static fromLLMAndTools(\n _llm: BaseLanguageModelInterface,\n _tools: StructuredToolInterface[],\n\n _args?: AgentArgs\n ): Agent {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Validate that 
appropriate tools are passed in\n */\n static validateTools(_tools: StructuredToolInterface[]): void {}\n\n _stop(): string[] {\n return [`\\n${this.observationPrefix()}`];\n }\n\n /**\n * Name of tool to use to terminate the chain.\n */\n finishToolName(): string {\n return \"Final Answer\";\n }\n\n /**\n * Construct a scratchpad to let the agent continue its thought process\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return steps.reduce(\n (thoughts, { action, observation }) =>\n thoughts +\n [\n action.log,\n `${this.observationPrefix()}${observation}`,\n this.llmPrefix(),\n ].join(\"\\n\"),\n \"\"\n );\n }\n\n private async _plan(\n steps: AgentStep[],\n inputs: ChainValues,\n suffix?: string,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: suffix ? `${thoughts}${suffix}` : thoughts,\n };\n\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n const output = await this.llmChain.predict(newInputs, callbackManager);\n if (!this.outputParser) {\n throw new Error(\"Output parser not set\");\n }\n return this.outputParser.parse(output, callbackManager);\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager to use for this call.\n *\n * @returns Action specifying what tool to use.\n */\n plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n return this._plan(steps, inputs, undefined, callbackManager);\n }\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n async returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return {\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n };\n }\n\n if (earlyStoppingMethod === \"generate\") {\n try {\n const action = await this._plan(\n steps,\n inputs,\n \"\\n\\nI now need to return a final answer based on the previous steps:\",\n callbackManager\n );\n if (\"returnValues\" in action) {\n return action;\n }\n\n return { returnValues: { output: action.log }, log: action.log };\n } catch (err) {\n // fine to use instanceof because we're in the same module\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (!(err instanceof ParseError)) {\n throw err;\n }\n return { returnValues: { output: err.output }, log: err.output };\n }\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Load an agent from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }\n ): Promise<Agent> {\n switch (data._type) {\n case \"zero-shot-react-description\": {\n const { ZeroShotAgent } = await import(\"./mrkl/index.js\");\n return ZeroShotAgent.deserialize(data);\n }\n default:\n throw new Error(\"Unknown agent type\");\n }\n 
}\n}\n"],"mappings":";;;;;;;;AAsCA,IAAM,aAAN,cAAyB,MAAM;CAC7B;CAEA,YAAYA,KAAaC,QAAgB;EACvC,MAAM,IAAI;EACV,KAAK,SAAS;CACf;AACF;;;;;AAMD,IAAsB,YAAtB,cAAwC,aAAa;CAKnD,IAAI,eAAyB;AAC3B,SAAO,CAAC,QAAS;CAClB;CAED,IAAI,eAAqC;AACvC,SAAO;CACR;;;;CAKD,aAAqB;AACnB,QAAM,IAAI,MAAM;CACjB;;;;CAUD,sBACEC,qBACAC,QACAC,SACAC,kBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO,QAAQ,QAAQ;GACrB,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN,EAAC;AAGJ,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,MAAM,iBACJC,eACAH,QACsC;AACtC,SAAO,CAAE;CACV;AACF;;;;;;AAOD,IAAsB,wBAAtB,cAAoD,UAAU;CAC5D,mBAA2B;AACzB,SAAO;CACR;AAiBF;;;;;;AAOD,IAAsB,uBAAtB,cAAmD,UAAU;CAC3D,mBAA2B;AACzB,SAAO;CACR;AAiBF;AAED,SAAS,cAAcI,OAAsC;AAC3D,QAAO,CAAC,MAAM,QAAQ,MAAM,IAAK,OAAuB,SAAS;AAClE;AAED,SAAgB,gBAAgBC,GAAc;AAC5C,QACG,EAA2D,aAC5D;AAEH;AAKD,IAAa,wBAAb,cAKU,iBAAsC;CAC9C;CAEA;CAGA,OAAO,cACL,CAAC,OAAO,GAAG,UAKV,EACDC,QAC4D;EAC5D,MAAM,WAAW,iBAAiB,KAChC,CAAC,OAAO,GAAG,SAAU,GACrB,OAAO,KACR;EACD,SAAS,eAAe,OAAO;EAC/B,SAAS,iBAAiB,OAAO;AACjC,SAAO;CACR;CAED,OAAO,wBAAwBC,GAAyC;AACtE,SAAO,OAAQ,EAA4B,iBAAiB;CAC7D;AACF;;;;;;AAOD,IAAa,4BAAb,cAA+C,sBAAsB;CACnE,eAAe;EAAC;EAAa;EAAU;CAAW;CAElD;CAKA,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;;;;;;;;;;;;CAaD,iBAAiB;CAEjB,iBAAiB;CAEjB,YAAYC,QAAwC;EAClD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJC,OACAC,QACAC,iBACAC,QACoC;EACpC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,iBAAiB,YAAY,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;AACF,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIC;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAGhB,UAAO;EACR,MACC,QAAO,KAAK,SAAS,OAAO,eAAe,eAAe;CAE7D;AACF;;;;;;AAOD,IAAa,2BAAb,cAA8C,qBAAqB;CACjE,eAAe;EAAC;EAAa;EAAU;CAAW;CAGlD;CAKA,iBAAiB;CAEjB;CAEA,iBAAiB;CAEjB,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;CAED,YAAYC,QAAuC;EACjD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,OAAO,OAAO;EACnB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJL,OACAC,QACAC,iBACAC,QACsC;EACtC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,iBAAiB,YAAY,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;EACF,IAAI;AACJ,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIG;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;GAGhB,SAAS;EACV,OACC,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;AAGpE,MAAI,cAAc,OAAO,CACvB,QAAO,CAAC,MAAO;AAGjB,SAAO;CACR;AACF;AAED,IAAa,gBAAb,cAAmC,yBAAyB,CAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwC9D,IAAa,uBAAb,cAA0C,sBAAsB;CAC9D,eAAe,CAAC,aAAa,QAAS;CAEtC;CAEA;CAEA;CAEA,YAAYC,OAAkC;EAC5C,MAAM,MAAM;EACZ,KAAK,OAAO,MAAM;EAClB,KAAK,WAAW,MAAM;EACtB,KAAK,eAAe,MAAM;CAC3B;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS;CACtB;;;;;;;;;;CAWD,MAAM,KACJP,OACAC,QACAC,iBACoC;EACpC,MAAM,SAAS,MAAM,KAAK,SAAS,KACjC;GACE,oBAAoB;GACpB,MAAM,KAAK;GACX,GAAG;EACJ,GACD,gBACD;AACD,SAAO,KAAK,aAAa,MACvB,OAAO,KAAK,SAAS,YACrB,gBACD;CACF;AACF;;;;;;;;AAuBD,IAAsB,QAAtB,cAAoC,sBAAsB;CACxD;CAEA;CAEA,AAAQ,gBAA2B;CAEnC,IAAI,eAAqC;AACvC,SAAO,KAAK;CACb;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS,UAAU,OAAO,CAAC,MAAM,MAAM,mBAAmB;CACvE;CAED,YAAYM,OAAmB;EAC7B,MAAM,MAAM;EAEZ,KAAK,WAAW,MAAM;EACtB,KAAK,gBAAgB,MAAM;EAC3B,KAAK,eAAe,MAAM;CAC3B;;;;CAoBD,OAAO,uBACLC,SACyB;AACzB,QAAM,IAAI,MAAM;CACjB;;;;;;;;;CAUD,OAA
O,aACLC,QAEAC,SACoB;AACpB,QAAM,IAAI,MAAM;CACjB;;CAGD,OAAO,gBACLC,MACAF,QAEAG,OACO;AACP,QAAM,IAAI,MAAM;CACjB;;;;CAKD,OAAO,cAAcH,QAAyC,CAAE;CAEhE,QAAkB;AAChB,SAAO,CAAC,CAAC,EAAE,EAAE,KAAK,mBAAmB,EAAE,AAAC;CACzC;;;;CAKD,iBAAyB;AACvB,SAAO;CACR;;;;CAKD,MAAM,oBACJV,OACiC;AACjC,SAAO,MAAM,OACX,CAAC,UAAU,EAAE,QAAQ,aAAa,KAChC,WACA;GACE,OAAO;GACP,GAAG,KAAK,mBAAmB,GAAG,aAAa;GAC3C,KAAK,WAAW;EACjB,EAAC,KAAK,KAAK,EACd,GACD;CACF;CAED,MAAc,MACZA,OACAC,QACAa,QACAZ,iBACoC;EACpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMa,YAAyB;GAC7B,GAAG;GACH,kBAAkB,SAAS,GAAG,WAAW,QAAQ,GAAG;EACrD;AAED,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAG/B,MAAM,SAAS,MAAM,KAAK,SAAS,QAAQ,WAAW,gBAAgB;AACtE,MAAI,CAAC,KAAK,aACR,OAAM,IAAI,MAAM;AAElB,SAAO,KAAK,aAAa,MAAM,QAAQ,gBAAgB;CACxD;;;;;;;;;;CAWD,KACEf,OACAC,QACAC,iBACoC;AACpC,SAAO,KAAK,MAAM,OAAO,QAAQ,QAAW,gBAAgB;CAC7D;;;;CAKD,MAAM,sBACJZ,qBACAU,OACAC,QACAC,iBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO;GACL,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN;AAGH,MAAI,wBAAwB,WAC1B,KAAI;GACF,MAAM,SAAS,MAAM,KAAK,MACxB,OACA,QACA,wEACA,gBACD;AACD,OAAI,kBAAkB,OACpB,QAAO;AAGT,UAAO;IAAE,cAAc,EAAE,QAAQ,OAAO,IAAK;IAAE,KAAK,OAAO;GAAK;EACjE,SAAQ,KAAK;AAGZ,OAAI,EAAE,eAAe,YACnB,OAAM;AAER,UAAO;IAAE,cAAc,EAAE,QAAQ,IAAI,OAAQ;IAAE,KAAK,IAAI;GAAQ;EACjE;AAGH,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,aAAa,YACXc,MAIgB;AAChB,UAAQ,KAAK,OAAb;GACE,KAAK,+BAA+B;IAClC,MAAM,EAAE,eAAe,GAAG,MAAM,OAAO;AACvC,WAAO,cAAc,YAAY,KAAK;GACvC;GACD,QACE,OAAM,IAAI,MAAM;EACnB;CACF;AACF"}
|
|
1
|
+
{"version":3,"file":"agent.js","names":["msg: string","output: string","earlyStoppingMethod: StoppingMethod","_steps: AgentStep[]","_inputs: ChainValues","_callbackManager?: CallbackManager","_returnValues: AgentFinish[\"returnValues\"]","input: unknown","x: BaseAgent","config: { singleAction: boolean; streamRunnable?: boolean; name?: string }","x: Runnable","fields: RunnableSingleActionAgentInput","steps: AgentStep[]","inputs: ChainValues","callbackManager?: CallbackManager","config?: RunnableConfig","finalOutput: AgentAction | AgentFinish | undefined","fields: RunnableMultiActionAgentInput","finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined","input: LLMSingleActionAgentInput","input: AgentInput","_fields?: OutputParserArgs","_tools: StructuredToolInterface[]","_fields?: Record<string, any>","_llm: BaseLanguageModelInterface","_args?: AgentArgs","suffix?: string","newInputs: ChainValues","data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }"],"sources":["../../src/agents/agent.ts"],"sourcesContent":["import type {\n StructuredToolInterface,\n ToolInterface,\n} from \"@langchain/core/tools\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { CallbackManager, Callbacks } from \"@langchain/core/callbacks/manager\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentAction, AgentFinish, AgentStep } from \"@langchain/core/agents\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { Serializable } from \"@langchain/core/load/serializable\";\nimport {\n Runnable,\n patchConfig,\n type RunnableConfig,\n RunnableSequence,\n RunnableLike,\n} from \"@langchain/core/runnables\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type {\n AgentActionOutputParser,\n AgentInput,\n RunnableMultiActionAgentInput,\n RunnableSingleActionAgentInput,\n SerializedAgent,\n StoppingMethod,\n} from \"./types.js\";\n\n/**\n * Record type for arguments passed to output parsers.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type OutputParserArgs = Record<string, any>;\n\n/**\n * Error class for parse errors in LangChain. Contains information about\n * the error message and the output that caused the error.\n */\nclass ParseError extends Error {\n output: string;\n\n constructor(msg: string, output: string) {\n super(msg);\n this.output = output;\n }\n}\n\n/**\n * Abstract base class for agents in LangChain. 
Provides common\n * functionality for agents, such as handling inputs and outputs.\n */\nexport abstract class BaseAgent extends Serializable {\n declare ToolType: StructuredToolInterface;\n\n abstract get inputKeys(): string[];\n\n get returnValues(): string[] {\n return [\"output\"];\n }\n\n get allowedTools(): string[] | undefined {\n return undefined;\n }\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n _agentType(): string {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Return the string type key uniquely identifying multi or single action agents.\n */\n abstract _agentActionType(): string;\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n _steps: AgentStep[],\n _inputs: ChainValues,\n _callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return Promise.resolve({\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n });\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Prepare the agent for output, if needed\n */\n async prepareForOutput(\n _returnValues: AgentFinish[\"returnValues\"],\n _steps: AgentStep[]\n ): Promise<AgentFinish[\"returnValues\"]> {\n return {};\n }\n}\n\n/**\n * Abstract base class for single action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * single action agents.\n */\nexport abstract class BaseSingleActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"single\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish>;\n}\n\n/**\n * Abstract base class for multi-action agents in LangChain. Extends the\n * BaseAgent class and provides additional functionality specific to\n * multi-action agents.\n */\nexport abstract class BaseMultiActionAgent extends BaseAgent {\n _agentActionType(): string {\n return \"multi\" as const;\n }\n\n /**\n * Decide what to do, given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Actions specifying what tools to use.\n */\n abstract plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish>;\n}\n\nfunction isAgentAction(input: unknown): input is AgentAction {\n return !Array.isArray(input) && (input as AgentAction)?.tool !== undefined;\n}\n\nexport function isRunnableAgent(x: BaseAgent) {\n return (\n (x as RunnableMultiActionAgent | RunnableSingleActionAgent).runnable !==\n undefined\n );\n}\n\n// TODO: Remove in the future. 
Only for backwards compatibility.\n// Allows for the creation of runnables with properties that will\n// be passed to the agent executor constructor.\nexport class AgentRunnableSequence<\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunInput = any,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunOutput = any,\n> extends RunnableSequence<RunInput, RunOutput> {\n streamRunnable?: boolean;\n\n singleAction: boolean;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static fromRunnables<RunInput = any, RunOutput = any>(\n [first, ...runnables]: [\n RunnableLike<RunInput>,\n ...RunnableLike[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n RunnableLike<any, RunOutput>,\n ],\n config: { singleAction: boolean; streamRunnable?: boolean; name?: string }\n ): AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>> {\n const sequence = RunnableSequence.from(\n [first, ...runnables],\n config.name\n ) as AgentRunnableSequence<RunInput, Exclude<RunOutput, Error>>;\n sequence.singleAction = config.singleAction;\n sequence.streamRunnable = config.streamRunnable;\n return sequence;\n }\n\n static isAgentRunnableSequence(x: Runnable): x is AgentRunnableSequence {\n return typeof (x as AgentRunnableSequence).singleAction === \"boolean\";\n }\n}\n\n/**\n * Class representing a single-action agent powered by runnables.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction | AgentFinish\n >;\n\n get inputKeys(): string[] {\n return [];\n }\n\n /**\n * Whether to stream from the runnable or not.\n * If true, the underlying LLM is invoked in a streaming fashion to make it\n * possible to get access to the individual LLM tokens when using\n * `streamLog` with the Agent Executor. If false then LLM is invoked in a\n * non-streaming fashion and individual LLM tokens will not be available\n * in `streamLog`.\n *\n * Note that the runnable should still only stream a single action or\n * finish chunk.\n */\n streamRunnable = true;\n\n defaultRunName = \"RunnableAgent\";\n\n constructor(fields: RunnableSingleActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n return finalOutput;\n } else {\n return this.runnable.invoke(combinedInput, combinedConfig);\n }\n }\n}\n\n/**\n * Class representing a multi-action agent powered by runnables.\n * Extends the BaseMultiActionAgent class and provides methods for\n * planning agent actions with runnables.\n */\nexport class RunnableMultiActionAgent extends BaseMultiActionAgent {\n lc_namespace = [\"langchain\", \"agents\", \"runnable\"];\n\n // TODO: Rename input to \"intermediate_steps\"\n runnable: Runnable<\n ChainValues & { steps: AgentStep[] },\n AgentAction[] | AgentAction | AgentFinish\n >;\n\n defaultRunName = \"RunnableAgent\";\n\n stop?: string[];\n\n streamRunnable = true;\n\n get inputKeys(): string[] {\n return [];\n }\n\n constructor(fields: RunnableMultiActionAgentInput) {\n super(fields);\n this.runnable = fields.runnable;\n this.stop = fields.stop;\n this.defaultRunName =\n fields.defaultRunName ?? this.runnable.name ?? this.defaultRunName;\n this.streamRunnable = fields.streamRunnable ?? 
this.streamRunnable;\n }\n\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager,\n config?: RunnableConfig\n ): Promise<AgentAction[] | AgentFinish> {\n const combinedInput = { ...inputs, steps };\n const combinedConfig = patchConfig(config, {\n callbacks: callbackManager,\n runName: this.defaultRunName,\n });\n let output;\n if (this.streamRunnable) {\n const stream = await this.runnable.stream(combinedInput, combinedConfig);\n let finalOutput: AgentAction | AgentFinish | AgentAction[] | undefined;\n for await (const chunk of stream) {\n if (finalOutput === undefined) {\n finalOutput = chunk;\n } else {\n throw new Error(\n [\n `Multiple agent actions/finishes received in streamed agent output.`,\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n }\n if (finalOutput === undefined) {\n throw new Error(\n [\n \"No streaming output received from underlying runnable.\",\n `Set \"streamRunnable: false\" when initializing the agent to invoke this agent in non-streaming mode.`,\n ].join(\"\\n\")\n );\n }\n output = finalOutput;\n } else {\n output = await this.runnable.invoke(combinedInput, combinedConfig);\n }\n\n if (isAgentAction(output)) {\n return [output];\n }\n\n return output;\n }\n}\n\nexport class RunnableAgent extends RunnableMultiActionAgent {}\n\n/**\n * Interface for input data for creating a LLMSingleActionAgent.\n */\nexport interface LLMSingleActionAgentInput {\n llmChain: LLMChain;\n outputParser: AgentActionOutputParser;\n stop?: string[];\n}\n\n/**\n * Class representing a single action agent using a LLMChain in LangChain.\n * Extends the BaseSingleActionAgent class and provides methods for\n * planning agent actions based on LLMChain outputs.\n * @example\n * ```typescript\n * const customPromptTemplate = new CustomPromptTemplate({\n * tools: [new Calculator()],\n * inputVariables: [\"input\", \"agent_scratchpad\"],\n * });\n * const customOutputParser = new CustomOutputParser();\n * const agent = new LLMSingleActionAgent({\n * llmChain: new LLMChain({\n * prompt: customPromptTemplate,\n * llm: new ChatOpenAI({ model: \"gpt-4o-mini\", temperature: 0 }),\n * }),\n * outputParser: customOutputParser,\n * stop: [\"\\nObservation\"],\n * });\n * const executor = new AgentExecutor({\n * agent,\n * tools: [new Calculator()],\n * });\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? 
What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport class LLMSingleActionAgent extends BaseSingleActionAgent {\n lc_namespace = [\"langchain\", \"agents\"];\n\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser;\n\n stop?: string[];\n\n constructor(input: LLMSingleActionAgentInput) {\n super(input);\n this.stop = input.stop;\n this.llmChain = input.llmChain;\n this.outputParser = input.outputParser;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys;\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager.\n *\n * @returns Action specifying what tool to use.\n */\n async plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const output = await this.llmChain.call(\n {\n intermediate_steps: steps,\n stop: this.stop,\n ...inputs,\n },\n callbackManager\n );\n return this.outputParser.parse(\n output[this.llmChain.outputKey],\n callbackManager\n );\n }\n}\n\n/**\n * Interface for arguments used to create an agent in LangChain.\n */\nexport interface AgentArgs {\n outputParser?: AgentActionOutputParser;\n\n callbacks?: Callbacks;\n\n /**\n * @deprecated Use `callbacks` instead.\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Class responsible for calling a language model and deciding an action.\n *\n * @remarks This is driven by an LLMChain. The prompt in the LLMChain *must*\n * include a variable called \"agent_scratchpad\" where the agent can put its\n * intermediary work.\n */\nexport abstract class Agent extends BaseSingleActionAgent {\n llmChain: LLMChain;\n\n outputParser: AgentActionOutputParser | undefined;\n\n private _allowedTools?: string[] = undefined;\n\n get allowedTools(): string[] | undefined {\n return this._allowedTools;\n }\n\n get inputKeys(): string[] {\n return this.llmChain.inputKeys.filter((k) => k !== \"agent_scratchpad\");\n }\n\n constructor(input: AgentInput) {\n super(input);\n\n this.llmChain = input.llmChain;\n this._allowedTools = input.allowedTools;\n this.outputParser = input.outputParser;\n }\n\n /**\n * Prefix to append the observation with.\n */\n abstract observationPrefix(): string;\n\n /**\n * Prefix to append the LLM call with.\n */\n abstract llmPrefix(): string;\n\n /**\n * Return the string type key uniquely identifying this class of agent.\n */\n abstract _agentType(): string;\n\n /**\n * Get the default output parser for this agent.\n */\n static getDefaultOutputParser(\n _fields?: OutputParserArgs\n ): AgentActionOutputParser {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Create a prompt for this class\n *\n * @param _tools - List of tools the agent will have access to, used to format the prompt.\n * @param _fields - Additional fields used to format the prompt.\n *\n * @returns A PromptTemplate assembled from the given tools and fields.\n * */\n static createPrompt(\n _tools: StructuredToolInterface[],\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n _fields?: Record<string, any>\n ): BasePromptTemplate {\n throw new Error(\"Not implemented\");\n }\n\n /** Construct an agent from an LLM and a list of tools */\n static fromLLMAndTools(\n _llm: BaseLanguageModelInterface,\n _tools: StructuredToolInterface[],\n\n _args?: AgentArgs\n ): Agent {\n throw new Error(\"Not implemented\");\n }\n\n /**\n * Validate that 
appropriate tools are passed in\n */\n static validateTools(_tools: StructuredToolInterface[]): void {}\n\n _stop(): string[] {\n return [`\\n${this.observationPrefix()}`];\n }\n\n /**\n * Name of tool to use to terminate the chain.\n */\n finishToolName(): string {\n return \"Final Answer\";\n }\n\n /**\n * Construct a scratchpad to let the agent continue its thought process\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return steps.reduce(\n (thoughts, { action, observation }) =>\n thoughts +\n [\n action.log,\n `${this.observationPrefix()}${observation}`,\n this.llmPrefix(),\n ].join(\"\\n\"),\n \"\"\n );\n }\n\n private async _plan(\n steps: AgentStep[],\n inputs: ChainValues,\n suffix?: string,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: suffix ? `${thoughts}${suffix}` : thoughts,\n };\n\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n const output = await this.llmChain.predict(newInputs, callbackManager);\n if (!this.outputParser) {\n throw new Error(\"Output parser not set\");\n }\n return this.outputParser.parse(output, callbackManager);\n }\n\n /**\n * Decide what to do given some input.\n *\n * @param steps - Steps the LLM has taken so far, along with observations from each.\n * @param inputs - User inputs.\n * @param callbackManager - Callback manager to use for this call.\n *\n * @returns Action specifying what tool to use.\n */\n plan(\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n return this._plan(steps, inputs, undefined, callbackManager);\n }\n\n /**\n * Return response when agent has been stopped due to max iterations\n */\n async returnStoppedResponse(\n earlyStoppingMethod: StoppingMethod,\n steps: AgentStep[],\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentFinish> {\n if (earlyStoppingMethod === \"force\") {\n return {\n returnValues: { output: \"Agent stopped due to max iterations.\" },\n log: \"\",\n };\n }\n\n if (earlyStoppingMethod === \"generate\") {\n try {\n const action = await this._plan(\n steps,\n inputs,\n \"\\n\\nI now need to return a final answer based on the previous steps:\",\n callbackManager\n );\n if (\"returnValues\" in action) {\n return action;\n }\n\n return { returnValues: { output: action.log }, log: action.log };\n } catch (err) {\n // fine to use instanceof because we're in the same module\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (!(err instanceof ParseError)) {\n throw err;\n }\n return { returnValues: { output: err.output }, log: err.output };\n }\n }\n\n throw new Error(`Invalid stopping method: ${earlyStoppingMethod}`);\n }\n\n /**\n * Load an agent from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedAgent & {\n llm?: BaseLanguageModelInterface;\n tools?: ToolInterface[];\n }\n ): Promise<Agent> {\n switch (data._type) {\n case \"zero-shot-react-description\": {\n const { ZeroShotAgent } = await import(\"./mrkl/index.js\");\n return ZeroShotAgent.deserialize(data);\n }\n default:\n throw new Error(\"Unknown agent type\");\n }\n 
}\n}\n"],"mappings":";;;;;;;;AAsCA,IAAM,aAAN,cAAyB,MAAM;CAC7B;CAEA,YAAYA,KAAaC,QAAgB;EACvC,MAAM,IAAI;EACV,KAAK,SAAS;CACf;AACF;;;;;AAMD,IAAsB,YAAtB,cAAwC,aAAa;CAKnD,IAAI,eAAyB;AAC3B,SAAO,CAAC,QAAS;CAClB;CAED,IAAI,eAAqC;AACvC,SAAO;CACR;;;;CAKD,aAAqB;AACnB,QAAM,IAAI,MAAM;CACjB;;;;CAUD,sBACEC,qBACAC,QACAC,SACAC,kBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO,QAAQ,QAAQ;GACrB,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN,EAAC;AAGJ,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,MAAM,iBACJC,eACAH,QACsC;AACtC,SAAO,CAAE;CACV;AACF;;;;;;AAOD,IAAsB,wBAAtB,cAAoD,UAAU;CAC5D,mBAA2B;AACzB,SAAO;CACR;AAiBF;;;;;;AAOD,IAAsB,uBAAtB,cAAmD,UAAU;CAC3D,mBAA2B;AACzB,SAAO;CACR;AAiBF;AAED,SAAS,cAAcI,OAAsC;AAC3D,QAAO,CAAC,MAAM,QAAQ,MAAM,IAAK,OAAuB,SAAS;AAClE;AAED,SAAgB,gBAAgBC,GAAc;AAC5C,QACG,EAA2D,aAC5D;AAEH;AAKD,IAAa,wBAAb,cAKU,iBAAsC;CAC9C;CAEA;CAGA,OAAO,cACL,CAAC,OAAO,GAAG,UAKV,EACDC,QAC4D;EAC5D,MAAM,WAAW,iBAAiB,KAChC,CAAC,OAAO,GAAG,SAAU,GACrB,OAAO,KACR;EACD,SAAS,eAAe,OAAO;EAC/B,SAAS,iBAAiB,OAAO;AACjC,SAAO;CACR;CAED,OAAO,wBAAwBC,GAAyC;AACtE,SAAO,OAAQ,EAA4B,iBAAiB;CAC7D;AACF;;;;;;AAOD,IAAa,4BAAb,cAA+C,sBAAsB;CACnE,eAAe;EAAC;EAAa;EAAU;CAAW;CAElD;CAKA,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;;;;;;;;;;;;CAaD,iBAAiB;CAEjB,iBAAiB;CAEjB,YAAYC,QAAwC;EAClD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJC,OACAC,QACAC,iBACAC,QACoC;EACpC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,iBAAiB,YAAY,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;AACF,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIC;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAGhB,UAAO;EACR,MACC,QAAO,KAAK,SAAS,OAAO,eAAe,eAAe;CAE7D;AACF;;;;;;AAOD,IAAa,2BAAb,cAA8C,qBAAqB;CACjE,eAAe;EAAC;EAAa;EAAU;CAAW;CAGlD;CAKA,iBAAiB;CAEjB;CAEA,iBAAiB;CAEjB,IAAI,YAAsB;AACxB,SAAO,CAAE;CACV;CAED,YAAYC,QAAuC;EACjD,MAAM,OAAO;EACb,KAAK,WAAW,OAAO;EACvB,KAAK,OAAO,OAAO;EACnB,KAAK,iBACH,OAAO,kBAAkB,KAAK,SAAS,QAAQ,KAAK;EACtD,KAAK,iBAAiB,OAAO,kBAAkB,KAAK;CACrD;CAED,MAAM,KACJL,OACAC,QACAC,iBACAC,QACsC;EACtC,MAAM,gBAAgB;GAAE,GAAG;GAAQ;EAAO;EAC1C,MAAM,iBAAiB,YAAY,QAAQ;GACzC,WAAW;GACX,SAAS,KAAK;EACf,EAAC;EACF,IAAI;AACJ,MAAI,KAAK,gBAAgB;GACvB,MAAM,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;GACxE,IAAIG;AACJ,cAAW,MAAM,SAAS,OACxB,KAAI,gBAAgB,QAClB,cAAc;OAEd,OAAM,IAAI,MACR,CACE,CAAC,kEAAkE,CAAC,EACpE,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;AAIlB,OAAI,gBAAgB,OAClB,OAAM,IAAI,MACR,CACE,0DACA,CAAC,mGAAmG,CAAC,AACtG,EAAC,KAAK,KAAK;GAGhB,SAAS;EACV,OACC,SAAS,MAAM,KAAK,SAAS,OAAO,eAAe,eAAe;AAGpE,MAAI,cAAc,OAAO,CACvB,QAAO,CAAC,MAAO;AAGjB,SAAO;CACR;AACF;AAED,IAAa,gBAAb,cAAmC,yBAAyB,CAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwC9D,IAAa,uBAAb,cAA0C,sBAAsB;CAC9D,eAAe,CAAC,aAAa,QAAS;CAEtC;CAEA;CAEA;CAEA,YAAYC,OAAkC;EAC5C,MAAM,MAAM;EACZ,KAAK,OAAO,MAAM;EAClB,KAAK,WAAW,MAAM;EACtB,KAAK,eAAe,MAAM;CAC3B;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS;CACtB;;;;;;;;;;CAWD,MAAM,KACJP,OACAC,QACAC,iBACoC;EACpC,MAAM,SAAS,MAAM,KAAK,SAAS,KACjC;GACE,oBAAoB;GACpB,MAAM,KAAK;GACX,GAAG;EACJ,GACD,gBACD;AACD,SAAO,KAAK,aAAa,MACvB,OAAO,KAAK,SAAS,YACrB,gBACD;CACF;AACF;;;;;;;;AAuBD,IAAsB,QAAtB,cAAoC,sBAAsB;CACxD;CAEA;CAEA,AAAQ,gBAA2B;CAEnC,IAAI,eAAqC;AACvC,SAAO,KAAK;CACb;CAED,IAAI,YAAsB;AACxB,SAAO,KAAK,SAAS,UAAU,OAAO,CAAC,MAAM,MAAM,mBAAmB;CACvE;CAED,YAAYM,OAAmB;EAC7B,MAAM,MAAM;EAEZ,KAAK,WAAW,MAAM;EACtB,KAAK,gBAAgB,MAAM;EAC3B,KAAK,eAAe,MAAM;CAC3B;;;;CAoBD,OAAO,uBACLC,SACyB;AACzB,QAAM,IAAI,MAAM;CACjB;;;;;;;;;CAUD,OAA
O,aACLC,QAEAC,SACoB;AACpB,QAAM,IAAI,MAAM;CACjB;;CAGD,OAAO,gBACLC,MACAF,QAEAG,OACO;AACP,QAAM,IAAI,MAAM;CACjB;;;;CAKD,OAAO,cAAcH,QAAyC,CAAE;CAEhE,QAAkB;AAChB,SAAO,CAAC,CAAC,EAAE,EAAE,KAAK,mBAAmB,EAAE,AAAC;CACzC;;;;CAKD,iBAAyB;AACvB,SAAO;CACR;;;;CAKD,MAAM,oBACJV,OACiC;AACjC,SAAO,MAAM,OACX,CAAC,UAAU,EAAE,QAAQ,aAAa,KAChC,WACA;GACE,OAAO;GACP,GAAG,KAAK,mBAAmB,GAAG,aAAa;GAC3C,KAAK,WAAW;EACjB,EAAC,KAAK,KAAK,EACd,GACD;CACF;CAED,MAAc,MACZA,OACAC,QACAa,QACAZ,iBACoC;EACpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMa,YAAyB;GAC7B,GAAG;GACH,kBAAkB,SAAS,GAAG,WAAW,QAAQ,GAAG;EACrD;AAED,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAG/B,MAAM,SAAS,MAAM,KAAK,SAAS,QAAQ,WAAW,gBAAgB;AACtE,MAAI,CAAC,KAAK,aACR,OAAM,IAAI,MAAM;AAElB,SAAO,KAAK,aAAa,MAAM,QAAQ,gBAAgB;CACxD;;;;;;;;;;CAWD,KACEf,OACAC,QACAC,iBACoC;AACpC,SAAO,KAAK,MAAM,OAAO,QAAQ,QAAW,gBAAgB;CAC7D;;;;CAKD,MAAM,sBACJZ,qBACAU,OACAC,QACAC,iBACsB;AACtB,MAAI,wBAAwB,QAC1B,QAAO;GACL,cAAc,EAAE,QAAQ,uCAAwC;GAChE,KAAK;EACN;AAGH,MAAI,wBAAwB,WAC1B,KAAI;GACF,MAAM,SAAS,MAAM,KAAK,MACxB,OACA,QACA,wEACA,gBACD;AACD,OAAI,kBAAkB,OACpB,QAAO;AAGT,UAAO;IAAE,cAAc,EAAE,QAAQ,OAAO,IAAK;IAAE,KAAK,OAAO;GAAK;EACjE,SAAQ,KAAK;AAGZ,OAAI,EAAE,eAAe,YACnB,OAAM;AAER,UAAO;IAAE,cAAc,EAAE,QAAQ,IAAI,OAAQ;IAAE,KAAK,IAAI;GAAQ;EACjE;AAGH,QAAM,IAAI,MAAM,CAAC,yBAAyB,EAAE,qBAAqB;CAClE;;;;CAKD,aAAa,YACXc,MAIgB;AAChB,UAAQ,KAAK,OAAb;GACE,KAAK,+BAA+B;IAClC,MAAM,EAAE,eAAe,GAAG,MAAM,OAAO;AACvC,WAAO,cAAc,YAAY,KAAK;GACvC;GACD,QACE,OAAM,IAAI,MAAM;EACnB;CACF;AACF"}
|
|
@@ -51,12 +51,12 @@ declare class ChatAgentOutputParser extends AgentActionOutputParser {
      * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.
      */
     parse(text: string): Promise<{
+        tool?: undefined;
+        toolInput?: undefined;
         returnValues: {
             output: string;
         };
         log: string;
-        tool?: undefined;
-        toolInput?: undefined;
     } | {
         returnValues?: undefined;
         tool: any;
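For reference, a minimal consumer-side sketch of narrowing the union type declared in the hunk above. It is illustrative only and not part of the published diff; the `@langchain/classic/agents` entrypoint and the `handleModelOutput` helper name are assumptions.

```typescript
// Illustrative sketch only; not part of the published package diff.
// Assumption: ChatAgentOutputParser is importable from "@langchain/classic/agents".
import { ChatAgentOutputParser } from "@langchain/classic/agents";

const parser = new ChatAgentOutputParser();

async function handleModelOutput(text: string): Promise<string> {
  const parsed = await parser.parse(text);
  if (parsed.returnValues !== undefined) {
    // Finish branch: `tool` and `toolInput` are typed as `undefined` here.
    return parsed.returnValues.output;
  }
  // Action branch: `tool` and `toolInput` describe the tool call to run next.
  return `call ${parsed.tool} with ${JSON.stringify(parsed.toolInput)}`;
}
```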
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"outputParser.d.cts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n
|
|
1
|
+
{"version":3,"file":"outputParser.d.cts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n tool?: undefined;\n toolInput?: undefined;\n returnValues: {\n output: string;\n };\n log: string;\n } | {\n returnValues?: undefined;\n tool: any;\n toolInput: any;\n log: string;\n }>;\n /**\n * Returns the format instructions used in the output parser for the\n * ChatAgent class.\n * @returns The format instructions as a string.\n */\n getFormatInstructions(): string;\n}\n//# sourceMappingURL=outputParser.d.ts.map"],"mappings":";;;;;AAuCA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAAqBE,qBAAAA,SAA8BF,uBAAuB;;;;;;;;;;;uBAWjDG"}
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"helpers.cjs","names":["llm: BaseLanguageModelInterface | undefined","tools: ToolInterface[] | undefined","data: SerializedAgentT<T, U, V>","fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z","fromConstructor: (args: V) => Z","LLMChain"],"sources":["../../src/agents/helpers.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { ToolInterface } from \"@langchain/core/tools\";\nimport type { SerializedAgentT, AgentInput } from \"./types.js\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\n\nexport const deserializeHelper = async <\n T extends string,\n U extends Record<string, unknown>,\n V extends AgentInput,\n Z
|
|
1
|
+
{"version":3,"file":"helpers.cjs","names":["llm: BaseLanguageModelInterface | undefined","tools: ToolInterface[] | undefined","data: SerializedAgentT<T, U, V>","fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z","fromConstructor: (args: V) => Z","LLMChain"],"sources":["../../src/agents/helpers.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { ToolInterface } from \"@langchain/core/tools\";\nimport type { SerializedAgentT, AgentInput } from \"./types.js\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\n\nexport const deserializeHelper = async <\n T extends string,\n U extends Record<string, unknown>,\n V extends AgentInput,\n Z,\n>(\n llm: BaseLanguageModelInterface | undefined,\n tools: ToolInterface[] | undefined,\n data: SerializedAgentT<T, U, V>,\n fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z,\n fromConstructor: (args: V) => Z\n): Promise<Z> => {\n if (data.load_from_llm_and_tools) {\n if (!llm) {\n throw new Error(\"Loading from llm and tools, llm must be provided.\");\n }\n\n if (!tools) {\n throw new Error(\"Loading from llm and tools, tools must be provided.\");\n }\n\n return fromLLMAndTools(llm, tools, data);\n }\n if (!data.llm_chain) {\n throw new Error(\"Loading from constructor, llm_chain must be provided.\");\n }\n\n const llmChain = await LLMChain.deserialize(data.llm_chain);\n return fromConstructor({ ...data, llmChain });\n};\n"],"mappings":";;;AAKA,MAAa,oBAAoB,OAM/BA,KACAC,OACAC,MACAC,iBAKAC,oBACe;AACf,KAAI,KAAK,yBAAyB;AAChC,MAAI,CAAC,IACH,OAAM,IAAI,MAAM;AAGlB,MAAI,CAAC,MACH,OAAM,IAAI,MAAM;AAGlB,SAAO,gBAAgB,KAAK,OAAO,KAAK;CACzC;AACD,KAAI,CAAC,KAAK,UACR,OAAM,IAAI,MAAM;CAGlB,MAAM,WAAW,MAAMC,2BAAS,YAAY,KAAK,UAAU;AAC3D,QAAO,gBAAgB;EAAE,GAAG;EAAM;CAAU,EAAC;AAC9C"}
@@ -1 +1 @@
-
{"version":3,"file":"helpers.js","names":["llm: BaseLanguageModelInterface | undefined","tools: ToolInterface[] | undefined","data: SerializedAgentT<T, U, V>","fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z","fromConstructor: (args: V) => Z"],"sources":["../../src/agents/helpers.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { ToolInterface } from \"@langchain/core/tools\";\nimport type { SerializedAgentT, AgentInput } from \"./types.js\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\n\nexport const deserializeHelper = async <\n T extends string,\n U extends Record<string, unknown>,\n V extends AgentInput,\n Z
+
{"version":3,"file":"helpers.js","names":["llm: BaseLanguageModelInterface | undefined","tools: ToolInterface[] | undefined","data: SerializedAgentT<T, U, V>","fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z","fromConstructor: (args: V) => Z"],"sources":["../../src/agents/helpers.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { ToolInterface } from \"@langchain/core/tools\";\nimport type { SerializedAgentT, AgentInput } from \"./types.js\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\n\nexport const deserializeHelper = async <\n T extends string,\n U extends Record<string, unknown>,\n V extends AgentInput,\n Z,\n>(\n llm: BaseLanguageModelInterface | undefined,\n tools: ToolInterface[] | undefined,\n data: SerializedAgentT<T, U, V>,\n fromLLMAndTools: (\n llm: BaseLanguageModelInterface,\n tools: ToolInterface[],\n args: U\n ) => Z,\n fromConstructor: (args: V) => Z\n): Promise<Z> => {\n if (data.load_from_llm_and_tools) {\n if (!llm) {\n throw new Error(\"Loading from llm and tools, llm must be provided.\");\n }\n\n if (!tools) {\n throw new Error(\"Loading from llm and tools, tools must be provided.\");\n }\n\n return fromLLMAndTools(llm, tools, data);\n }\n if (!data.llm_chain) {\n throw new Error(\"Loading from constructor, llm_chain must be provided.\");\n }\n\n const llmChain = await LLMChain.deserialize(data.llm_chain);\n return fromConstructor({ ...data, llmChain });\n};\n"],"mappings":";;;AAKA,MAAa,oBAAoB,OAM/BA,KACAC,OACAC,MACAC,iBAKAC,oBACe;AACf,KAAI,KAAK,yBAAyB;AAChC,MAAI,CAAC,IACH,OAAM,IAAI,MAAM;AAGlB,MAAI,CAAC,MACH,OAAM,IAAI,MAAM;AAGlB,SAAO,gBAAgB,KAAK,OAAO,KAAK;CACzC;AACD,KAAI,CAAC,KAAK,UACR,OAAM,IAAI,MAAM;CAGlB,MAAM,WAAW,MAAM,SAAS,YAAY,KAAK,UAAU;AAC3D,QAAO,gBAAgB;EAAE,GAAG;EAAM;CAAU,EAAC;AAC9C"}
@@ -1 +1 @@
-
{"version":3,"file":"index.cjs","names":["action: AgentAction | FunctionsAgentAction","observation: string","FunctionMessage","AIMessage","intermediateSteps: AgentStep[]","Agent","OpenAIFunctionsAgentOutputParser","input: Omit<OpenAIAgentInput, \"outputParser\">","_tools: StructuredToolInterface[]","fields?: OpenAIAgentCreatePromptArgs","PREFIX","ChatPromptTemplate","SystemMessagePromptTemplate","MessagesPlaceholder","HumanMessagePromptTemplate","llm: BaseLanguageModelInterface","tools: StructuredToolInterface[]","args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">","LLMChain","steps: AgentStep[]","steps: Array<AgentStep>","inputs: ChainValues","callbackManager?: CallbackManager","newInputs: ChainValues","valuesForLLM: CallOptionsIfAvailable<typeof llm>","AgentRunnableSequence","RunnablePassthrough","input: { steps: AgentStep[] }","formatToOpenAIFunctionMessages"],"sources":["../../../src/agents/openai_functions/index.ts"],"sourcesContent":["import type {\n BaseLanguageModelInterface,\n BaseLanguageModelInput,\n BaseFunctionCallOptions,\n} from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { Runnable, RunnablePassthrough } from \"@langchain/core/runnables\";\nimport { ChatOpenAI, ChatOpenAICallOptions } from \"@langchain/openai\";\nimport type {\n AgentAction,\n AgentFinish,\n AgentStep,\n} from \"@langchain/core/agents\";\nimport { convertToOpenAIFunction } from \"@langchain/core/utils/function_calling\";\nimport {\n AIMessage,\n BaseMessage,\n FunctionMessage,\n SystemMessage,\n BaseMessageChunk,\n} from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n MessagesPlaceholder,\n SystemMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { Agent, AgentArgs, AgentRunnableSequence } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { PREFIX } from \"./prompt.js\";\nimport { LLMChain } from \"../../chains/llm_chain.js\";\nimport {\n FunctionsAgentAction,\n OpenAIFunctionsAgentOutputParser,\n} from \"../openai/output_parser.js\";\nimport { formatToOpenAIFunctionMessages } from \"../format_scratchpad/openai_functions.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? 
CO : any;\n\n/**\n * Checks if the given action is a FunctionsAgentAction.\n * @param action The action to check.\n * @returns True if the action is a FunctionsAgentAction, false otherwise.\n */\nfunction isFunctionsAgentAction(\n action: AgentAction | FunctionsAgentAction\n): action is FunctionsAgentAction {\n return (action as FunctionsAgentAction).messageLog !== undefined;\n}\n\nfunction _convertAgentStepToMessages(\n action: AgentAction | FunctionsAgentAction,\n observation: string\n) {\n if (isFunctionsAgentAction(action) && action.messageLog !== undefined) {\n return action.messageLog?.concat(\n new FunctionMessage(observation, action.tool)\n );\n } else {\n return [new AIMessage(action.log)];\n }\n}\n\nexport function _formatIntermediateSteps(\n intermediateSteps: AgentStep[]\n): BaseMessage[] {\n return intermediateSteps.flatMap(({ action, observation }) =>\n _convertAgentStepToMessages(action, observation)\n );\n}\n\n/**\n * Interface for the input data required to create an OpenAIAgent.\n */\nexport interface OpenAIAgentInput extends AgentInput {\n tools: StructuredToolInterface[];\n}\n\n/**\n * Interface for the arguments required to create a prompt for an\n * OpenAIAgent.\n */\nexport interface OpenAIAgentCreatePromptArgs {\n prefix?: string;\n systemMessage?: SystemMessage;\n}\n\n/**\n * Class representing an agent for the OpenAI chat model in LangChain. It\n * extends the Agent class and provides additional functionality specific\n * to the OpenAIAgent type.\n */\nexport class OpenAIAgent extends Agent {\n static lc_name() {\n return \"OpenAIAgent\";\n }\n\n lc_namespace = [\"langchain\", \"agents\", \"openai\"];\n\n _agentType() {\n return \"openai-functions\" as const;\n }\n\n observationPrefix() {\n return \"Observation: \";\n }\n\n llmPrefix() {\n return \"Thought:\";\n }\n\n _stop(): string[] {\n return [\"Observation:\"];\n }\n\n tools: StructuredToolInterface[];\n\n outputParser: OpenAIFunctionsAgentOutputParser =\n new OpenAIFunctionsAgentOutputParser();\n\n constructor(input: Omit<OpenAIAgentInput, \"outputParser\">) {\n super({ ...input, outputParser: undefined });\n this.tools = input.tools;\n }\n\n /**\n * Creates a prompt for the OpenAIAgent using the provided tools and\n * fields.\n * @param _tools The tools to be used in the prompt.\n * @param fields Optional fields for creating the prompt.\n * @returns A BasePromptTemplate object representing the created prompt.\n */\n static createPrompt(\n _tools: StructuredToolInterface[],\n fields?: OpenAIAgentCreatePromptArgs\n ): BasePromptTemplate {\n const { prefix = PREFIX } = fields || {};\n return ChatPromptTemplate.fromMessages([\n SystemMessagePromptTemplate.fromTemplate(prefix),\n new MessagesPlaceholder(\"chat_history\"),\n HumanMessagePromptTemplate.fromTemplate(\"{input}\"),\n new MessagesPlaceholder(\"agent_scratchpad\"),\n ]);\n }\n\n /**\n * Creates an OpenAIAgent from a BaseLanguageModel and a list of tools.\n * @param llm The BaseLanguageModel to use.\n * @param tools The tools to be used by the agent.\n * @param args Optional arguments for creating the agent.\n * @returns An instance of OpenAIAgent.\n */\n static fromLLMAndTools(\n llm: BaseLanguageModelInterface,\n tools: StructuredToolInterface[],\n args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">\n ) {\n OpenAIAgent.validateTools(tools);\n if (llm._modelType() !== \"base_chat_model\" || llm._llmType() !== \"openai\") {\n throw new Error(\"OpenAIAgent requires an OpenAI chat model\");\n }\n const prompt = 
OpenAIAgent.createPrompt(tools, args);\n const chain = new LLMChain({\n prompt,\n llm,\n callbacks: args?.callbacks,\n });\n return new OpenAIAgent({\n llmChain: chain,\n allowedTools: tools.map((t) => t.name),\n tools,\n });\n }\n\n /**\n * Constructs a scratch pad from a list of agent steps.\n * @param steps The steps to include in the scratch pad.\n * @returns A string or a list of BaseMessages representing the constructed scratch pad.\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return _formatIntermediateSteps(steps);\n }\n\n /**\n * Plans the next action or finish state of the agent based on the\n * provided steps, inputs, and optional callback manager.\n * @param steps The steps to consider in planning.\n * @param inputs The inputs to consider in planning.\n * @param callbackManager Optional CallbackManager to use in planning.\n * @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.\n */\n async plan(\n steps: Array<AgentStep>,\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n // Add scratchpad and stop to inputs\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: thoughts,\n };\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n // Split inputs between prompt and llm\n const llm = this.llmChain.llm as\n | ChatOpenAI\n | Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >;\n\n const valuesForPrompt = { ...newInputs };\n const valuesForLLM: CallOptionsIfAvailable<typeof llm> = {\n functions: this.tools.map((tool) => convertToOpenAIFunction(tool)),\n };\n const callKeys =\n \"callKeys\" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];\n for (const key of callKeys) {\n if (key in inputs) {\n valuesForLLM[key as keyof CallOptionsIfAvailable<typeof llm>] =\n inputs[key];\n delete valuesForPrompt[key];\n }\n }\n\n const promptValue = await this.llmChain.prompt.formatPromptValue(\n valuesForPrompt\n );\n\n const message = await (\n llm as Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >\n ).invoke(promptValue.toChatMessages(), {\n ...valuesForLLM,\n callbacks: callbackManager,\n });\n return this.outputParser.parseAIMessage(message);\n }\n}\n\n/**\n * Params used by the createOpenAIFunctionsAgent function.\n */\nexport type CreateOpenAIFunctionsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI function calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseFunctionCallOptions>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[];\n /** The prompt to use, must have an input key for `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n\n/**\n * Create an agent that uses OpenAI-style function calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIFunctionsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-functions-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * model: \"gpt-4o-mini\",\n * temperature: 0,\n * });\n *\n * const agent = await createOpenAIFunctionsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport async function createOpenAIFunctionsAgent({\n llm,\n tools,\n prompt,\n streamRunnable,\n}: CreateOpenAIFunctionsAgentParams) {\n if (!prompt.inputVariables.includes(\"agent_scratchpad\")) {\n throw new Error(\n [\n `Prompt must have an input variable named \"agent_scratchpad\".`,\n `Found ${JSON.stringify(prompt.inputVariables)} instead.`,\n ].join(\"\\n\")\n );\n }\n const llmWithTools = llm.bindTools\n ? 
llm.bindTools(tools)\n : llm.withConfig({\n functions: tools.map((tool) => convertToOpenAIFunction(tool)),\n });\n const agent = AgentRunnableSequence.fromRunnables(\n [\n RunnablePassthrough.assign({\n agent_scratchpad: (input: { steps: AgentStep[] }) =>\n formatToOpenAIFunctionMessages(input.steps),\n }),\n prompt,\n llmWithTools,\n new OpenAIFunctionsAgentOutputParser(),\n ],\n {\n name: \"OpenAIFunctionsAgent\",\n streamRunnable,\n singleAction: true,\n }\n );\n return agent;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAiDA,SAAS,uBACPA,QACgC;AAChC,QAAQ,OAAgC,eAAe;AACxD;AAED,SAAS,4BACPA,QACAC,aACA;AACA,KAAI,uBAAuB,OAAO,IAAI,OAAO,eAAe,OAC1D,QAAO,OAAO,YAAY,OACxB,IAAIC,0CAAgB,aAAa,OAAO,MACzC;KAED,QAAO,CAAC,IAAIC,oCAAU,OAAO,IAAK;AAErC;AAED,SAAgB,yBACdC,mBACe;AACf,QAAO,kBAAkB,QAAQ,CAAC,EAAE,QAAQ,aAAa,KACvD,4BAA4B,QAAQ,YAAY,CACjD;AACF;;;;;;AAuBD,IAAa,cAAb,MAAa,oBAAoBC,oBAAM;CACrC,OAAO,UAAU;AACf,SAAO;CACR;CAED,eAAe;EAAC;EAAa;EAAU;CAAS;CAEhD,aAAa;AACX,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO;CACR;CAED,YAAY;AACV,SAAO;CACR;CAED,QAAkB;AAChB,SAAO,CAAC,cAAe;CACxB;CAED;CAEA,eACE,IAAIC;CAEN,YAAYC,OAA+C;EACzD,MAAM;GAAE,GAAG;GAAO,cAAc;EAAW,EAAC;EAC5C,KAAK,QAAQ,MAAM;CACpB;;;;;;;;CASD,OAAO,aACLC,QACAC,QACoB;EACpB,MAAM,EAAE,SAASC,uBAAQ,GAAG,UAAU,CAAE;AACxC,SAAOC,4CAAmB,aAAa;GACrCC,qDAA4B,aAAa,OAAO;GAChD,IAAIC,6CAAoB;GACxBC,oDAA2B,aAAa,UAAU;GAClD,IAAID,6CAAoB;EACzB,EAAC;CACH;;;;;;;;CASD,OAAO,gBACLE,KACAC,OACAC,MACA;EACA,YAAY,cAAc,MAAM;AAChC,MAAI,IAAI,YAAY,KAAK,qBAAqB,IAAI,UAAU,KAAK,SAC/D,OAAM,IAAI,MAAM;EAElB,MAAM,SAAS,YAAY,aAAa,OAAO,KAAK;EACpD,MAAM,QAAQ,IAAIC,2BAAS;GACzB;GACA;GACA,WAAW,MAAM;EAClB;AACD,SAAO,IAAI,YAAY;GACrB,UAAU;GACV,cAAc,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK;GACtC;EACD;CACF;;;;;;CAOD,MAAM,oBACJC,OACiC;AACjC,SAAO,yBAAyB,MAAM;CACvC;;;;;;;;;CAUD,MAAM,KACJC,OACAC,QACAC,iBACoC;EAEpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMC,YAAyB;GAC7B,GAAG;GACH,kBAAkB;EACnB;AACD,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAI/B,MAAM,MAAM,KAAK,SAAS;EAQ1B,MAAM,kBAAkB,EAAE,GAAG,UAAW;EACxC,MAAMC,eAAmD,EACvD,WAAW,KAAK,MAAM,IAAI,CAAC,8EAAiC,KAAK,CAAC,CACnE;EACD,MAAM,WACJ,cAAc,KAAK,SAAS,MAAM,KAAK,SAAS,IAAI,WAAW,CAAE;AACnE,OAAK,MAAM,OAAO,SAChB,KAAI,OAAO,QAAQ;GACjB,aAAa,OACX,OAAO;GACT,OAAO,gBAAgB;EACxB;EAGH,MAAM,cAAc,MAAM,KAAK,SAAS,OAAO,kBAC7C,gBACD;EAED,MAAM,UAAU,MACd,IAKA,OAAO,YAAY,gBAAgB,EAAE;GACrC,GAAG;GACH,WAAW;EACZ,EAAC;AACF,SAAO,KAAK,aAAa,eAAe,QAAQ;CACjD;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+ED,eAAsB,2BAA2B,EAC/C,KACA,OACA,QACA,gBACiC,EAAE;AACnC,KAAI,CAAC,OAAO,eAAe,SAAS,mBAAmB,CACrD,OAAM,IAAI,MACR,CACE,CAAC,4DAA4D,CAAC,EAC9D,CAAC,MAAM,EAAE,KAAK,UAAU,OAAO,eAAe,CAAC,SAAS,CAAC,AAC1D,EAAC,KAAK,KAAK;CAGhB,MAAM,eAAe,IAAI,YACrB,IAAI,UAAU,MAAM,GACpB,IAAI,WAAW,EACb,WAAW,MAAM,IAAI,CAAC,8EAAiC,KAAK,CAAC,CAC9D,EAAC;CACN,MAAM,QAAQC,oCAAsB,cAClC;EACEC,+CAAoB,OAAO,EACzB,kBAAkB,CAACC,UACjBC,iFAA+B,MAAM,MAAM,CAC9C,EAAC;EACF;EACA;EACA,IAAItB;CACL,GACD;EACE,MAAM;EACN;EACA,cAAc;CACf,EACF;AACD,QAAO;AACR"}
+
{"version":3,"file":"index.cjs","names":["action: AgentAction | FunctionsAgentAction","observation: string","FunctionMessage","AIMessage","intermediateSteps: AgentStep[]","Agent","OpenAIFunctionsAgentOutputParser","input: Omit<OpenAIAgentInput, \"outputParser\">","_tools: StructuredToolInterface[]","fields?: OpenAIAgentCreatePromptArgs","PREFIX","ChatPromptTemplate","SystemMessagePromptTemplate","MessagesPlaceholder","HumanMessagePromptTemplate","llm: BaseLanguageModelInterface","tools: StructuredToolInterface[]","args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">","LLMChain","steps: AgentStep[]","steps: Array<AgentStep>","inputs: ChainValues","callbackManager?: CallbackManager","newInputs: ChainValues","valuesForLLM: CallOptionsIfAvailable<typeof llm>","AgentRunnableSequence","RunnablePassthrough","input: { steps: AgentStep[] }","formatToOpenAIFunctionMessages"],"sources":["../../../src/agents/openai_functions/index.ts"],"sourcesContent":["import type {\n BaseLanguageModelInterface,\n BaseLanguageModelInput,\n BaseFunctionCallOptions,\n} from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { Runnable, RunnablePassthrough } from \"@langchain/core/runnables\";\nimport { ChatOpenAI, ChatOpenAICallOptions } from \"@langchain/openai\";\nimport type {\n AgentAction,\n AgentFinish,\n AgentStep,\n} from \"@langchain/core/agents\";\nimport { convertToOpenAIFunction } from \"@langchain/core/utils/function_calling\";\nimport {\n AIMessage,\n BaseMessage,\n FunctionMessage,\n SystemMessage,\n BaseMessageChunk,\n} from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n MessagesPlaceholder,\n SystemMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { Agent, AgentArgs, AgentRunnableSequence } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { PREFIX } from \"./prompt.js\";\nimport { LLMChain } from \"../../chains/llm_chain.js\";\nimport {\n FunctionsAgentAction,\n OpenAIFunctionsAgentOutputParser,\n} from \"../openai/output_parser.js\";\nimport { formatToOpenAIFunctionMessages } from \"../format_scratchpad/openai_functions.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? 
CO : any;\n\n/**\n * Checks if the given action is a FunctionsAgentAction.\n * @param action The action to check.\n * @returns True if the action is a FunctionsAgentAction, false otherwise.\n */\nfunction isFunctionsAgentAction(\n action: AgentAction | FunctionsAgentAction\n): action is FunctionsAgentAction {\n return (action as FunctionsAgentAction).messageLog !== undefined;\n}\n\nfunction _convertAgentStepToMessages(\n action: AgentAction | FunctionsAgentAction,\n observation: string\n) {\n if (isFunctionsAgentAction(action) && action.messageLog !== undefined) {\n return action.messageLog?.concat(\n new FunctionMessage(observation, action.tool)\n );\n } else {\n return [new AIMessage(action.log)];\n }\n}\n\nexport function _formatIntermediateSteps(\n intermediateSteps: AgentStep[]\n): BaseMessage[] {\n return intermediateSteps.flatMap(({ action, observation }) =>\n _convertAgentStepToMessages(action, observation)\n );\n}\n\n/**\n * Interface for the input data required to create an OpenAIAgent.\n */\nexport interface OpenAIAgentInput extends AgentInput {\n tools: StructuredToolInterface[];\n}\n\n/**\n * Interface for the arguments required to create a prompt for an\n * OpenAIAgent.\n */\nexport interface OpenAIAgentCreatePromptArgs {\n prefix?: string;\n systemMessage?: SystemMessage;\n}\n\n/**\n * Class representing an agent for the OpenAI chat model in LangChain. It\n * extends the Agent class and provides additional functionality specific\n * to the OpenAIAgent type.\n */\nexport class OpenAIAgent extends Agent {\n static lc_name() {\n return \"OpenAIAgent\";\n }\n\n lc_namespace = [\"langchain\", \"agents\", \"openai\"];\n\n _agentType() {\n return \"openai-functions\" as const;\n }\n\n observationPrefix() {\n return \"Observation: \";\n }\n\n llmPrefix() {\n return \"Thought:\";\n }\n\n _stop(): string[] {\n return [\"Observation:\"];\n }\n\n tools: StructuredToolInterface[];\n\n outputParser: OpenAIFunctionsAgentOutputParser =\n new OpenAIFunctionsAgentOutputParser();\n\n constructor(input: Omit<OpenAIAgentInput, \"outputParser\">) {\n super({ ...input, outputParser: undefined });\n this.tools = input.tools;\n }\n\n /**\n * Creates a prompt for the OpenAIAgent using the provided tools and\n * fields.\n * @param _tools The tools to be used in the prompt.\n * @param fields Optional fields for creating the prompt.\n * @returns A BasePromptTemplate object representing the created prompt.\n */\n static createPrompt(\n _tools: StructuredToolInterface[],\n fields?: OpenAIAgentCreatePromptArgs\n ): BasePromptTemplate {\n const { prefix = PREFIX } = fields || {};\n return ChatPromptTemplate.fromMessages([\n SystemMessagePromptTemplate.fromTemplate(prefix),\n new MessagesPlaceholder(\"chat_history\"),\n HumanMessagePromptTemplate.fromTemplate(\"{input}\"),\n new MessagesPlaceholder(\"agent_scratchpad\"),\n ]);\n }\n\n /**\n * Creates an OpenAIAgent from a BaseLanguageModel and a list of tools.\n * @param llm The BaseLanguageModel to use.\n * @param tools The tools to be used by the agent.\n * @param args Optional arguments for creating the agent.\n * @returns An instance of OpenAIAgent.\n */\n static fromLLMAndTools(\n llm: BaseLanguageModelInterface,\n tools: StructuredToolInterface[],\n args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">\n ) {\n OpenAIAgent.validateTools(tools);\n if (llm._modelType() !== \"base_chat_model\" || llm._llmType() !== \"openai\") {\n throw new Error(\"OpenAIAgent requires an OpenAI chat model\");\n }\n const prompt = 
OpenAIAgent.createPrompt(tools, args);\n const chain = new LLMChain({\n prompt,\n llm,\n callbacks: args?.callbacks,\n });\n return new OpenAIAgent({\n llmChain: chain,\n allowedTools: tools.map((t) => t.name),\n tools,\n });\n }\n\n /**\n * Constructs a scratch pad from a list of agent steps.\n * @param steps The steps to include in the scratch pad.\n * @returns A string or a list of BaseMessages representing the constructed scratch pad.\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return _formatIntermediateSteps(steps);\n }\n\n /**\n * Plans the next action or finish state of the agent based on the\n * provided steps, inputs, and optional callback manager.\n * @param steps The steps to consider in planning.\n * @param inputs The inputs to consider in planning.\n * @param callbackManager Optional CallbackManager to use in planning.\n * @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.\n */\n async plan(\n steps: Array<AgentStep>,\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n // Add scratchpad and stop to inputs\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: thoughts,\n };\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n // Split inputs between prompt and llm\n const llm = this.llmChain.llm as\n | ChatOpenAI\n | Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >;\n\n const valuesForPrompt = { ...newInputs };\n const valuesForLLM: CallOptionsIfAvailable<typeof llm> = {\n functions: this.tools.map((tool) => convertToOpenAIFunction(tool)),\n };\n const callKeys =\n \"callKeys\" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];\n for (const key of callKeys) {\n if (key in inputs) {\n valuesForLLM[key as keyof CallOptionsIfAvailable<typeof llm>] =\n inputs[key];\n delete valuesForPrompt[key];\n }\n }\n\n const promptValue =\n await this.llmChain.prompt.formatPromptValue(valuesForPrompt);\n\n const message = await (\n llm as Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >\n ).invoke(promptValue.toChatMessages(), {\n ...valuesForLLM,\n callbacks: callbackManager,\n });\n return this.outputParser.parseAIMessage(message);\n }\n}\n\n/**\n * Params used by the createOpenAIFunctionsAgent function.\n */\nexport type CreateOpenAIFunctionsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI function calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseFunctionCallOptions>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[];\n /** The prompt to use, must have an input key for `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n\n/**\n * Create an agent that uses OpenAI-style function calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIFunctionsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-functions-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * model: \"gpt-4o-mini\",\n * temperature: 0,\n * });\n *\n * const agent = await createOpenAIFunctionsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport async function createOpenAIFunctionsAgent({\n llm,\n tools,\n prompt,\n streamRunnable,\n}: CreateOpenAIFunctionsAgentParams) {\n if (!prompt.inputVariables.includes(\"agent_scratchpad\")) {\n throw new Error(\n [\n `Prompt must have an input variable named \"agent_scratchpad\".`,\n `Found ${JSON.stringify(prompt.inputVariables)} instead.`,\n ].join(\"\\n\")\n );\n }\n const llmWithTools = llm.bindTools\n ? 
llm.bindTools(tools)\n : llm.withConfig({\n functions: tools.map((tool) => convertToOpenAIFunction(tool)),\n });\n const agent = AgentRunnableSequence.fromRunnables(\n [\n RunnablePassthrough.assign({\n agent_scratchpad: (input: { steps: AgentStep[] }) =>\n formatToOpenAIFunctionMessages(input.steps),\n }),\n prompt,\n llmWithTools,\n new OpenAIFunctionsAgentOutputParser(),\n ],\n {\n name: \"OpenAIFunctionsAgent\",\n streamRunnable,\n singleAction: true,\n }\n );\n return agent;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AAiDA,SAAS,uBACPA,QACgC;AAChC,QAAQ,OAAgC,eAAe;AACxD;AAED,SAAS,4BACPA,QACAC,aACA;AACA,KAAI,uBAAuB,OAAO,IAAI,OAAO,eAAe,OAC1D,QAAO,OAAO,YAAY,OACxB,IAAIC,0CAAgB,aAAa,OAAO,MACzC;KAED,QAAO,CAAC,IAAIC,oCAAU,OAAO,IAAK;AAErC;AAED,SAAgB,yBACdC,mBACe;AACf,QAAO,kBAAkB,QAAQ,CAAC,EAAE,QAAQ,aAAa,KACvD,4BAA4B,QAAQ,YAAY,CACjD;AACF;;;;;;AAuBD,IAAa,cAAb,MAAa,oBAAoBC,oBAAM;CACrC,OAAO,UAAU;AACf,SAAO;CACR;CAED,eAAe;EAAC;EAAa;EAAU;CAAS;CAEhD,aAAa;AACX,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO;CACR;CAED,YAAY;AACV,SAAO;CACR;CAED,QAAkB;AAChB,SAAO,CAAC,cAAe;CACxB;CAED;CAEA,eACE,IAAIC;CAEN,YAAYC,OAA+C;EACzD,MAAM;GAAE,GAAG;GAAO,cAAc;EAAW,EAAC;EAC5C,KAAK,QAAQ,MAAM;CACpB;;;;;;;;CASD,OAAO,aACLC,QACAC,QACoB;EACpB,MAAM,EAAE,SAASC,uBAAQ,GAAG,UAAU,CAAE;AACxC,SAAOC,4CAAmB,aAAa;GACrCC,qDAA4B,aAAa,OAAO;GAChD,IAAIC,6CAAoB;GACxBC,oDAA2B,aAAa,UAAU;GAClD,IAAID,6CAAoB;EACzB,EAAC;CACH;;;;;;;;CASD,OAAO,gBACLE,KACAC,OACAC,MACA;EACA,YAAY,cAAc,MAAM;AAChC,MAAI,IAAI,YAAY,KAAK,qBAAqB,IAAI,UAAU,KAAK,SAC/D,OAAM,IAAI,MAAM;EAElB,MAAM,SAAS,YAAY,aAAa,OAAO,KAAK;EACpD,MAAM,QAAQ,IAAIC,2BAAS;GACzB;GACA;GACA,WAAW,MAAM;EAClB;AACD,SAAO,IAAI,YAAY;GACrB,UAAU;GACV,cAAc,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK;GACtC;EACD;CACF;;;;;;CAOD,MAAM,oBACJC,OACiC;AACjC,SAAO,yBAAyB,MAAM;CACvC;;;;;;;;;CAUD,MAAM,KACJC,OACAC,QACAC,iBACoC;EAEpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMC,YAAyB;GAC7B,GAAG;GACH,kBAAkB;EACnB;AACD,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAI/B,MAAM,MAAM,KAAK,SAAS;EAQ1B,MAAM,kBAAkB,EAAE,GAAG,UAAW;EACxC,MAAMC,eAAmD,EACvD,WAAW,KAAK,MAAM,IAAI,CAAC,8EAAiC,KAAK,CAAC,CACnE;EACD,MAAM,WACJ,cAAc,KAAK,SAAS,MAAM,KAAK,SAAS,IAAI,WAAW,CAAE;AACnE,OAAK,MAAM,OAAO,SAChB,KAAI,OAAO,QAAQ;GACjB,aAAa,OACX,OAAO;GACT,OAAO,gBAAgB;EACxB;EAGH,MAAM,cACJ,MAAM,KAAK,SAAS,OAAO,kBAAkB,gBAAgB;EAE/D,MAAM,UAAU,MACd,IAKA,OAAO,YAAY,gBAAgB,EAAE;GACrC,GAAG;GACH,WAAW;EACZ,EAAC;AACF,SAAO,KAAK,aAAa,eAAe,QAAQ;CACjD;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+ED,eAAsB,2BAA2B,EAC/C,KACA,OACA,QACA,gBACiC,EAAE;AACnC,KAAI,CAAC,OAAO,eAAe,SAAS,mBAAmB,CACrD,OAAM,IAAI,MACR,CACE,CAAC,4DAA4D,CAAC,EAC9D,CAAC,MAAM,EAAE,KAAK,UAAU,OAAO,eAAe,CAAC,SAAS,CAAC,AAC1D,EAAC,KAAK,KAAK;CAGhB,MAAM,eAAe,IAAI,YACrB,IAAI,UAAU,MAAM,GACpB,IAAI,WAAW,EACb,WAAW,MAAM,IAAI,CAAC,8EAAiC,KAAK,CAAC,CAC9D,EAAC;CACN,MAAM,QAAQC,oCAAsB,cAClC;EACEC,+CAAoB,OAAO,EACzB,kBAAkB,CAACC,UACjBC,iFAA+B,MAAM,MAAM,CAC9C,EAAC;EACF;EACA;EACA,IAAItB;CACL,GACD;EACE,MAAM;EACN;EACA,cAAc;CACf,EACF;AACD,QAAO;AACR"}
@@ -1 +1 @@
-
{"version":3,"file":"index.js","names":["action: AgentAction | FunctionsAgentAction","observation: string","intermediateSteps: AgentStep[]","input: Omit<OpenAIAgentInput, \"outputParser\">","_tools: StructuredToolInterface[]","fields?: OpenAIAgentCreatePromptArgs","llm: BaseLanguageModelInterface","tools: StructuredToolInterface[]","args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">","steps: AgentStep[]","steps: Array<AgentStep>","inputs: ChainValues","callbackManager?: CallbackManager","newInputs: ChainValues","valuesForLLM: CallOptionsIfAvailable<typeof llm>","input: { steps: AgentStep[] }"],"sources":["../../../src/agents/openai_functions/index.ts"],"sourcesContent":["import type {\n BaseLanguageModelInterface,\n BaseLanguageModelInput,\n BaseFunctionCallOptions,\n} from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { Runnable, RunnablePassthrough } from \"@langchain/core/runnables\";\nimport { ChatOpenAI, ChatOpenAICallOptions } from \"@langchain/openai\";\nimport type {\n AgentAction,\n AgentFinish,\n AgentStep,\n} from \"@langchain/core/agents\";\nimport { convertToOpenAIFunction } from \"@langchain/core/utils/function_calling\";\nimport {\n AIMessage,\n BaseMessage,\n FunctionMessage,\n SystemMessage,\n BaseMessageChunk,\n} from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n MessagesPlaceholder,\n SystemMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { Agent, AgentArgs, AgentRunnableSequence } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { PREFIX } from \"./prompt.js\";\nimport { LLMChain } from \"../../chains/llm_chain.js\";\nimport {\n FunctionsAgentAction,\n OpenAIFunctionsAgentOutputParser,\n} from \"../openai/output_parser.js\";\nimport { formatToOpenAIFunctionMessages } from \"../format_scratchpad/openai_functions.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? 
CO : any;\n\n/**\n * Checks if the given action is a FunctionsAgentAction.\n * @param action The action to check.\n * @returns True if the action is a FunctionsAgentAction, false otherwise.\n */\nfunction isFunctionsAgentAction(\n action: AgentAction | FunctionsAgentAction\n): action is FunctionsAgentAction {\n return (action as FunctionsAgentAction).messageLog !== undefined;\n}\n\nfunction _convertAgentStepToMessages(\n action: AgentAction | FunctionsAgentAction,\n observation: string\n) {\n if (isFunctionsAgentAction(action) && action.messageLog !== undefined) {\n return action.messageLog?.concat(\n new FunctionMessage(observation, action.tool)\n );\n } else {\n return [new AIMessage(action.log)];\n }\n}\n\nexport function _formatIntermediateSteps(\n intermediateSteps: AgentStep[]\n): BaseMessage[] {\n return intermediateSteps.flatMap(({ action, observation }) =>\n _convertAgentStepToMessages(action, observation)\n );\n}\n\n/**\n * Interface for the input data required to create an OpenAIAgent.\n */\nexport interface OpenAIAgentInput extends AgentInput {\n tools: StructuredToolInterface[];\n}\n\n/**\n * Interface for the arguments required to create a prompt for an\n * OpenAIAgent.\n */\nexport interface OpenAIAgentCreatePromptArgs {\n prefix?: string;\n systemMessage?: SystemMessage;\n}\n\n/**\n * Class representing an agent for the OpenAI chat model in LangChain. It\n * extends the Agent class and provides additional functionality specific\n * to the OpenAIAgent type.\n */\nexport class OpenAIAgent extends Agent {\n static lc_name() {\n return \"OpenAIAgent\";\n }\n\n lc_namespace = [\"langchain\", \"agents\", \"openai\"];\n\n _agentType() {\n return \"openai-functions\" as const;\n }\n\n observationPrefix() {\n return \"Observation: \";\n }\n\n llmPrefix() {\n return \"Thought:\";\n }\n\n _stop(): string[] {\n return [\"Observation:\"];\n }\n\n tools: StructuredToolInterface[];\n\n outputParser: OpenAIFunctionsAgentOutputParser =\n new OpenAIFunctionsAgentOutputParser();\n\n constructor(input: Omit<OpenAIAgentInput, \"outputParser\">) {\n super({ ...input, outputParser: undefined });\n this.tools = input.tools;\n }\n\n /**\n * Creates a prompt for the OpenAIAgent using the provided tools and\n * fields.\n * @param _tools The tools to be used in the prompt.\n * @param fields Optional fields for creating the prompt.\n * @returns A BasePromptTemplate object representing the created prompt.\n */\n static createPrompt(\n _tools: StructuredToolInterface[],\n fields?: OpenAIAgentCreatePromptArgs\n ): BasePromptTemplate {\n const { prefix = PREFIX } = fields || {};\n return ChatPromptTemplate.fromMessages([\n SystemMessagePromptTemplate.fromTemplate(prefix),\n new MessagesPlaceholder(\"chat_history\"),\n HumanMessagePromptTemplate.fromTemplate(\"{input}\"),\n new MessagesPlaceholder(\"agent_scratchpad\"),\n ]);\n }\n\n /**\n * Creates an OpenAIAgent from a BaseLanguageModel and a list of tools.\n * @param llm The BaseLanguageModel to use.\n * @param tools The tools to be used by the agent.\n * @param args Optional arguments for creating the agent.\n * @returns An instance of OpenAIAgent.\n */\n static fromLLMAndTools(\n llm: BaseLanguageModelInterface,\n tools: StructuredToolInterface[],\n args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">\n ) {\n OpenAIAgent.validateTools(tools);\n if (llm._modelType() !== \"base_chat_model\" || llm._llmType() !== \"openai\") {\n throw new Error(\"OpenAIAgent requires an OpenAI chat model\");\n }\n const prompt = 
OpenAIAgent.createPrompt(tools, args);\n const chain = new LLMChain({\n prompt,\n llm,\n callbacks: args?.callbacks,\n });\n return new OpenAIAgent({\n llmChain: chain,\n allowedTools: tools.map((t) => t.name),\n tools,\n });\n }\n\n /**\n * Constructs a scratch pad from a list of agent steps.\n * @param steps The steps to include in the scratch pad.\n * @returns A string or a list of BaseMessages representing the constructed scratch pad.\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return _formatIntermediateSteps(steps);\n }\n\n /**\n * Plans the next action or finish state of the agent based on the\n * provided steps, inputs, and optional callback manager.\n * @param steps The steps to consider in planning.\n * @param inputs The inputs to consider in planning.\n * @param callbackManager Optional CallbackManager to use in planning.\n * @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.\n */\n async plan(\n steps: Array<AgentStep>,\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n // Add scratchpad and stop to inputs\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: thoughts,\n };\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n // Split inputs between prompt and llm\n const llm = this.llmChain.llm as\n | ChatOpenAI\n | Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >;\n\n const valuesForPrompt = { ...newInputs };\n const valuesForLLM: CallOptionsIfAvailable<typeof llm> = {\n functions: this.tools.map((tool) => convertToOpenAIFunction(tool)),\n };\n const callKeys =\n \"callKeys\" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];\n for (const key of callKeys) {\n if (key in inputs) {\n valuesForLLM[key as keyof CallOptionsIfAvailable<typeof llm>] =\n inputs[key];\n delete valuesForPrompt[key];\n }\n }\n\n const promptValue = await this.llmChain.prompt.formatPromptValue(\n valuesForPrompt\n );\n\n const message = await (\n llm as Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >\n ).invoke(promptValue.toChatMessages(), {\n ...valuesForLLM,\n callbacks: callbackManager,\n });\n return this.outputParser.parseAIMessage(message);\n }\n}\n\n/**\n * Params used by the createOpenAIFunctionsAgent function.\n */\nexport type CreateOpenAIFunctionsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI function calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseFunctionCallOptions>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[];\n /** The prompt to use, must have an input key for `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n\n/**\n * Create an agent that uses OpenAI-style function calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIFunctionsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-functions-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * model: \"gpt-4o-mini\",\n * temperature: 0,\n * });\n *\n * const agent = await createOpenAIFunctionsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport async function createOpenAIFunctionsAgent({\n llm,\n tools,\n prompt,\n streamRunnable,\n}: CreateOpenAIFunctionsAgentParams) {\n if (!prompt.inputVariables.includes(\"agent_scratchpad\")) {\n throw new Error(\n [\n `Prompt must have an input variable named \"agent_scratchpad\".`,\n `Found ${JSON.stringify(prompt.inputVariables)} instead.`,\n ].join(\"\\n\")\n );\n }\n const llmWithTools = llm.bindTools\n ? 
llm.bindTools(tools)\n : llm.withConfig({\n functions: tools.map((tool) => convertToOpenAIFunction(tool)),\n });\n const agent = AgentRunnableSequence.fromRunnables(\n [\n RunnablePassthrough.assign({\n agent_scratchpad: (input: { steps: AgentStep[] }) =>\n formatToOpenAIFunctionMessages(input.steps),\n }),\n prompt,\n llmWithTools,\n new OpenAIFunctionsAgentOutputParser(),\n ],\n {\n name: \"OpenAIFunctionsAgent\",\n streamRunnable,\n singleAction: true,\n }\n );\n return agent;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AAiDA,SAAS,uBACPA,QACgC;AAChC,QAAQ,OAAgC,eAAe;AACxD;AAED,SAAS,4BACPA,QACAC,aACA;AACA,KAAI,uBAAuB,OAAO,IAAI,OAAO,eAAe,OAC1D,QAAO,OAAO,YAAY,OACxB,IAAI,gBAAgB,aAAa,OAAO,MACzC;KAED,QAAO,CAAC,IAAI,UAAU,OAAO,IAAK;AAErC;AAED,SAAgB,yBACdC,mBACe;AACf,QAAO,kBAAkB,QAAQ,CAAC,EAAE,QAAQ,aAAa,KACvD,4BAA4B,QAAQ,YAAY,CACjD;AACF;;;;;;AAuBD,IAAa,cAAb,MAAa,oBAAoB,MAAM;CACrC,OAAO,UAAU;AACf,SAAO;CACR;CAED,eAAe;EAAC;EAAa;EAAU;CAAS;CAEhD,aAAa;AACX,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO;CACR;CAED,YAAY;AACV,SAAO;CACR;CAED,QAAkB;AAChB,SAAO,CAAC,cAAe;CACxB;CAED;CAEA,eACE,IAAI;CAEN,YAAYC,OAA+C;EACzD,MAAM;GAAE,GAAG;GAAO,cAAc;EAAW,EAAC;EAC5C,KAAK,QAAQ,MAAM;CACpB;;;;;;;;CASD,OAAO,aACLC,QACAC,QACoB;EACpB,MAAM,EAAE,SAAS,QAAQ,GAAG,UAAU,CAAE;AACxC,SAAO,mBAAmB,aAAa;GACrC,4BAA4B,aAAa,OAAO;GAChD,IAAI,oBAAoB;GACxB,2BAA2B,aAAa,UAAU;GAClD,IAAI,oBAAoB;EACzB,EAAC;CACH;;;;;;;;CASD,OAAO,gBACLC,KACAC,OACAC,MACA;EACA,YAAY,cAAc,MAAM;AAChC,MAAI,IAAI,YAAY,KAAK,qBAAqB,IAAI,UAAU,KAAK,SAC/D,OAAM,IAAI,MAAM;EAElB,MAAM,SAAS,YAAY,aAAa,OAAO,KAAK;EACpD,MAAM,QAAQ,IAAI,SAAS;GACzB;GACA;GACA,WAAW,MAAM;EAClB;AACD,SAAO,IAAI,YAAY;GACrB,UAAU;GACV,cAAc,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK;GACtC;EACD;CACF;;;;;;CAOD,MAAM,oBACJC,OACiC;AACjC,SAAO,yBAAyB,MAAM;CACvC;;;;;;;;;CAUD,MAAM,KACJC,OACAC,QACAC,iBACoC;EAEpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMC,YAAyB;GAC7B,GAAG;GACH,kBAAkB;EACnB;AACD,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAI/B,MAAM,MAAM,KAAK,SAAS;EAQ1B,MAAM,kBAAkB,EAAE,GAAG,UAAW;EACxC,MAAMC,eAAmD,EACvD,WAAW,KAAK,MAAM,IAAI,CAAC,SAAS,wBAAwB,KAAK,CAAC,CACnE;EACD,MAAM,WACJ,cAAc,KAAK,SAAS,MAAM,KAAK,SAAS,IAAI,WAAW,CAAE;AACnE,OAAK,MAAM,OAAO,SAChB,KAAI,OAAO,QAAQ;GACjB,aAAa,OACX,OAAO;GACT,OAAO,gBAAgB;EACxB;EAGH,MAAM,cAAc,MAAM,KAAK,SAAS,OAAO,kBAC7C,gBACD;EAED,MAAM,UAAU,MACd,IAKA,OAAO,YAAY,gBAAgB,EAAE;GACrC,GAAG;GACH,WAAW;EACZ,EAAC;AACF,SAAO,KAAK,aAAa,eAAe,QAAQ;CACjD;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+ED,eAAsB,2BAA2B,EAC/C,KACA,OACA,QACA,gBACiC,EAAE;AACnC,KAAI,CAAC,OAAO,eAAe,SAAS,mBAAmB,CACrD,OAAM,IAAI,MACR,CACE,CAAC,4DAA4D,CAAC,EAC9D,CAAC,MAAM,EAAE,KAAK,UAAU,OAAO,eAAe,CAAC,SAAS,CAAC,AAC1D,EAAC,KAAK,KAAK;CAGhB,MAAM,eAAe,IAAI,YACrB,IAAI,UAAU,MAAM,GACpB,IAAI,WAAW,EACb,WAAW,MAAM,IAAI,CAAC,SAAS,wBAAwB,KAAK,CAAC,CAC9D,EAAC;CACN,MAAM,QAAQ,sBAAsB,cAClC;EACE,oBAAoB,OAAO,EACzB,kBAAkB,CAACC,UACjB,+BAA+B,MAAM,MAAM,CAC9C,EAAC;EACF;EACA;EACA,IAAI;CACL,GACD;EACE,MAAM;EACN;EACA,cAAc;CACf,EACF;AACD,QAAO;AACR"}
+
{"version":3,"file":"index.js","names":["action: AgentAction | FunctionsAgentAction","observation: string","intermediateSteps: AgentStep[]","input: Omit<OpenAIAgentInput, \"outputParser\">","_tools: StructuredToolInterface[]","fields?: OpenAIAgentCreatePromptArgs","llm: BaseLanguageModelInterface","tools: StructuredToolInterface[]","args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">","steps: AgentStep[]","steps: Array<AgentStep>","inputs: ChainValues","callbackManager?: CallbackManager","newInputs: ChainValues","valuesForLLM: CallOptionsIfAvailable<typeof llm>","input: { steps: AgentStep[] }"],"sources":["../../../src/agents/openai_functions/index.ts"],"sourcesContent":["import type {\n BaseLanguageModelInterface,\n BaseLanguageModelInput,\n BaseFunctionCallOptions,\n} from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { Runnable, RunnablePassthrough } from \"@langchain/core/runnables\";\nimport { ChatOpenAI, ChatOpenAICallOptions } from \"@langchain/openai\";\nimport type {\n AgentAction,\n AgentFinish,\n AgentStep,\n} from \"@langchain/core/agents\";\nimport { convertToOpenAIFunction } from \"@langchain/core/utils/function_calling\";\nimport {\n AIMessage,\n BaseMessage,\n FunctionMessage,\n SystemMessage,\n BaseMessageChunk,\n} from \"@langchain/core/messages\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n MessagesPlaceholder,\n SystemMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { Agent, AgentArgs, AgentRunnableSequence } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { PREFIX } from \"./prompt.js\";\nimport { LLMChain } from \"../../chains/llm_chain.js\";\nimport {\n FunctionsAgentAction,\n OpenAIFunctionsAgentOutputParser,\n} from \"../openai/output_parser.js\";\nimport { formatToOpenAIFunctionMessages } from \"../format_scratchpad/openai_functions.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype CallOptionsIfAvailable<T> = T extends { CallOptions: infer CO } ? 
CO : any;\n\n/**\n * Checks if the given action is a FunctionsAgentAction.\n * @param action The action to check.\n * @returns True if the action is a FunctionsAgentAction, false otherwise.\n */\nfunction isFunctionsAgentAction(\n action: AgentAction | FunctionsAgentAction\n): action is FunctionsAgentAction {\n return (action as FunctionsAgentAction).messageLog !== undefined;\n}\n\nfunction _convertAgentStepToMessages(\n action: AgentAction | FunctionsAgentAction,\n observation: string\n) {\n if (isFunctionsAgentAction(action) && action.messageLog !== undefined) {\n return action.messageLog?.concat(\n new FunctionMessage(observation, action.tool)\n );\n } else {\n return [new AIMessage(action.log)];\n }\n}\n\nexport function _formatIntermediateSteps(\n intermediateSteps: AgentStep[]\n): BaseMessage[] {\n return intermediateSteps.flatMap(({ action, observation }) =>\n _convertAgentStepToMessages(action, observation)\n );\n}\n\n/**\n * Interface for the input data required to create an OpenAIAgent.\n */\nexport interface OpenAIAgentInput extends AgentInput {\n tools: StructuredToolInterface[];\n}\n\n/**\n * Interface for the arguments required to create a prompt for an\n * OpenAIAgent.\n */\nexport interface OpenAIAgentCreatePromptArgs {\n prefix?: string;\n systemMessage?: SystemMessage;\n}\n\n/**\n * Class representing an agent for the OpenAI chat model in LangChain. It\n * extends the Agent class and provides additional functionality specific\n * to the OpenAIAgent type.\n */\nexport class OpenAIAgent extends Agent {\n static lc_name() {\n return \"OpenAIAgent\";\n }\n\n lc_namespace = [\"langchain\", \"agents\", \"openai\"];\n\n _agentType() {\n return \"openai-functions\" as const;\n }\n\n observationPrefix() {\n return \"Observation: \";\n }\n\n llmPrefix() {\n return \"Thought:\";\n }\n\n _stop(): string[] {\n return [\"Observation:\"];\n }\n\n tools: StructuredToolInterface[];\n\n outputParser: OpenAIFunctionsAgentOutputParser =\n new OpenAIFunctionsAgentOutputParser();\n\n constructor(input: Omit<OpenAIAgentInput, \"outputParser\">) {\n super({ ...input, outputParser: undefined });\n this.tools = input.tools;\n }\n\n /**\n * Creates a prompt for the OpenAIAgent using the provided tools and\n * fields.\n * @param _tools The tools to be used in the prompt.\n * @param fields Optional fields for creating the prompt.\n * @returns A BasePromptTemplate object representing the created prompt.\n */\n static createPrompt(\n _tools: StructuredToolInterface[],\n fields?: OpenAIAgentCreatePromptArgs\n ): BasePromptTemplate {\n const { prefix = PREFIX } = fields || {};\n return ChatPromptTemplate.fromMessages([\n SystemMessagePromptTemplate.fromTemplate(prefix),\n new MessagesPlaceholder(\"chat_history\"),\n HumanMessagePromptTemplate.fromTemplate(\"{input}\"),\n new MessagesPlaceholder(\"agent_scratchpad\"),\n ]);\n }\n\n /**\n * Creates an OpenAIAgent from a BaseLanguageModel and a list of tools.\n * @param llm The BaseLanguageModel to use.\n * @param tools The tools to be used by the agent.\n * @param args Optional arguments for creating the agent.\n * @returns An instance of OpenAIAgent.\n */\n static fromLLMAndTools(\n llm: BaseLanguageModelInterface,\n tools: StructuredToolInterface[],\n args?: OpenAIAgentCreatePromptArgs & Pick<AgentArgs, \"callbacks\">\n ) {\n OpenAIAgent.validateTools(tools);\n if (llm._modelType() !== \"base_chat_model\" || llm._llmType() !== \"openai\") {\n throw new Error(\"OpenAIAgent requires an OpenAI chat model\");\n }\n const prompt = 
OpenAIAgent.createPrompt(tools, args);\n const chain = new LLMChain({\n prompt,\n llm,\n callbacks: args?.callbacks,\n });\n return new OpenAIAgent({\n llmChain: chain,\n allowedTools: tools.map((t) => t.name),\n tools,\n });\n }\n\n /**\n * Constructs a scratch pad from a list of agent steps.\n * @param steps The steps to include in the scratch pad.\n * @returns A string or a list of BaseMessages representing the constructed scratch pad.\n */\n async constructScratchPad(\n steps: AgentStep[]\n ): Promise<string | BaseMessage[]> {\n return _formatIntermediateSteps(steps);\n }\n\n /**\n * Plans the next action or finish state of the agent based on the\n * provided steps, inputs, and optional callback manager.\n * @param steps The steps to consider in planning.\n * @param inputs The inputs to consider in planning.\n * @param callbackManager Optional CallbackManager to use in planning.\n * @returns A Promise that resolves to an AgentAction or AgentFinish object representing the planned action or finish state.\n */\n async plan(\n steps: Array<AgentStep>,\n inputs: ChainValues,\n callbackManager?: CallbackManager\n ): Promise<AgentAction | AgentFinish> {\n // Add scratchpad and stop to inputs\n const thoughts = await this.constructScratchPad(steps);\n const newInputs: ChainValues = {\n ...inputs,\n agent_scratchpad: thoughts,\n };\n if (this._stop().length !== 0) {\n newInputs.stop = this._stop();\n }\n\n // Split inputs between prompt and llm\n const llm = this.llmChain.llm as\n | ChatOpenAI\n | Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >;\n\n const valuesForPrompt = { ...newInputs };\n const valuesForLLM: CallOptionsIfAvailable<typeof llm> = {\n functions: this.tools.map((tool) => convertToOpenAIFunction(tool)),\n };\n const callKeys =\n \"callKeys\" in this.llmChain.llm ? this.llmChain.llm.callKeys : [];\n for (const key of callKeys) {\n if (key in inputs) {\n valuesForLLM[key as keyof CallOptionsIfAvailable<typeof llm>] =\n inputs[key];\n delete valuesForPrompt[key];\n }\n }\n\n const promptValue =\n await this.llmChain.prompt.formatPromptValue(valuesForPrompt);\n\n const message = await (\n llm as Runnable<\n BaseLanguageModelInput,\n BaseMessageChunk,\n ChatOpenAICallOptions\n >\n ).invoke(promptValue.toChatMessages(), {\n ...valuesForLLM,\n callbacks: callbackManager,\n });\n return this.outputParser.parseAIMessage(message);\n }\n}\n\n/**\n * Params used by the createOpenAIFunctionsAgent function.\n */\nexport type CreateOpenAIFunctionsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI function calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseFunctionCallOptions>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[];\n /** The prompt to use, must have an input key for `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n\n/**\n * Create an agent that uses OpenAI-style function calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIFunctionsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-functions-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-functions-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * model: \"gpt-4o-mini\",\n * temperature: 0,\n * });\n *\n * const agent = await createOpenAIFunctionsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport async function createOpenAIFunctionsAgent({\n llm,\n tools,\n prompt,\n streamRunnable,\n}: CreateOpenAIFunctionsAgentParams) {\n if (!prompt.inputVariables.includes(\"agent_scratchpad\")) {\n throw new Error(\n [\n `Prompt must have an input variable named \"agent_scratchpad\".`,\n `Found ${JSON.stringify(prompt.inputVariables)} instead.`,\n ].join(\"\\n\")\n );\n }\n const llmWithTools = llm.bindTools\n ? 
llm.bindTools(tools)\n : llm.withConfig({\n functions: tools.map((tool) => convertToOpenAIFunction(tool)),\n });\n const agent = AgentRunnableSequence.fromRunnables(\n [\n RunnablePassthrough.assign({\n agent_scratchpad: (input: { steps: AgentStep[] }) =>\n formatToOpenAIFunctionMessages(input.steps),\n }),\n prompt,\n llmWithTools,\n new OpenAIFunctionsAgentOutputParser(),\n ],\n {\n name: \"OpenAIFunctionsAgent\",\n streamRunnable,\n singleAction: true,\n }\n );\n return agent;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;AAiDA,SAAS,uBACPA,QACgC;AAChC,QAAQ,OAAgC,eAAe;AACxD;AAED,SAAS,4BACPA,QACAC,aACA;AACA,KAAI,uBAAuB,OAAO,IAAI,OAAO,eAAe,OAC1D,QAAO,OAAO,YAAY,OACxB,IAAI,gBAAgB,aAAa,OAAO,MACzC;KAED,QAAO,CAAC,IAAI,UAAU,OAAO,IAAK;AAErC;AAED,SAAgB,yBACdC,mBACe;AACf,QAAO,kBAAkB,QAAQ,CAAC,EAAE,QAAQ,aAAa,KACvD,4BAA4B,QAAQ,YAAY,CACjD;AACF;;;;;;AAuBD,IAAa,cAAb,MAAa,oBAAoB,MAAM;CACrC,OAAO,UAAU;AACf,SAAO;CACR;CAED,eAAe;EAAC;EAAa;EAAU;CAAS;CAEhD,aAAa;AACX,SAAO;CACR;CAED,oBAAoB;AAClB,SAAO;CACR;CAED,YAAY;AACV,SAAO;CACR;CAED,QAAkB;AAChB,SAAO,CAAC,cAAe;CACxB;CAED;CAEA,eACE,IAAI;CAEN,YAAYC,OAA+C;EACzD,MAAM;GAAE,GAAG;GAAO,cAAc;EAAW,EAAC;EAC5C,KAAK,QAAQ,MAAM;CACpB;;;;;;;;CASD,OAAO,aACLC,QACAC,QACoB;EACpB,MAAM,EAAE,SAAS,QAAQ,GAAG,UAAU,CAAE;AACxC,SAAO,mBAAmB,aAAa;GACrC,4BAA4B,aAAa,OAAO;GAChD,IAAI,oBAAoB;GACxB,2BAA2B,aAAa,UAAU;GAClD,IAAI,oBAAoB;EACzB,EAAC;CACH;;;;;;;;CASD,OAAO,gBACLC,KACAC,OACAC,MACA;EACA,YAAY,cAAc,MAAM;AAChC,MAAI,IAAI,YAAY,KAAK,qBAAqB,IAAI,UAAU,KAAK,SAC/D,OAAM,IAAI,MAAM;EAElB,MAAM,SAAS,YAAY,aAAa,OAAO,KAAK;EACpD,MAAM,QAAQ,IAAI,SAAS;GACzB;GACA;GACA,WAAW,MAAM;EAClB;AACD,SAAO,IAAI,YAAY;GACrB,UAAU;GACV,cAAc,MAAM,IAAI,CAAC,MAAM,EAAE,KAAK;GACtC;EACD;CACF;;;;;;CAOD,MAAM,oBACJC,OACiC;AACjC,SAAO,yBAAyB,MAAM;CACvC;;;;;;;;;CAUD,MAAM,KACJC,OACAC,QACAC,iBACoC;EAEpC,MAAM,WAAW,MAAM,KAAK,oBAAoB,MAAM;EACtD,MAAMC,YAAyB;GAC7B,GAAG;GACH,kBAAkB;EACnB;AACD,MAAI,KAAK,OAAO,CAAC,WAAW,GAC1B,UAAU,OAAO,KAAK,OAAO;EAI/B,MAAM,MAAM,KAAK,SAAS;EAQ1B,MAAM,kBAAkB,EAAE,GAAG,UAAW;EACxC,MAAMC,eAAmD,EACvD,WAAW,KAAK,MAAM,IAAI,CAAC,SAAS,wBAAwB,KAAK,CAAC,CACnE;EACD,MAAM,WACJ,cAAc,KAAK,SAAS,MAAM,KAAK,SAAS,IAAI,WAAW,CAAE;AACnE,OAAK,MAAM,OAAO,SAChB,KAAI,OAAO,QAAQ;GACjB,aAAa,OACX,OAAO;GACT,OAAO,gBAAgB;EACxB;EAGH,MAAM,cACJ,MAAM,KAAK,SAAS,OAAO,kBAAkB,gBAAgB;EAE/D,MAAM,UAAU,MACd,IAKA,OAAO,YAAY,gBAAgB,EAAE;GACrC,GAAG;GACH,WAAW;EACZ,EAAC;AACF,SAAO,KAAK,aAAa,eAAe,QAAQ;CACjD;AACF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+ED,eAAsB,2BAA2B,EAC/C,KACA,OACA,QACA,gBACiC,EAAE;AACnC,KAAI,CAAC,OAAO,eAAe,SAAS,mBAAmB,CACrD,OAAM,IAAI,MACR,CACE,CAAC,4DAA4D,CAAC,EAC9D,CAAC,MAAM,EAAE,KAAK,UAAU,OAAO,eAAe,CAAC,SAAS,CAAC,AAC1D,EAAC,KAAK,KAAK;CAGhB,MAAM,eAAe,IAAI,YACrB,IAAI,UAAU,MAAM,GACpB,IAAI,WAAW,EACb,WAAW,MAAM,IAAI,CAAC,SAAS,wBAAwB,KAAK,CAAC,CAC9D,EAAC;CACN,MAAM,QAAQ,sBAAsB,cAClC;EACE,oBAAoB,OAAO,EACzB,kBAAkB,CAACC,UACjB,+BAA+B,MAAM,MAAM,CAC9C,EAAC;EACF;EACA;EACA,IAAI;CACL,GACD;EACE,MAAM;EACN;EACA,cAAc;CACf,EACF;AACD,QAAO;AACR"}
@@ -1,5 +1,5 @@
 import { AgentRunnableSequence } from "../agent.cjs";
-import * as
+import * as _langchain_core_agents0 from "@langchain/core/agents";
 import { AgentStep } from "@langchain/core/agents";
 import { BasePromptTemplate } from "@langchain/core/prompts";
 import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
@@ -75,7 +75,7 @@ declare function createReactAgent({
 streamRunnable
 }: CreateReactAgentParams): Promise<AgentRunnableSequence<{
 steps: AgentStep[];
-},
+}, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
 //#endregion
 export { CreateReactAgentParams, createReactAgent };
 //# sourceMappingURL=index.d.cts.map
@@ -1 +1 @@
-
{"version":3,"file":"index.d.cts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","
+
{"version":3,"file":"index.d.cts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAEzBH;EAEEF,GAAAA,EAFFE,0BAEEF;EAKCC;EAAkB,KAAA,EALnBD,aAKmB,EAAA;EAkDNM;;;;EAAuCI,MAAAA,EAlDnDT,kBAkDmDS;EAAkBL;;;;EAAiCD,cAAAA,CAAAA,EAAAA,OAAAA;CAARU;AAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFR,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -2,7 +2,7 @@ import { AgentRunnableSequence } from "../agent.js";
 import { BasePromptTemplate } from "@langchain/core/prompts";
 import { ToolInterface } from "@langchain/core/tools";
 import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
-import * as
+import * as _langchain_core_agents0 from "@langchain/core/agents";
 import { AgentStep } from "@langchain/core/agents";
 
 //#region src/agents/react/index.d.ts
@@ -75,7 +75,7 @@ declare function createReactAgent({
 streamRunnable
 }: CreateReactAgentParams): Promise<AgentRunnableSequence<{
 steps: AgentStep[];
-},
+}, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
 //#endregion
 export { CreateReactAgentParams, createReactAgent };
 //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
-
{"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","
+
{"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n//# sourceMappingURL=index.d.ts.map"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAEzBH;EAEEF,GAAAA,EAFFE,0BAEEF;EAKCC;EAAkB,KAAA,EALnBD,aAKmB,EAAA;EAkDNM;;;;EAAuCI,MAAAA,EAlDnDT,kBAkDmDS;EAAkBL;;;;EAAiCD,cAAAA,CAAAA,EAAAA,OAAAA;CAARU;AAAO;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFR,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -2,7 +2,7 @@ import { AgentInput } from "../types.cjs";
 import { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from "../agent.cjs";
 import { Optional } from "../../types/type-utils.cjs";
 import { StructuredChatOutputParserWithRetries } from "./outputParser.cjs";
-import * as
+import * as _langchain_core_agents1 from "@langchain/core/agents";
 import { AgentStep } from "@langchain/core/agents";
 import { BaseMessagePromptTemplate, BasePromptTemplate, ChatPromptTemplate } from "@langchain/core/prompts";
 import { BaseLanguageModelInterface, ToolDefinition } from "@langchain/core/language_models/base";
@@ -176,7 +176,7 @@ declare function createStructuredChatAgent({
 streamRunnable
 }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{
 steps: AgentStep[];
-},
+}, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
 //#endregion
 export { CreateStructuredChatAgentParams, StructuredChatAgent, StructuredChatAgentInput, StructuredChatCreatePromptArgs, createStructuredChatAgent };
 //# sourceMappingURL=index.d.cts.map