@langchain/classic 1.0.1 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. package/CHANGELOG.md +9 -0
  2. package/dist/agents/initialize.d.cts +2 -2
  3. package/dist/agents/initialize.d.cts.map +1 -1
  4. package/dist/agents/initialize.d.ts +2 -2
  5. package/dist/agents/initialize.d.ts.map +1 -1
  6. package/dist/agents/mrkl/outputParser.d.cts +1 -1
  7. package/dist/agents/mrkl/outputParser.d.cts.map +1 -1
  8. package/dist/agents/mrkl/outputParser.d.ts +1 -1
  9. package/dist/agents/mrkl/outputParser.d.ts.map +1 -1
  10. package/dist/agents/openai_tools/index.d.cts.map +1 -1
  11. package/dist/agents/openai_tools/index.d.ts.map +1 -1
  12. package/dist/agents/react/index.d.ts +2 -2
  13. package/dist/agents/react/index.d.ts.map +1 -1
  14. package/dist/agents/structured_chat/index.d.ts +2 -2
  15. package/dist/agents/structured_chat/index.d.ts.map +1 -1
  16. package/dist/agents/tool_calling/index.d.cts.map +1 -1
  17. package/dist/agents/tool_calling/index.d.ts.map +1 -1
  18. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts +2 -2
  19. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts.map +1 -1
  20. package/dist/chains/base.cjs +10 -4
  21. package/dist/chains/base.cjs.map +1 -1
  22. package/dist/chains/base.js +10 -4
  23. package/dist/chains/base.js.map +1 -1
  24. package/dist/chains/question_answering/load.d.cts +2 -2
  25. package/dist/chains/question_answering/load.d.cts.map +1 -1
  26. package/dist/chains/summarization/load.d.cts +2 -2
  27. package/dist/chains/summarization/load.d.cts.map +1 -1
  28. package/dist/chains/summarization/load.d.ts +2 -2
  29. package/dist/chains/summarization/load.d.ts.map +1 -1
  30. package/dist/chat_models/universal.cjs +1 -1
  31. package/dist/chat_models/universal.cjs.map +1 -1
  32. package/dist/chat_models/universal.js +1 -1
  33. package/dist/chat_models/universal.js.map +1 -1
  34. package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
  35. package/dist/evaluation/agents/trajectory.d.ts.map +1 -1
  36. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  37. package/dist/experimental/autogpt/prompt.d.cts +2 -2
  38. package/dist/experimental/autogpt/prompt.d.cts.map +1 -1
  39. package/dist/langchain-core/dist/tools/types.d.cts.map +1 -1
  40. package/dist/libs/langchain-core/dist/tools/types.d.ts.map +1 -1
  41. package/package.json +17 -18
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @langchain/classic
 
+ ## 1.0.2
+
+ ### Patch Changes
+
+ - Updated dependencies [8319201]
+ - Updated dependencies [4906522]
+ - @langchain/openai@1.1.0
+ - @langchain/textsplitters@1.0.0
+
  ## 1.0.1
 
  ### Patch Changes
package/dist/agents/initialize.d.cts CHANGED
@@ -6,7 +6,7 @@ import { StringInputToolSchema } from "../langchain-core/dist/tools/types.cjs";
  import { StructuredChatAgent } from "./structured_chat/index.cjs";
  import { OpenAIAgent } from "./openai_functions/index.cjs";
  import { XMLAgent } from "./xml/index.cjs";
- import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
+ import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { CallbackManager } from "@langchain/core/callbacks/manager";
  import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
@@ -19,7 +19,7 @@ import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
  * "chat-conversational-react-description".
  */
  type AgentType = "zero-shot-react-description" | "chat-zero-shot-react-description" | "chat-conversational-react-description";
- declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
+ declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
  /**
  * @interface
  */
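The only substantive change in the two hunks above is the renumbered import alias (`_langchain_core_language_models_base0` → `_langchain_core_language_models_base2`) emitted by the declaration bundler; the exported call signatures are untouched. As a minimal sketch of that unchanged public surface, based on the `initializeAgentExecutorWithOptions` signature shown in the declarations (the model, tool, and import entry point below are illustrative assumptions, not part of this diff):

```typescript
// Hypothetical consumer code: the signature of initializeAgentExecutorWithOptions
// declared above is identical in 1.0.1 and 1.0.2.
import { initializeAgentExecutorWithOptions } from "langchain/agents"; // entry point assumed
import { OpenAI } from "@langchain/openai";
import { DynamicTool } from "@langchain/core/tools";

const tools = [
  new DynamicTool({
    name: "echo",
    description: "Echoes its input back unchanged.",
    func: async (input: string) => input,
  }),
];

const llm = new OpenAI({ temperature: 0 });

// Structured overload: options.agentType selects the agent implementation.
const executor = await initializeAgentExecutorWithOptions(tools, llm, {
  agentType: "zero-shot-react-description",
});

const result = await executor.invoke({ input: "Echo back the word LangChain." });
console.log(result.output);
```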
package/dist/agents/initialize.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"initialize.d.cts","names":["_langchain_core_language_models_base0","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
+ {"version":3,"file":"initialize.d.cts","names":["_langchain_core_language_models_base2","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
package/dist/agents/initialize.d.ts CHANGED
@@ -7,7 +7,7 @@ import { StructuredChatAgent } from "./structured_chat/index.js";
  import { OpenAIAgent } from "./openai_functions/index.js";
  import { XMLAgent } from "./xml/index.js";
  import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
- import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
+ import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { CallbackManager } from "@langchain/core/callbacks/manager";
 
@@ -19,7 +19,7 @@ import { CallbackManager } from "@langchain/core/callbacks/manager";
  * "chat-conversational-react-description".
  */
  type AgentType = "zero-shot-react-description" | "chat-zero-shot-react-description" | "chat-conversational-react-description";
- declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
+ declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
  /**
  * @interface
  */
package/dist/agents/initialize.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"initialize.d.ts","names":["_langchain_core_language_models_base0","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
+ {"version":3,"file":"initialize.d.ts","names":["_langchain_core_language_models_base2","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
package/dist/agents/mrkl/outputParser.d.cts CHANGED
@@ -25,10 +25,10 @@ declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {
  tool?: undefined;
  toolInput?: undefined;
  } | {
- returnValues?: undefined;
  tool: string;
  toolInput: string;
  log: string;
+ returnValues?: undefined;
  }>;
  /**
  * Returns the format instructions for parsing the output of an agent
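This hunk only moves the optional `returnValues` member to the end of the second branch of the union returned by `parse()`; both branches keep the same fields. A minimal sketch of how a caller distinguishes the two shapes (the import entry point and input string are illustrative assumptions):

```typescript
// Hypothetical consumer code: narrowing the union returned by
// ZeroShotAgentOutputParser.parse(), whose shape is unchanged by this diff.
import { ZeroShotAgentOutputParser } from "langchain/agents"; // entry point assumed

const parser = new ZeroShotAgentOutputParser();
const parsed = await parser.parse("Final Answer: LangChain is a framework for LLM apps.");

if (parsed.returnValues !== undefined) {
  // Finish branch: { returnValues: { output: string }; log: string }
  console.log("final answer:", parsed.returnValues.output);
} else {
  // Action branch: { tool: string; toolInput: string; log: string }
  console.log("call tool:", parsed.tool, "with input:", parsed.toolInput);
}
```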
package/dist/agents/mrkl/outputParser.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"outputParser.d.cts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n returnValues?: undefined;\n tool: string;\n toolInput: string;\n log: string;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
+ {"version":3,"file":"outputParser.d.cts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n tool: string;\n toolInput: string;\n log: string;\n returnValues?: undefined;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
package/dist/agents/mrkl/outputParser.d.ts CHANGED
@@ -25,10 +25,10 @@ declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {
  tool?: undefined;
  toolInput?: undefined;
  } | {
- returnValues?: undefined;
  tool: string;
  toolInput: string;
  log: string;
+ returnValues?: undefined;
  }>;
  /**
  * Returns the format instructions for parsing the output of an agent
package/dist/agents/mrkl/outputParser.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"outputParser.d.ts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n returnValues?: undefined;\n tool: string;\n toolInput: string;\n log: string;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
+ {"version":3,"file":"outputParser.d.ts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n tool: string;\n toolInput: string;\n log: string;\n returnValues?: undefined;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
package/dist/agents/openai_tools/index.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.cts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
+ {"version":3,"file":"index.d.cts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
package/dist/agents/openai_tools/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
+ {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
package/dist/agents/react/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { AgentRunnableSequence } from "../agent.js";
  import { BasePromptTemplate } from "@langchain/core/prompts";
  import { ToolInterface } from "@langchain/core/tools";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents0 from "@langchain/core/agents";
+ import * as _langchain_core_agents1 from "@langchain/core/agents";
  import { AgentStep } from "@langchain/core/agents";
 
  //#region src/agents/react/index.d.ts
@@ -75,7 +75,7 @@ declare function createReactAgent({
  streamRunnable
  }: CreateReactAgentParams): Promise<AgentRunnableSequence<{
  steps: AgentStep[];
- }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
+ }, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
  //#endregion
  export { CreateReactAgentParams, createReactAgent };
  //# sourceMappingURL=index.d.ts.map
package/dist/agents/react/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAAA;EAAA,GAEzBH,EAAAA,0BAAAA;EAA0B;EAEX,KAKZD,EALDD,aAKCC,EAAAA;EAAkB;AAkD9B;;;EAA8C,MAAEO,EAlDpCP,kBAkDoCO;EAAK;;;;EACjC,cAAAG,CAAAA,EAAAA,OAAAA;CAC2B;;;AAFkE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFL,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
1
+ {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAAA;EAAA,GAEzBH,EAAAA,0BAAAA;EAA0B;EAEX,KAKZD,EALDD,aAKCC,EAAAA;EAAkB;AAkD9B;;;EAA8C,MAAEO,EAlDpCP,kBAkDoCO;EAAK;;;;EACjC,cAAAG,CAAAA,EAAAA,OAAAA;CAC2B;;;AAFkE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFL,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -5,7 +5,7 @@ import { StructuredChatOutputParserWithRetries } from "./outputParser.js";
5
5
  import { BaseMessagePromptTemplate, BasePromptTemplate, ChatPromptTemplate } from "@langchain/core/prompts";
6
6
  import { StructuredToolInterface } from "@langchain/core/tools";
7
7
  import { BaseLanguageModelInterface, ToolDefinition } from "@langchain/core/language_models/base";
8
- import * as _langchain_core_agents1 from "@langchain/core/agents";
8
+ import * as _langchain_core_agents0 from "@langchain/core/agents";
9
9
  import { AgentStep } from "@langchain/core/agents";
10
10
 
11
11
  //#region src/agents/structured_chat/index.d.ts
@@ -176,7 +176,7 @@ declare function createStructuredChatAgent({
176
176
  streamRunnable
177
177
  }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{
178
178
  steps: AgentStep[];
179
- }, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
179
+ }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
180
180
  //#endregion
181
181
  export { CreateStructuredChatAgentParams, StructuredChatAgent, StructuredChatAgentInput, StructuredChatCreatePromptArgs, createStructuredChatAgent };
182
182
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAAoC,UAhBnBc,8BAAAA,CAgBmB;EAAA;EAAsB,MAAnBP,CAAAA,EAAAA,MAAAA;EAAQ;EAK1BS,MAAAA,CAAAA,EAAAA,MAAAA;EAAmB;EAAA,oBAGjBD,CAAAA,EAAAA,MAAAA;EAAwB;EAUQ,cAOZJ,CAAAA,EAAAA,MAAAA,EAAAA;EAAgB;EAEd,aAQdL,CAAAA,EAzCXF,yBAyCWE,EAAAA;;;;;;AA2BCL,KA9DpBc,wBAAAA,GAA2BR,QA8DPN,CA9DgBW,UA8DhBX,EAAAA,cAAAA,CAAAA;;;;;AAzDiBO,cAA5BQ,mBAAAA,SAA4BR,KAAAA,CAAAA;EAAK,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EA8D1CU,YAAAA,EAAAA,MAAAA,EAAAA;EAA+B,WAAA,CAAA,KAAA,EA3DpBH,wBA2DoB;EAAA,UAElCd,CAAAA,CAAAA,EAAAA,6CAAAA;EAA0B,iBAEvBD,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,SAAGE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,KAKxCC,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkB;AA+D9B;;;;EAA8D,OAAEmB,aAAAA,CAAAA,KAAAA,EAzHhCtB,uBAyHgCsB,EAAAA,CAAAA,EAAAA,IAAAA;EAAM;;;;;;EAAmF,OAA7BL,sBAAAA,CAAAA,OAAAA,EAlHjFN,gBAkHiFM,GAAAA;IAAO,SAAA,EAAA,MAAA,EAAA;MAhH3HJ;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
1
+ {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAAoC,UAhBnBc,8BAAAA,CAgBmB;EAAA;EAAsB,MAAnBP,CAAAA,EAAAA,MAAAA;EAAQ;EAK1BS,MAAAA,CAAAA,EAAAA,MAAAA;EAAmB;EAAA,oBAGjBD,CAAAA,EAAAA,MAAAA;EAAwB;EAUQ,cAOZJ,CAAAA,EAAAA,MAAAA,EAAAA;EAAgB;EAEd,aAQdL,CAAAA,EAzCXF,yBAyCWE,EAAAA;;;;;;AA2BCL,KA9DpBc,wBAAAA,GAA2BR,QA8DPN,CA9DgBW,UA8DhBX,EAAAA,cAAAA,CAAAA;;;;;AAzDiBO,cAA5BQ,mBAAAA,SAA4BR,KAAAA,CAAAA;EAAK,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EA8D1CU,YAAAA,EAAAA,MAAAA,EAAAA;EAA+B,WAAA,CAAA,KAAA,EA3DpBH,wBA2DoB;EAAA,UAElCd,CAAAA,CAAAA,EAAAA,6CAAAA;EAA0B,iBAEvBD,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,SAAGE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,KAKxCC,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkB;AA+D9B;;;;EAA8D,OAAEmB,aAAAA,CAAAA,KAAAA,EAzHhCtB,uBAyHgCsB,EAAAA,CAAAA,EAAAA,IAAAA;EAAM;;;;;;EAAmF,OAA7BL,sBAAAA,CAAAA,OAAAA,EAlHjFN,gBAkHiFM,GAAAA;IAAO,SAAA,EAAA,MAAA,EAAA;MAhH3HJ;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.cts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
1
+ {"version":3,"file":"index.d.cts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
1
+ {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
@@ -1,5 +1,5 @@
1
1
  import { BaseChatMemory, BaseChatMemoryInput } from "../../../memory/chat_memory.cjs";
2
- import * as _langchain_core_messages1 from "@langchain/core/messages";
2
+ import * as _langchain_core_messages0 from "@langchain/core/messages";
3
3
  import { InputValues, MemoryVariables, OutputValues } from "@langchain/core/memory";
4
4
  import { ChatOpenAI } from "@langchain/openai";
5
5
 
@@ -37,7 +37,7 @@ declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {
37
37
  * Retrieves the messages from the chat history.
38
38
  * @returns Promise that resolves with the messages from the chat history.
39
39
  */
40
- getMessages(): Promise<_langchain_core_messages1.BaseMessage<_langchain_core_messages1.MessageStructure, _langchain_core_messages1.MessageType>[]>;
40
+ getMessages(): Promise<_langchain_core_messages0.BaseMessage<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>[]>;
41
41
  /**
42
42
  * Loads memory variables from the input values.
43
43
  * @param _values Input values.
@@ -1 +1 @@
1
- {"version":3,"file":"token_buffer_memory.d.cts","names":["ChatOpenAI","InputValues","MemoryVariables","OutputValues","BaseChatMemory","BaseChatMemoryInput","OpenAIAgentTokenBufferMemoryFields","OpenAIAgentTokenBufferMemory","_langchain_core_messages1","MessageStructure","MessageType","BaseMessage","Promise"],"sources":["../../../../src/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts"],"sourcesContent":["import { ChatOpenAI } from \"@langchain/openai\";\nimport { InputValues, MemoryVariables, OutputValues } from \"@langchain/core/memory\";\nimport { BaseChatMemory, BaseChatMemoryInput } from \"../../../memory/chat_memory.js\";\n/**\n * Type definition for the fields required to initialize an instance of\n * OpenAIAgentTokenBufferMemory.\n */\nexport type OpenAIAgentTokenBufferMemoryFields = BaseChatMemoryInput & {\n llm: ChatOpenAI;\n humanPrefix?: string;\n aiPrefix?: string;\n memoryKey?: string;\n maxTokenLimit?: number;\n returnMessages?: boolean;\n outputKey?: string;\n intermediateStepsKey?: string;\n};\n/**\n * Memory used to save agent output and intermediate steps.\n */\nexport declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {\n humanPrefix: string;\n aiPrefix: string;\n llm: ChatOpenAI;\n memoryKey: string;\n maxTokenLimit: number;\n returnMessages: boolean;\n outputKey: string;\n intermediateStepsKey: string;\n constructor(fields: OpenAIAgentTokenBufferMemoryFields);\n get memoryKeys(): string[];\n /**\n * Retrieves the messages from the chat history.\n * @returns Promise that resolves with the messages from the chat history.\n */\n getMessages(): Promise<import(\"@langchain/core/messages\").BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * Loads memory variables from the input values.\n * @param _values Input values.\n * @returns Promise that resolves with the loaded memory variables.\n */\n loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;\n /**\n * Saves the context of the chat, including user input, AI output, and\n * intermediate steps. Prunes the chat history if the total token count\n * exceeds the maximum limit.\n * @param inputValues Input values.\n * @param outputValues Output values.\n * @returns Promise that resolves when the context has been saved.\n */\n saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;\n}\n"],"mappings":";;;;;;;;;;AAOA;AAA8C,KAAlCM,kCAAAA,GAAqCD,mBAAH,GAAA;EAAA,GAAGA,EACxCL,UADwCK;EAAmB,WAC3DL,CAAAA,EAAAA,MAAAA;EAAU,QAAA,CAAA,EAAA,MAAA;EAYEO,SAAAA,CAAAA,EAAAA,MAAAA;EAA4B,aAAA,CAAA,EAAA,MAAA;EAAA,cAGxCP,CAAAA,EAAAA,OAAAA;EAAU,SAMKM,CAAAA,EAAAA,MAAAA;EAAkC,oBAAAE,CAAAA,EAAAA,MAMmDC;CAAgB;;;;AAMtEP,cArBlCK,4BAAAA,SAAqCH,cAAAA,CAqBHF;EAAe,WAAvBU,EAAAA,MAAAA;EAAO,QASzBX,EAAAA,MAAAA;EAAW,GAAgBE,EA3B/CH,UA2B+CG;EAAY,SAAGS,EAAAA,MAAAA;EAAO,aA9BpBR,EAAAA,MAAAA;EAAc,cAAA,EAAA,OAAA;;;sBAShDE;;;;;;iBAMLM,QAA0JJ,yBAAAA,CAA/GG,YANJH,yBAAAA,CAMmDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;+BAMjIT,cAAcW,QAAQV;;;;;;;;;2BAS1BD,2BAA2BE,eAAeS"}
1
+ {"version":3,"file":"token_buffer_memory.d.cts","names":["ChatOpenAI","InputValues","MemoryVariables","OutputValues","BaseChatMemory","BaseChatMemoryInput","OpenAIAgentTokenBufferMemoryFields","OpenAIAgentTokenBufferMemory","_langchain_core_messages0","MessageStructure","MessageType","BaseMessage","Promise"],"sources":["../../../../src/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts"],"sourcesContent":["import { ChatOpenAI } from \"@langchain/openai\";\nimport { InputValues, MemoryVariables, OutputValues } from \"@langchain/core/memory\";\nimport { BaseChatMemory, BaseChatMemoryInput } from \"../../../memory/chat_memory.js\";\n/**\n * Type definition for the fields required to initialize an instance of\n * OpenAIAgentTokenBufferMemory.\n */\nexport type OpenAIAgentTokenBufferMemoryFields = BaseChatMemoryInput & {\n llm: ChatOpenAI;\n humanPrefix?: string;\n aiPrefix?: string;\n memoryKey?: string;\n maxTokenLimit?: number;\n returnMessages?: boolean;\n outputKey?: string;\n intermediateStepsKey?: string;\n};\n/**\n * Memory used to save agent output and intermediate steps.\n */\nexport declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {\n humanPrefix: string;\n aiPrefix: string;\n llm: ChatOpenAI;\n memoryKey: string;\n maxTokenLimit: number;\n returnMessages: boolean;\n outputKey: string;\n intermediateStepsKey: string;\n constructor(fields: OpenAIAgentTokenBufferMemoryFields);\n get memoryKeys(): string[];\n /**\n * Retrieves the messages from the chat history.\n * @returns Promise that resolves with the messages from the chat history.\n */\n getMessages(): Promise<import(\"@langchain/core/messages\").BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * Loads memory variables from the input values.\n * @param _values Input values.\n * @returns Promise that resolves with the loaded memory variables.\n */\n loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;\n /**\n * Saves the context of the chat, including user input, AI output, and\n * intermediate steps. Prunes the chat history if the total token count\n * exceeds the maximum limit.\n * @param inputValues Input values.\n * @param outputValues Output values.\n * @returns Promise that resolves when the context has been saved.\n */\n saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;\n}\n"],"mappings":";;;;;;;;;;AAOA;AAA8C,KAAlCM,kCAAAA,GAAqCD,mBAAH,GAAA;EAAA,GAAGA,EACxCL,UADwCK;EAAmB,WAC3DL,CAAAA,EAAAA,MAAAA;EAAU,QAAA,CAAA,EAAA,MAAA;EAYEO,SAAAA,CAAAA,EAAAA,MAAAA;EAA4B,aAAA,CAAA,EAAA,MAAA;EAAA,cAGxCP,CAAAA,EAAAA,OAAAA;EAAU,SAMKM,CAAAA,EAAAA,MAAAA;EAAkC,oBAAAE,CAAAA,EAAAA,MAMmDC;CAAgB;;;;AAMtEP,cArBlCK,4BAAAA,SAAqCH,cAAAA,CAqBHF;EAAe,WAAvBU,EAAAA,MAAAA;EAAO,QASzBX,EAAAA,MAAAA;EAAW,GAAgBE,EA3B/CH,UA2B+CG;EAAY,SAAGS,EAAAA,MAAAA;EAAO,aA9BpBR,EAAAA,MAAAA;EAAc,cAAA,EAAA,OAAA;;;sBAShDE;;;;;;iBAMLM,QAA0JJ,yBAAAA,CAA/GG,YANJH,yBAAAA,CAMmDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;+BAMjIT,cAAcW,QAAQV;;;;;;;;;2BAS1BD,2BAA2BE,eAAeS"}
@@ -52,11 +52,17 @@ var BaseChain = class extends __langchain_core_language_models_base.BaseLangChain
52
52
  const runManager = await callbackManager_?.handleChainStart(this.toJSON(), fullValues, void 0, void 0, void 0, void 0, config?.runName);
53
53
  let outputValues;
54
54
  try {
55
- outputValues = await (fullValues.signal ? Promise.race([this._call(fullValues, runManager, config), new Promise((_, reject) => {
56
- fullValues.signal?.addEventListener("abort", () => {
57
- reject(/* @__PURE__ */ new Error("AbortError"));
55
+ if (fullValues.signal) {
56
+ let listener;
57
+ outputValues = await Promise.race([this._call(fullValues, runManager, config), new Promise((_, reject) => {
58
+ listener = () => {
59
+ reject(/* @__PURE__ */ new Error("AbortError"));
60
+ };
61
+ fullValues.signal?.addEventListener("abort", listener);
62
+ })]).finally(() => {
63
+ if (fullValues.signal && listener) fullValues.signal.removeEventListener("abort", listener);
58
64
  });
59
- })]) : this._call(fullValues, runManager, config));
65
+ } else outputValues = await this._call(fullValues, runManager, config);
60
66
  } catch (e) {
61
67
  await runManager?.handleChainError(e);
62
68
  throw e;
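This hunk is the one behavioral change visible in the compiled output shown here: instead of attaching an anonymous "abort" listener that stays registered on the signal after the race settles, `BaseChain.invoke` now keeps a reference to the listener and detaches it in a `.finally()` block, so repeated calls that share an `AbortSignal` no longer accumulate listeners. A minimal sketch of the same pattern, using a hypothetical `raceWithAbort` helper (the name and helper are illustrative, not part of the package):

```typescript
// Sketch only: race a task against an AbortSignal, then detach the "abort"
// listener once the race settles so it does not pile up on a shared signal.
async function raceWithAbort<T>(task: Promise<T>, signal?: AbortSignal): Promise<T> {
  if (!signal) return task;
  let listener: (() => void) | undefined;
  return Promise.race([
    task,
    new Promise<never>((_, reject) => {
      listener = () => reject(new Error("AbortError"));
      signal.addEventListener("abort", listener);
    }),
  ]).finally(() => {
    // Without this cleanup, each call would leave one more listener attached.
    if (listener) signal.removeEventListener("abort", listener);
  });
}

// Usage: abort a slow task after 50 ms.
const controller = new AbortController();
setTimeout(() => controller.abort(), 50);
const slow = new Promise<string>((resolve) => setTimeout(() => resolve("done"), 1000));
raceWithAbort(slow, controller.signal).catch((err) => console.error(err.message)); // AbortError
```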
@@ -1 +1 @@
1
- {"version":3,"file":"base.cjs","names":["BaseLangChain","fields?: BaseMemory | ChainInputs","verbose?: boolean","callbacks?: Callbacks","values: ChainValues","input: RunInput","options?: RunnableConfig","CallbackManager","outputValues: RunOutput","RUN_KEY","outputs: Record<string, unknown>","inputs: Record<string, unknown>","input: any","config?: Callbacks | RunnableConfig","values: ChainValues & { signal?: AbortSignal; timeout?: number }","tags?: string[]","inputs: RunInput[]","config?: (Callbacks | RunnableConfig)[]","data: SerializedBaseChain","values: LoadValues"],"sources":["../../src/chains/base.ts"],"sourcesContent":["import { BaseMemory } from \"@langchain/core/memory\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { RUN_KEY } from \"@langchain/core/outputs\";\nimport {\n CallbackManagerForChainRun,\n CallbackManager,\n Callbacks,\n parseCallbackConfigArg,\n} from \"@langchain/core/callbacks/manager\";\nimport { ensureConfig, type RunnableConfig } from \"@langchain/core/runnables\";\nimport {\n BaseLangChain,\n BaseLangChainParams,\n} from \"@langchain/core/language_models/base\";\nimport { SerializedBaseChain } from \"./serde.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type LoadValues = Record<string, any>;\n\nexport interface ChainInputs extends BaseLangChainParams {\n memory?: BaseMemory;\n\n /**\n * @deprecated Use `callbacks` instead\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Base interface that all chains must implement.\n */\nexport abstract class BaseChain<\n RunInput extends ChainValues = ChainValues,\n RunOutput extends ChainValues = ChainValues\n >\n extends BaseLangChain<RunInput, RunOutput>\n implements ChainInputs\n{\n declare memory?: BaseMemory;\n\n get lc_namespace(): string[] {\n return [\"langchain\", \"chains\", this._chainType()];\n }\n\n constructor(\n fields?: BaseMemory | ChainInputs,\n /** @deprecated */\n verbose?: boolean,\n /** @deprecated */\n callbacks?: Callbacks\n ) {\n if (\n arguments.length === 1 &&\n typeof fields === \"object\" &&\n !(\"saveContext\" in fields)\n ) {\n // fields is not a BaseMemory\n const { memory, callbackManager, ...rest } = fields;\n super({ ...rest, callbacks: callbackManager ?? 
rest.callbacks });\n this.memory = memory;\n } else {\n // fields is a BaseMemory\n super({ verbose, callbacks });\n this.memory = fields as BaseMemory;\n }\n }\n\n /** @ignore */\n _selectMemoryInputs(values: ChainValues): ChainValues {\n const valuesForMemory = { ...values };\n if (\"signal\" in valuesForMemory) {\n delete valuesForMemory.signal;\n }\n if (\"timeout\" in valuesForMemory) {\n delete valuesForMemory.timeout;\n }\n return valuesForMemory;\n }\n\n /**\n * Invoke the chain with the provided input and returns the output.\n * @param input Input values for the chain run.\n * @param config Optional configuration for the Runnable.\n * @returns Promise that resolves with the output of the chain run.\n */\n async invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput> {\n const config = ensureConfig(options);\n const fullValues = await this._formatValues(input);\n const callbackManager_ = await CallbackManager.configure(\n config?.callbacks,\n this.callbacks,\n config?.tags,\n this.tags,\n config?.metadata,\n this.metadata,\n { verbose: this.verbose }\n );\n const runManager = await callbackManager_?.handleChainStart(\n this.toJSON(),\n fullValues,\n undefined,\n undefined,\n undefined,\n undefined,\n config?.runName\n );\n let outputValues: RunOutput;\n try {\n outputValues = await (fullValues.signal\n ? (Promise.race([\n this._call(fullValues as RunInput, runManager, config),\n new Promise((_, reject) => {\n fullValues.signal?.addEventListener(\"abort\", () => {\n reject(new Error(\"AbortError\"));\n });\n }),\n ]) as Promise<RunOutput>)\n : this._call(fullValues as RunInput, runManager, config));\n } catch (e) {\n await runManager?.handleChainError(e);\n throw e;\n }\n if (!(this.memory == null)) {\n await this.memory.saveContext(\n this._selectMemoryInputs(input),\n outputValues\n );\n }\n await runManager?.handleChainEnd(outputValues);\n // add the runManager's currentRunId to the outputValues\n Object.defineProperty(outputValues, RUN_KEY, {\n value: runManager ? { runId: runManager?.runId } : undefined,\n configurable: true,\n });\n return outputValues;\n }\n\n private _validateOutputs(outputs: Record<string, unknown>): void {\n const missingKeys = this.outputKeys.filter((k) => !(k in outputs));\n if (missingKeys.length) {\n throw new Error(\n `Missing output keys: ${missingKeys.join(\n \", \"\n )} from chain ${this._chainType()}`\n );\n }\n }\n\n async prepOutputs(\n inputs: Record<string, unknown>,\n outputs: Record<string, unknown>,\n returnOnlyOutputs = false\n ) {\n this._validateOutputs(outputs);\n if (this.memory) {\n await this.memory.saveContext(inputs, outputs);\n }\n if (returnOnlyOutputs) {\n return outputs;\n }\n return { ...inputs, ...outputs };\n }\n\n /**\n * Run the core logic of this chain and return the output\n */\n abstract _call(\n values: RunInput,\n runManager?: CallbackManagerForChainRun,\n config?: RunnableConfig\n ): Promise<RunOutput>;\n\n /**\n * Return the string type key uniquely identifying this class of chain.\n */\n abstract _chainType(): string;\n\n /**\n * Return a json-like object representing this chain.\n */\n serialize(): SerializedBaseChain {\n throw new Error(\"Method not implemented.\");\n }\n\n abstract get inputKeys(): string[];\n\n abstract get outputKeys(): string[];\n\n /** @deprecated Use .invoke() instead. Will be removed in 0.2.0. 
*/\n async run(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n input: any,\n config?: Callbacks | RunnableConfig\n ): Promise<string> {\n const inputKeys = this.inputKeys.filter(\n (k) => !this.memory?.memoryKeys.includes(k)\n );\n const isKeylessInput = inputKeys.length <= 1;\n if (!isKeylessInput) {\n throw new Error(\n `Chain ${this._chainType()} expects multiple inputs, cannot use 'run' `\n );\n }\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const values = inputKeys.length ? { [inputKeys[0]]: input } : ({} as any);\n const returnValues = await this.call(values, config);\n const keys = Object.keys(returnValues);\n\n if (keys.length === 1) {\n return returnValues[keys[0]];\n }\n throw new Error(\n \"return values have multiple keys, `run` only supported when one key currently\"\n );\n }\n\n protected async _formatValues(\n values: ChainValues & { signal?: AbortSignal; timeout?: number }\n ) {\n const fullValues = { ...values } as typeof values;\n if (fullValues.timeout && !fullValues.signal) {\n fullValues.signal = AbortSignal.timeout(fullValues.timeout);\n delete fullValues.timeout;\n }\n if (!(this.memory == null)) {\n const newValues = await this.memory.loadMemoryVariables(\n this._selectMemoryInputs(values)\n );\n for (const [key, value] of Object.entries(newValues)) {\n fullValues[key] = value;\n }\n }\n return fullValues;\n }\n\n /**\n * @deprecated Use .invoke() instead. Will be removed in 0.2.0.\n *\n * Run the core logic of this chain and add to output if desired.\n *\n * Wraps _call and handles memory.\n */\n async call(\n values: ChainValues & { signal?: AbortSignal; timeout?: number },\n config?: Callbacks | RunnableConfig,\n /** @deprecated */\n tags?: string[]\n ): Promise<RunOutput> {\n const parsedConfig = { tags, ...parseCallbackConfigArg(config) };\n return this.invoke(values as RunInput, parsedConfig);\n }\n\n /**\n * @deprecated Use .batch() instead. 
Will be removed in 0.2.0.\n *\n * Call the chain on all inputs in the list\n */\n async apply(\n inputs: RunInput[],\n config?: (Callbacks | RunnableConfig)[]\n ): Promise<RunOutput[]> {\n return Promise.all(\n inputs.map(async (i, idx) => this.call(i, config?.[idx]))\n );\n }\n\n /**\n * Load a chain from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedBaseChain,\n values: LoadValues = {}\n ): Promise<BaseChain> {\n switch (data._type) {\n case \"llm_chain\": {\n const { LLMChain } = await import(\"./llm_chain.js\");\n return LLMChain.deserialize(data);\n }\n case \"sequential_chain\": {\n const { SequentialChain } = await import(\"./sequential_chain.js\");\n return SequentialChain.deserialize(data);\n }\n case \"simple_sequential_chain\": {\n const { SimpleSequentialChain } = await import(\"./sequential_chain.js\");\n return SimpleSequentialChain.deserialize(data);\n }\n case \"stuff_documents_chain\": {\n const { StuffDocumentsChain } = await import(\"./combine_docs_chain.js\");\n return StuffDocumentsChain.deserialize(data);\n }\n case \"map_reduce_documents_chain\": {\n const { MapReduceDocumentsChain } = await import(\n \"./combine_docs_chain.js\"\n );\n return MapReduceDocumentsChain.deserialize(data);\n }\n case \"refine_documents_chain\": {\n const { RefineDocumentsChain } = await import(\n \"./combine_docs_chain.js\"\n );\n return RefineDocumentsChain.deserialize(data);\n }\n case \"vector_db_qa\": {\n const { VectorDBQAChain } = await import(\"./vector_db_qa.js\");\n return VectorDBQAChain.deserialize(data, values);\n }\n case \"api_chain\": {\n const { APIChain } = await import(\"./api/api_chain.js\");\n return APIChain.deserialize(data);\n }\n default:\n throw new Error(\n `Invalid prompt type in config: ${\n (data as SerializedBaseChain)._type\n }`\n );\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;AA+BA,IAAsB,YAAtB,cAIUA,oDAEV;CAGE,IAAI,eAAyB;AAC3B,SAAO;GAAC;GAAa;GAAU,KAAK,YAAY;EAAC;CAClD;CAED,YACEC,QAEAC,SAEAC,WACA;AACA,MACE,UAAU,WAAW,KACrB,OAAO,WAAW,YAClB,EAAE,iBAAiB,SACnB;GAEA,MAAM,EAAE,QAAQ,gBAAiB,GAAG,MAAM,GAAG;GAC7C,MAAM;IAAE,GAAG;IAAM,WAAW,mBAAmB,KAAK;GAAW,EAAC;GAChE,KAAK,SAAS;EACf,OAAM;GAEL,MAAM;IAAE;IAAS;GAAW,EAAC;GAC7B,KAAK,SAAS;EACf;CACF;;CAGD,oBAAoBC,QAAkC;EACpD,MAAM,kBAAkB,EAAE,GAAG,OAAQ;AACrC,MAAI,YAAY,iBACd,OAAO,gBAAgB;AAEzB,MAAI,aAAa,iBACf,OAAO,gBAAgB;AAEzB,SAAO;CACR;;;;;;;CAQD,MAAM,OAAOC,OAAiBC,SAA8C;EAC1E,MAAM,sDAAsB,QAAQ;EACpC,MAAM,aAAa,MAAM,KAAK,cAAc,MAAM;EAClD,MAAM,mBAAmB,MAAMC,mDAAgB,UAC7C,QAAQ,WACR,KAAK,WACL,QAAQ,MACR,KAAK,MACL,QAAQ,UACR,KAAK,UACL,EAAE,SAAS,KAAK,QAAS,EAC1B;EACD,MAAM,aAAa,MAAM,kBAAkB,iBACzC,KAAK,QAAQ,EACb,YACA,QACA,QACA,QACA,QACA,QAAQ,QACT;EACD,IAAIC;AACJ,MAAI;GACF,eAAe,OAAO,WAAW,SAC5B,QAAQ,KAAK,CACZ,KAAK,MAAM,YAAwB,YAAY,OAAO,EACtD,IAAI,QAAQ,CAAC,GAAG,WAAW;IACzB,WAAW,QAAQ,iBAAiB,SAAS,MAAM;KACjD,uBAAO,IAAI,MAAM,cAAc;IAChC,EAAC;GACH,EACF,EAAC,GACF,KAAK,MAAM,YAAwB,YAAY,OAAO;EAC3D,SAAQ,GAAG;GACV,MAAM,YAAY,iBAAiB,EAAE;AACrC,SAAM;EACP;AACD,MAAI,EAAE,KAAK,UAAU,OACnB,MAAM,KAAK,OAAO,YAChB,KAAK,oBAAoB,MAAM,EAC/B,aACD;EAEH,MAAM,YAAY,eAAe,aAAa;EAE9C,OAAO,eAAe,cAAcC,kCAAS;GAC3C,OAAO,aAAa,EAAE,OAAO,YAAY,MAAO,IAAG;GACnD,cAAc;EACf,EAAC;AACF,SAAO;CACR;CAED,AAAQ,iBAAiBC,SAAwC;EAC/D,MAAM,cAAc,KAAK,WAAW,OAAO,CAAC,MAAM,EAAE,KAAK,SAAS;AAClE,MAAI,YAAY,OACd,OAAM,IAAI,MACR,CAAC,qBAAqB,EAAE,YAAY,KAClC,KACD,CAAC,YAAY,EAAE,KAAK,YAAY,EAAE;CAGxC;CAED,MAAM,YACJC,QACAD,SACA,oBAAoB,OACpB;EACA,KAAK,iBAAiB,QAAQ;AAC9B,MAAI,KAAK,QACP,MAAM,KAAK,OAAO,YAAY,QAAQ,QAAQ;AAEhD,MAAI,kBACF,QAAO;AAET,SAAO;GAAE,GAAG;GAAQ,GAAG;EAAS;CACjC;;;;CAmBD,YAAiC;AAC/B,QAAM,IAAI,MAAM;CACjB;;CAOD,MAAM,IAEJE,OACAC,QACiB;EACjB,MAAM,YAAY,KAAK,UAAU,OAC/B,CAAC,MAAM,CAAC,KAAK,QAAQ,WAAW,SAAS,EAAE,CAC5C;EACD,MAAM,iBAAiB,UAAU,UAAU;AAC3C,MAAI,CAAC,eACH,OAAM,IAAI,MACR,CAAC,MAAM,EAAE,KAAK,YAAY,CAAC,2CAA2C,CAAC;EAI3E,MAAM,SAAS,UAAU,SAAS,GAAG,UAAU,KAAK,MAAO,IAAI,CAAE;EACjE,MAAM,eAAe,MAAM,KAAK,KAAK,QAAQ,OAAO;EACpD,MAAM,OAAO,OAAO,KAAK,aAAa;AAEtC,MAAI,KAAK,WAAW,EAClB,QAAO,aAAa,KAAK;AAE3B,QAAM,IAAI,MACR;CAEH;CAED,MAAgB,cACdC,QACA;EACA,MAAM,aAAa,EAAE,GAAG,OAAQ;AAChC,MAAI,WAAW,WAAW,CAAC,WAAW,QAAQ;GAC5C,WAAW,SAAS,YAAY,QAAQ,WAAW,QAAQ;GAC3D,OAAO,WAAW;EACnB;AACD,MAAI,EAAE,KAAK,UAAU,OAAO;GAC1B,MAAM,YAAY,MAAM,KAAK,OAAO,oBAClC,KAAK,oBAAoB,OAAO,CACjC;AACD,QAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,UAAU,EAClD,WAAW,OAAO;EAErB;AACD,SAAO;CACR;;;;;;;;CASD,MAAM,KACJA,QACAD,QAEAE,MACoB;EACpB,MAAM,eAAe;GAAE;GAAM,kEAA0B,OAAO;EAAE;AAChE,SAAO,KAAK,OAAO,QAAoB,aAAa;CACrD;;;;;;CAOD,MAAM,MACJC,QACAC,QACsB;AACtB,SAAO,QAAQ,IACb,OAAO,IAAI,OAAO,GAAG,QAAQ,KAAK,KAAK,GAAG,SAAS,KAAK,CAAC,CAC1D;CACF;;;;CAKD,aAAa,YACXC,MACAC,SAAqB,CAAE,GACH;AACpB,UAAQ,KAAK,OAAb;GACE,KAAK,aAAa;IAChB,MAAM,EAAE,UAAU,GAAG,2CAAM;AAC3B,WAAO,SAAS,YAAY,KAAK;GAClC;GACD,KAAK,oBAAoB;IACvB,MAAM,EAAE,iBAAiB,GAAG,2CAAM;AAClC,WAAO,gBAAgB,YAAY,KAAK;GACzC;GACD,KAAK,2BAA2B;IAC9B,MAAM,EAAE,uBAAuB,GAAG,2CAAM;AACxC,WAAO,sBAAsB,YAAY,KAAK;GAC/C;GACD,KAAK,yBAAyB;IAC5B,MAAM,EAAE,qBAAqB,GAAG,2CAAM;AACtC,WAAO,oBAAoB,YAAY,KAAK;GAC7C;GACD,KAAK,8BAA8B;IACjC,MAAM,EAAE,yBAAyB,GAAG,2CAAM;AAG1C,WAAO,wBAAwB,YAAY,KAAK;GACjD;GACD,KAAK,0BAA0B;IAC7B,MAAM,EAAE,sBAAsB,GAAG,2CAAM;AAGvC,WAAO,qBAAqB,YAAY,KAAK;GAC9C;GACD,KAAK,gBAAgB;IACnB,MAAM,EAAE,iBAAiB,GAAG,2CAAM;AAClC,WAAO,gBAAgB,YAAY,MAAM,OAAO;GACjD;GACD,KAAK,aAAa;IAChB,MAAM,EAAE,UAAU,GAAG,2CAAM;AAC3B,WAAO,SAAS,YAAY,KAAK;GAClC;GACD,QACE,OAAM,IAAI,MACR,CAAC,+BAA+B,EAC7B,KAA6B,OAC9B;EAEP;CACF;AACF"}
+ {"version":3,"file":"base.cjs","names":["BaseLangChain","fields?: BaseMemory | ChainInputs","verbose?: boolean","callbacks?: Callbacks","values: ChainValues","input: RunInput","options?: RunnableConfig","CallbackManager","outputValues: RunOutput","listener: (() => void) | undefined","RUN_KEY","outputs: Record<string, unknown>","inputs: Record<string, unknown>","input: any","config?: Callbacks | RunnableConfig","values: ChainValues & { signal?: AbortSignal; timeout?: number }","tags?: string[]","inputs: RunInput[]","config?: (Callbacks | RunnableConfig)[]","data: SerializedBaseChain","values: LoadValues"],"sources":["../../src/chains/base.ts"],"sourcesContent":["import { BaseMemory } from \"@langchain/core/memory\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { RUN_KEY } from \"@langchain/core/outputs\";\nimport {\n CallbackManagerForChainRun,\n CallbackManager,\n Callbacks,\n parseCallbackConfigArg,\n} from \"@langchain/core/callbacks/manager\";\nimport { ensureConfig, type RunnableConfig } from \"@langchain/core/runnables\";\nimport {\n BaseLangChain,\n BaseLangChainParams,\n} from \"@langchain/core/language_models/base\";\nimport { SerializedBaseChain } from \"./serde.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type LoadValues = Record<string, any>;\n\nexport interface ChainInputs extends BaseLangChainParams {\n memory?: BaseMemory;\n\n /**\n * @deprecated Use `callbacks` instead\n */\n callbackManager?: CallbackManager;\n}\n\n/**\n * Base interface that all chains must implement.\n */\nexport abstract class BaseChain<\n RunInput extends ChainValues = ChainValues,\n RunOutput extends ChainValues = ChainValues\n >\n extends BaseLangChain<RunInput, RunOutput>\n implements ChainInputs\n{\n declare memory?: BaseMemory;\n\n get lc_namespace(): string[] {\n return [\"langchain\", \"chains\", this._chainType()];\n }\n\n constructor(\n fields?: BaseMemory | ChainInputs,\n /** @deprecated */\n verbose?: boolean,\n /** @deprecated */\n callbacks?: Callbacks\n ) {\n if (\n arguments.length === 1 &&\n typeof fields === \"object\" &&\n !(\"saveContext\" in fields)\n ) {\n // fields is not a BaseMemory\n const { memory, callbackManager, ...rest } = fields;\n super({ ...rest, callbacks: callbackManager ?? 
rest.callbacks });\n this.memory = memory;\n } else {\n // fields is a BaseMemory\n super({ verbose, callbacks });\n this.memory = fields as BaseMemory;\n }\n }\n\n /** @ignore */\n _selectMemoryInputs(values: ChainValues): ChainValues {\n const valuesForMemory = { ...values };\n if (\"signal\" in valuesForMemory) {\n delete valuesForMemory.signal;\n }\n if (\"timeout\" in valuesForMemory) {\n delete valuesForMemory.timeout;\n }\n return valuesForMemory;\n }\n\n /**\n * Invoke the chain with the provided input and returns the output.\n * @param input Input values for the chain run.\n * @param config Optional configuration for the Runnable.\n * @returns Promise that resolves with the output of the chain run.\n */\n async invoke(input: RunInput, options?: RunnableConfig): Promise<RunOutput> {\n const config = ensureConfig(options);\n const fullValues = await this._formatValues(input);\n const callbackManager_ = await CallbackManager.configure(\n config?.callbacks,\n this.callbacks,\n config?.tags,\n this.tags,\n config?.metadata,\n this.metadata,\n { verbose: this.verbose }\n );\n const runManager = await callbackManager_?.handleChainStart(\n this.toJSON(),\n fullValues,\n undefined,\n undefined,\n undefined,\n undefined,\n config?.runName\n );\n let outputValues: RunOutput;\n try {\n if (fullValues.signal) {\n let listener: (() => void) | undefined;\n outputValues = (await Promise.race([\n this._call(fullValues as RunInput, runManager, config),\n new Promise<never>((_, reject) => {\n listener = () => {\n reject(new Error(\"AbortError\"));\n };\n fullValues.signal?.addEventListener(\"abort\", listener);\n }),\n ]).finally(() => {\n if (fullValues.signal && listener) {\n fullValues.signal.removeEventListener(\"abort\", listener);\n }\n })) as RunOutput;\n } else {\n outputValues = await this._call(\n fullValues as RunInput,\n runManager,\n config\n );\n }\n } catch (e) {\n await runManager?.handleChainError(e);\n throw e;\n }\n if (!(this.memory == null)) {\n await this.memory.saveContext(\n this._selectMemoryInputs(input),\n outputValues\n );\n }\n await runManager?.handleChainEnd(outputValues);\n // add the runManager's currentRunId to the outputValues\n Object.defineProperty(outputValues, RUN_KEY, {\n value: runManager ? 
{ runId: runManager?.runId } : undefined,\n configurable: true,\n });\n return outputValues;\n }\n\n private _validateOutputs(outputs: Record<string, unknown>): void {\n const missingKeys = this.outputKeys.filter((k) => !(k in outputs));\n if (missingKeys.length) {\n throw new Error(\n `Missing output keys: ${missingKeys.join(\n \", \"\n )} from chain ${this._chainType()}`\n );\n }\n }\n\n async prepOutputs(\n inputs: Record<string, unknown>,\n outputs: Record<string, unknown>,\n returnOnlyOutputs = false\n ) {\n this._validateOutputs(outputs);\n if (this.memory) {\n await this.memory.saveContext(inputs, outputs);\n }\n if (returnOnlyOutputs) {\n return outputs;\n }\n return { ...inputs, ...outputs };\n }\n\n /**\n * Run the core logic of this chain and return the output\n */\n abstract _call(\n values: RunInput,\n runManager?: CallbackManagerForChainRun,\n config?: RunnableConfig\n ): Promise<RunOutput>;\n\n /**\n * Return the string type key uniquely identifying this class of chain.\n */\n abstract _chainType(): string;\n\n /**\n * Return a json-like object representing this chain.\n */\n serialize(): SerializedBaseChain {\n throw new Error(\"Method not implemented.\");\n }\n\n abstract get inputKeys(): string[];\n\n abstract get outputKeys(): string[];\n\n /** @deprecated Use .invoke() instead. Will be removed in 0.2.0. */\n async run(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n input: any,\n config?: Callbacks | RunnableConfig\n ): Promise<string> {\n const inputKeys = this.inputKeys.filter(\n (k) => !this.memory?.memoryKeys.includes(k)\n );\n const isKeylessInput = inputKeys.length <= 1;\n if (!isKeylessInput) {\n throw new Error(\n `Chain ${this._chainType()} expects multiple inputs, cannot use 'run' `\n );\n }\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n const values = inputKeys.length ? { [inputKeys[0]]: input } : ({} as any);\n const returnValues = await this.call(values, config);\n const keys = Object.keys(returnValues);\n\n if (keys.length === 1) {\n return returnValues[keys[0]];\n }\n throw new Error(\n \"return values have multiple keys, `run` only supported when one key currently\"\n );\n }\n\n protected async _formatValues(\n values: ChainValues & { signal?: AbortSignal; timeout?: number }\n ) {\n const fullValues = { ...values } as typeof values;\n if (fullValues.timeout && !fullValues.signal) {\n fullValues.signal = AbortSignal.timeout(fullValues.timeout);\n delete fullValues.timeout;\n }\n if (!(this.memory == null)) {\n const newValues = await this.memory.loadMemoryVariables(\n this._selectMemoryInputs(values)\n );\n for (const [key, value] of Object.entries(newValues)) {\n fullValues[key] = value;\n }\n }\n return fullValues;\n }\n\n /**\n * @deprecated Use .invoke() instead. Will be removed in 0.2.0.\n *\n * Run the core logic of this chain and add to output if desired.\n *\n * Wraps _call and handles memory.\n */\n async call(\n values: ChainValues & { signal?: AbortSignal; timeout?: number },\n config?: Callbacks | RunnableConfig,\n /** @deprecated */\n tags?: string[]\n ): Promise<RunOutput> {\n const parsedConfig = { tags, ...parseCallbackConfigArg(config) };\n return this.invoke(values as RunInput, parsedConfig);\n }\n\n /**\n * @deprecated Use .batch() instead. 
Will be removed in 0.2.0.\n *\n * Call the chain on all inputs in the list\n */\n async apply(\n inputs: RunInput[],\n config?: (Callbacks | RunnableConfig)[]\n ): Promise<RunOutput[]> {\n return Promise.all(\n inputs.map(async (i, idx) => this.call(i, config?.[idx]))\n );\n }\n\n /**\n * Load a chain from a json-like object describing it.\n */\n static async deserialize(\n data: SerializedBaseChain,\n values: LoadValues = {}\n ): Promise<BaseChain> {\n switch (data._type) {\n case \"llm_chain\": {\n const { LLMChain } = await import(\"./llm_chain.js\");\n return LLMChain.deserialize(data);\n }\n case \"sequential_chain\": {\n const { SequentialChain } = await import(\"./sequential_chain.js\");\n return SequentialChain.deserialize(data);\n }\n case \"simple_sequential_chain\": {\n const { SimpleSequentialChain } = await import(\"./sequential_chain.js\");\n return SimpleSequentialChain.deserialize(data);\n }\n case \"stuff_documents_chain\": {\n const { StuffDocumentsChain } = await import(\"./combine_docs_chain.js\");\n return StuffDocumentsChain.deserialize(data);\n }\n case \"map_reduce_documents_chain\": {\n const { MapReduceDocumentsChain } = await import(\n \"./combine_docs_chain.js\"\n );\n return MapReduceDocumentsChain.deserialize(data);\n }\n case \"refine_documents_chain\": {\n const { RefineDocumentsChain } = await import(\n \"./combine_docs_chain.js\"\n );\n return RefineDocumentsChain.deserialize(data);\n }\n case \"vector_db_qa\": {\n const { VectorDBQAChain } = await import(\"./vector_db_qa.js\");\n return VectorDBQAChain.deserialize(data, values);\n }\n case \"api_chain\": {\n const { APIChain } = await import(\"./api/api_chain.js\");\n return APIChain.deserialize(data);\n }\n default:\n throw new Error(\n `Invalid prompt type in config: ${\n (data as SerializedBaseChain)._type\n }`\n );\n }\n 
}\n}\n"],"mappings":";;;;;;;;;;AA+BA,IAAsB,YAAtB,cAIUA,oDAEV;CAGE,IAAI,eAAyB;AAC3B,SAAO;GAAC;GAAa;GAAU,KAAK,YAAY;EAAC;CAClD;CAED,YACEC,QAEAC,SAEAC,WACA;AACA,MACE,UAAU,WAAW,KACrB,OAAO,WAAW,YAClB,EAAE,iBAAiB,SACnB;GAEA,MAAM,EAAE,QAAQ,gBAAiB,GAAG,MAAM,GAAG;GAC7C,MAAM;IAAE,GAAG;IAAM,WAAW,mBAAmB,KAAK;GAAW,EAAC;GAChE,KAAK,SAAS;EACf,OAAM;GAEL,MAAM;IAAE;IAAS;GAAW,EAAC;GAC7B,KAAK,SAAS;EACf;CACF;;CAGD,oBAAoBC,QAAkC;EACpD,MAAM,kBAAkB,EAAE,GAAG,OAAQ;AACrC,MAAI,YAAY,iBACd,OAAO,gBAAgB;AAEzB,MAAI,aAAa,iBACf,OAAO,gBAAgB;AAEzB,SAAO;CACR;;;;;;;CAQD,MAAM,OAAOC,OAAiBC,SAA8C;EAC1E,MAAM,sDAAsB,QAAQ;EACpC,MAAM,aAAa,MAAM,KAAK,cAAc,MAAM;EAClD,MAAM,mBAAmB,MAAMC,mDAAgB,UAC7C,QAAQ,WACR,KAAK,WACL,QAAQ,MACR,KAAK,MACL,QAAQ,UACR,KAAK,UACL,EAAE,SAAS,KAAK,QAAS,EAC1B;EACD,MAAM,aAAa,MAAM,kBAAkB,iBACzC,KAAK,QAAQ,EACb,YACA,QACA,QACA,QACA,QACA,QAAQ,QACT;EACD,IAAIC;AACJ,MAAI;AACF,OAAI,WAAW,QAAQ;IACrB,IAAIC;IACJ,eAAgB,MAAM,QAAQ,KAAK,CACjC,KAAK,MAAM,YAAwB,YAAY,OAAO,EACtD,IAAI,QAAe,CAAC,GAAG,WAAW;KAChC,WAAW,MAAM;MACf,uBAAO,IAAI,MAAM,cAAc;KAChC;KACD,WAAW,QAAQ,iBAAiB,SAAS,SAAS;IACvD,EACF,EAAC,CAAC,QAAQ,MAAM;AACf,SAAI,WAAW,UAAU,UACvB,WAAW,OAAO,oBAAoB,SAAS,SAAS;IAE3D,EAAC;GACH,OACC,eAAe,MAAM,KAAK,MACxB,YACA,YACA,OACD;EAEJ,SAAQ,GAAG;GACV,MAAM,YAAY,iBAAiB,EAAE;AACrC,SAAM;EACP;AACD,MAAI,EAAE,KAAK,UAAU,OACnB,MAAM,KAAK,OAAO,YAChB,KAAK,oBAAoB,MAAM,EAC/B,aACD;EAEH,MAAM,YAAY,eAAe,aAAa;EAE9C,OAAO,eAAe,cAAcC,kCAAS;GAC3C,OAAO,aAAa,EAAE,OAAO,YAAY,MAAO,IAAG;GACnD,cAAc;EACf,EAAC;AACF,SAAO;CACR;CAED,AAAQ,iBAAiBC,SAAwC;EAC/D,MAAM,cAAc,KAAK,WAAW,OAAO,CAAC,MAAM,EAAE,KAAK,SAAS;AAClE,MAAI,YAAY,OACd,OAAM,IAAI,MACR,CAAC,qBAAqB,EAAE,YAAY,KAClC,KACD,CAAC,YAAY,EAAE,KAAK,YAAY,EAAE;CAGxC;CAED,MAAM,YACJC,QACAD,SACA,oBAAoB,OACpB;EACA,KAAK,iBAAiB,QAAQ;AAC9B,MAAI,KAAK,QACP,MAAM,KAAK,OAAO,YAAY,QAAQ,QAAQ;AAEhD,MAAI,kBACF,QAAO;AAET,SAAO;GAAE,GAAG;GAAQ,GAAG;EAAS;CACjC;;;;CAmBD,YAAiC;AAC/B,QAAM,IAAI,MAAM;CACjB;;CAOD,MAAM,IAEJE,OACAC,QACiB;EACjB,MAAM,YAAY,KAAK,UAAU,OAC/B,CAAC,MAAM,CAAC,KAAK,QAAQ,WAAW,SAAS,EAAE,CAC5C;EACD,MAAM,iBAAiB,UAAU,UAAU;AAC3C,MAAI,CAAC,eACH,OAAM,IAAI,MACR,CAAC,MAAM,EAAE,KAAK,YAAY,CAAC,2CAA2C,CAAC;EAI3E,MAAM,SAAS,UAAU,SAAS,GAAG,UAAU,KAAK,MAAO,IAAI,CAAE;EACjE,MAAM,eAAe,MAAM,KAAK,KAAK,QAAQ,OAAO;EACpD,MAAM,OAAO,OAAO,KAAK,aAAa;AAEtC,MAAI,KAAK,WAAW,EAClB,QAAO,aAAa,KAAK;AAE3B,QAAM,IAAI,MACR;CAEH;CAED,MAAgB,cACdC,QACA;EACA,MAAM,aAAa,EAAE,GAAG,OAAQ;AAChC,MAAI,WAAW,WAAW,CAAC,WAAW,QAAQ;GAC5C,WAAW,SAAS,YAAY,QAAQ,WAAW,QAAQ;GAC3D,OAAO,WAAW;EACnB;AACD,MAAI,EAAE,KAAK,UAAU,OAAO;GAC1B,MAAM,YAAY,MAAM,KAAK,OAAO,oBAClC,KAAK,oBAAoB,OAAO,CACjC;AACD,QAAK,MAAM,CAAC,KAAK,MAAM,IAAI,OAAO,QAAQ,UAAU,EAClD,WAAW,OAAO;EAErB;AACD,SAAO;CACR;;;;;;;;CASD,MAAM,KACJA,QACAD,QAEAE,MACoB;EACpB,MAAM,eAAe;GAAE;GAAM,kEAA0B,OAAO;EAAE;AAChE,SAAO,KAAK,OAAO,QAAoB,aAAa;CACrD;;;;;;CAOD,MAAM,MACJC,QACAC,QACsB;AACtB,SAAO,QAAQ,IACb,OAAO,IAAI,OAAO,GAAG,QAAQ,KAAK,KAAK,GAAG,SAAS,KAAK,CAAC,CAC1D;CACF;;;;CAKD,aAAa,YACXC,MACAC,SAAqB,CAAE,GACH;AACpB,UAAQ,KAAK,OAAb;GACE,KAAK,aAAa;IAChB,MAAM,EAAE,UAAU,GAAG,2CAAM;AAC3B,WAAO,SAAS,YAAY,KAAK;GAClC;GACD,KAAK,oBAAoB;IACvB,MAAM,EAAE,iBAAiB,GAAG,2CAAM;AAClC,WAAO,gBAAgB,YAAY,KAAK;GACzC;GACD,KAAK,2BAA2B;IAC9B,MAAM,EAAE,uBAAuB,GAAG,2CAAM;AACxC,WAAO,sBAAsB,YAAY,KAAK;GAC/C;GACD,KAAK,yBAAyB;IAC5B,MAAM,EAAE,qBAAqB,GAAG,2CAAM;AACtC,WAAO,oBAAoB,YAAY,KAAK;GAC7C;GACD,KAAK,8BAA8B;IACjC,MAAM,EAAE,yBAAyB,GAAG,2CAAM;AAG1C,WAAO,wBAAwB,YAAY,KAAK;GACjD;GACD,KAAK,0BAA0B;IAC7B,MAAM,EAAE,sBAAsB,GAAG,2CAAM;AAGvC,WAAO,qBAAqB,YAAY,KAAK;GAC9C;GACD,KAAK,gBAAgB;IACnB,MAAM,EAAE,iBAAiB,GAAG,2CAAM;AAClC,WAAO,gBAAgB,YAAY,MAAM,OAAO;GACjD;GACD,KAAK,aAAa;IAChB,MAAM,EAAE,UAAU,GAAG,2CAAM;AAC3B,WAAO,SAAS,Y
AAY,KAAK;GAClC;GACD,QACE,OAAM,IAAI,MACR,CAAC,+BAA+B,EAC7B,KAA6B,OAC9B;EAEP;CACF;AACF"}
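The embedded `sourcesContent` in the new map above also shows `_formatValues`, which promotes a plain `timeout` option into an `AbortSignal` before `invoke()` races the call against abortion. A minimal standalone sketch of that conversion, assuming only the global `AbortSignal.timeout()` (Node 17.3+ / modern browsers); the helper name and the loose value shape are illustrative, not part of the package's API:

```ts
// Sketch of the timeout -> signal promotion shown in _formatValues in the sourcesContent above.
// withTimeoutSignal is an illustrative name, not an export of @langchain/classic.
type CallValues = Record<string, unknown> & { signal?: AbortSignal; timeout?: number };

function withTimeoutSignal(values: CallValues): CallValues {
  const fullValues = { ...values };
  if (fullValues.timeout && !fullValues.signal) {
    // AbortSignal.timeout() yields a signal that aborts after the given number of milliseconds.
    fullValues.signal = AbortSignal.timeout(fullValues.timeout);
    delete fullValues.timeout;
  }
  return fullValues;
}

// e.g. withTimeoutSignal({ question: "...", timeout: 5_000 }) returns { question: "...", signal: AbortSignal }.
```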
@@ -51,11 +51,17 @@ var BaseChain = class extends BaseLangChain {
  const runManager = await callbackManager_?.handleChainStart(this.toJSON(), fullValues, void 0, void 0, void 0, void 0, config?.runName);
  let outputValues;
  try {
- outputValues = await (fullValues.signal ? Promise.race([this._call(fullValues, runManager, config), new Promise((_, reject) => {
- fullValues.signal?.addEventListener("abort", () => {
- reject(/* @__PURE__ */ new Error("AbortError"));
+ if (fullValues.signal) {
+ let listener;
+ outputValues = await Promise.race([this._call(fullValues, runManager, config), new Promise((_, reject) => {
+ listener = () => {
+ reject(/* @__PURE__ */ new Error("AbortError"));
+ };
+ fullValues.signal?.addEventListener("abort", listener);
+ })]).finally(() => {
+ if (fullValues.signal && listener) fullValues.signal.removeEventListener("abort", listener);
  });
- })]) : this._call(fullValues, runManager, config));
+ } else outputValues = await this._call(fullValues, runManager, config);
  } catch (e) {
  await runManager?.handleChainError(e);
  throw e;
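The behavioral change in this hunk, shown in the compiled output and in the source map above: the anonymous "abort" handler that previously stayed attached after the race settled is now kept in a named `listener` variable and removed in `.finally()`, so repeated calls against a long-lived `AbortSignal` no longer accumulate listeners. A hedged, self-contained sketch of that pattern; `raceWithAbort` is an illustrative helper, not an export of the package:

```ts
// Sketch of the listener-cleanup pattern applied in the hunk above.
// raceWithAbort is an illustrative helper, not part of @langchain/classic's API.
async function raceWithAbort<T>(task: Promise<T>, signal?: AbortSignal): Promise<T> {
  if (!signal) return task;
  let listener: (() => void) | undefined;
  const aborted = new Promise<never>((_, reject) => {
    listener = () => reject(new Error("AbortError"));
    signal.addEventListener("abort", listener);
  });
  try {
    return await Promise.race([task, aborted]);
  } finally {
    // Without this, every call would leave one more "abort" listener registered on the signal.
    if (listener) signal.removeEventListener("abort", listener);
  }
}
```

Removing the listener in the `finally` step also means the rejecting promise can never fire after the task has already won, which avoids a late unhandled rejection in addition to the listener build-up.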