@langchain/classic 1.0.3 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +9 -0
  2. package/dist/agents/chat/outputParser.d.ts +2 -2
  3. package/dist/agents/chat/outputParser.d.ts.map +1 -1
  4. package/dist/agents/initialize.d.cts +2 -2
  5. package/dist/agents/initialize.d.cts.map +1 -1
  6. package/dist/agents/initialize.d.ts +2 -2
  7. package/dist/agents/initialize.d.ts.map +1 -1
  8. package/dist/agents/mrkl/outputParser.d.ts +1 -1
  9. package/dist/agents/mrkl/outputParser.d.ts.map +1 -1
  10. package/dist/agents/openai_tools/index.d.ts +1 -1
  11. package/dist/agents/openai_tools/index.d.ts.map +1 -1
  12. package/dist/agents/react/index.d.ts +2 -2
  13. package/dist/agents/react/index.d.ts.map +1 -1
  14. package/dist/agents/structured_chat/index.d.ts +2 -2
  15. package/dist/agents/structured_chat/index.d.ts.map +1 -1
  16. package/dist/agents/tool_calling/index.d.ts.map +1 -1
  17. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts +2 -2
  18. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts.map +1 -1
  19. package/dist/chains/openai_functions/openapi.cjs +1 -1
  20. package/dist/chains/openai_functions/openapi.cjs.map +1 -1
  21. package/dist/chains/openai_functions/openapi.d.ts +1 -1
  22. package/dist/chains/openai_functions/openapi.js +1 -1
  23. package/dist/chains/openai_functions/openapi.js.map +1 -1
  24. package/dist/chains/question_answering/load.d.cts +2 -2
  25. package/dist/chains/question_answering/load.d.cts.map +1 -1
  26. package/dist/chains/question_answering/load.d.ts +2 -2
  27. package/dist/chains/question_answering/load.d.ts.map +1 -1
  28. package/dist/chains/summarization/load.d.cts +2 -2
  29. package/dist/chains/summarization/load.d.cts.map +1 -1
  30. package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
  31. package/dist/evaluation/agents/trajectory.d.ts.map +1 -1
  32. package/dist/evaluation/comparison/pairwise.d.cts.map +1 -1
  33. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  34. package/dist/evaluation/criteria/criteria.d.cts.map +1 -1
  35. package/dist/experimental/autogpt/prompt.d.cts +2 -2
  36. package/dist/experimental/autogpt/prompt.d.cts.map +1 -1
  37. package/dist/load/import_map.cjs +1 -1
  38. package/dist/load/import_map.js +1 -1
  39. package/package.json +20 -20
package/CHANGELOG.md CHANGED
@@ -1,5 +1,14 @@
  # @langchain/classic
 
+ ## 1.0.4
+
+ ### Patch Changes
+
+ - [#9379](https://github.com/langchain-ai/langchainjs/pull/9379) [`34c472d`](https://github.com/langchain-ai/langchainjs/commit/34c472d129c9c3d58042fad6479fd15e0763feaf) Thanks [@kenowessels](https://github.com/kenowessels)! - OpenAPIToJSONSchema required from nested schema
+
+ - Updated dependencies [[`415cb0b`](https://github.com/langchain-ai/langchainjs/commit/415cb0bfd26207583befdb02367bd12a46b33d51), [`a2ad61e`](https://github.com/langchain-ai/langchainjs/commit/a2ad61e787a06a55a615f63589a65ada05927792)]:
+ - @langchain/openai@1.1.2
+
  ## 1.0.3
 
  ### Patch Changes
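The [#9379] changelog entry above is terse. As a rough illustration only (the schema below is hypothetical and not taken from the patch), "required from nested schema" presumably refers to nested object schemas whose own `required` lists should survive the OpenAPI-to-JSON-Schema conversion in `chains/openai_functions/openapi`:

```typescript
// Hypothetical OpenAPI-style component schema, used only to illustrate the changelog entry.
// The inner `customer` object declares its own `required` list; the fix presumably ensures
// that nested list is carried into the converted JSON Schema rather than dropped.
const openApiSchema = {
  type: "object",
  properties: {
    customer: {
      type: "object",
      properties: {
        id: { type: "string" },
        email: { type: "string" },
      },
      required: ["id"], // nested `required`, expected to be preserved after conversion
    },
  },
  required: ["customer"],
};
```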
package/dist/agents/chat/outputParser.d.ts CHANGED
@@ -51,12 +51,12 @@ declare class ChatAgentOutputParser extends AgentActionOutputParser {
  * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.
  */
  parse(text: string): Promise<{
+ tool?: undefined;
+ toolInput?: undefined;
  returnValues: {
  output: string;
  };
  log: string;
- tool?: undefined;
- toolInput?: undefined;
  } | {
  returnValues?: undefined;
  tool: any;
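The reordering above does not change the type itself; both declarations describe the same two-member union. As a standalone sketch (the alias below mirrors the declared shape and is not imported from the package), a caller can branch on which member it received like this:

```typescript
// Shape copied from the ChatAgentOutputParser.parse() declaration shown above;
// defined locally here purely for illustration.
type ChatAgentParseResult =
  | { tool?: undefined; toolInput?: undefined; returnValues: { output: string }; log: string }
  | { returnValues?: undefined; tool: any; toolInput: any; log: string };

function describeParseResult(result: ChatAgentParseResult): string {
  if (result.tool !== undefined) {
    // Agent action branch: a tool call was parsed out of the model output.
    return `action: ${result.tool}(${JSON.stringify(result.toolInput)})`;
  }
  // Agent finish branch: the final answer lives in returnValues.output.
  return `finish: ${result.returnValues?.output ?? ""}`;
}
```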
package/dist/agents/chat/outputParser.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"outputParser.d.ts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n returnValues?: undefined;\n tool: any;\n toolInput: any;\n log: string;\n }>;\n /**\n * Returns the format instructions used in the output parser for the\n * ChatAgent class.\n * @returns The format instructions as a string.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;AAuCA;;;;AAA0E;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAArDE,qBAAAA,SAA8BF,uBAAAA;;;;;;;;;;;uBAW1BG"}
+ {"version":3,"file":"outputParser.d.ts","names":["AgentActionOutputParser","FINAL_ANSWER_ACTION","ChatAgentOutputParser","Promise"],"sources":["../../../src/agents/chat/outputParser.d.ts"],"sourcesContent":["import { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends the AgentActionOutputParser to parse the output of\n * the ChatAgent in LangChain. It checks if the output text contains the\n * final answer action or a JSON response, and parses it accordingly.\n * @example\n * ```typescript\n * const prompt = ChatPromptTemplate.fromMessages([\n * [\n * \"ai\",\n * `{PREFIX}\n * {FORMAT_INSTRUCTIONS}\n * {SUFFIX}`,\n * ],\n * [\"human\", \"Question: {input}\"],\n * ]);\n * const runnableAgent = RunnableSequence.from([\n * {\n * input: (i: { input: string; steps: AgentStep[] }) => i.input,\n * agent_scratchpad: (i: { input: string; steps: AgentStep[] }) =>\n * formatLogToString(i.steps),\n * },\n * prompt,\n * new OpenAI({ temperature: 0 }),\n * new ChatAgentOutputParser(),\n * ]);\n *\n * const executor = AgentExecutor.fromAgentAndTools({\n * agent: runnableAgent,\n * tools: [new SerpAPI(), new Calculator()],\n * });\n *\n * const result = await executor.invoke({\n * input:\n * \"Who is Olivia Wilde's boyfriend? What is his current age raised to the 0.23 power?\",\n * });\n * ```\n */\nexport declare class ChatAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n /**\n * Parses the output text from the MRKL chain into an agent action or\n * agent finish. If the text contains the final answer action or does not\n * contain an action, it returns an AgentFinish with the output and log.\n * If the text contains a JSON response, it returns the tool, toolInput,\n * and log.\n * @param text The output text from the MRKL chain.\n * @returns An object that satisfies the AgentFinish interface or an object with the tool, toolInput, and log.\n */\n parse(text: string): Promise<{\n tool?: undefined;\n toolInput?: undefined;\n returnValues: {\n output: string;\n };\n log: string;\n } | {\n returnValues?: undefined;\n tool: any;\n toolInput: any;\n log: string;\n }>;\n /**\n * Returns the format instructions used in the output parser for the\n * ChatAgent class.\n * @returns The format instructions as a string.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;AAuCA;;;;AAA0E;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;cAArDE,qBAAAA,SAA8BF,uBAAAA;;;;;;;;;;;uBAW1BG"}
package/dist/agents/initialize.d.cts CHANGED
@@ -6,7 +6,7 @@ import { StringInputToolSchema } from "../langchain-core/dist/tools/types.cjs";
  import { StructuredChatAgent } from "./structured_chat/index.cjs";
  import { OpenAIAgent } from "./openai_functions/index.cjs";
  import { XMLAgent } from "./xml/index.cjs";
- import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
+ import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { CallbackManager } from "@langchain/core/callbacks/manager";
  import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
@@ -19,7 +19,7 @@ import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
  * "chat-conversational-react-description".
  */
  type AgentType = "zero-shot-react-description" | "chat-zero-shot-react-description" | "chat-conversational-react-description";
- declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
+ declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
  /**
  * @interface
  */
package/dist/agents/initialize.d.cts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"initialize.d.cts","names":["_langchain_core_language_models_base2","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
+ {"version":3,"file":"initialize.d.cts","names":["_langchain_core_language_models_base0","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
package/dist/agents/initialize.d.ts CHANGED
@@ -7,7 +7,7 @@ import { StructuredChatAgent } from "./structured_chat/index.js";
  import { OpenAIAgent } from "./openai_functions/index.js";
  import { XMLAgent } from "./xml/index.js";
  import { StructuredToolInterface, ToolInterface } from "@langchain/core/tools";
- import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
+ import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
  import { CallbackManager } from "@langchain/core/callbacks/manager";
 
@@ -19,7 +19,7 @@ import { CallbackManager } from "@langchain/core/callbacks/manager";
  * "chat-conversational-react-description".
  */
  type AgentType = "zero-shot-react-description" | "chat-zero-shot-react-description" | "chat-conversational-react-description";
- declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
+ declare const initializeAgentExecutor: (tools: ToolInterface<StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;
  /**
  * @interface
  */
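The alias rename above is purely a build-output detail. For orientation, here is a minimal usage sketch of the `initializeAgentExecutorWithOptions` overloads declared in this file; the import path and the concrete model/tool classes are assumptions for illustration, while the function name, argument order, and the `agentType` value come from the declaration itself:

```typescript
// Assumed entrypoint for the classic package; adjust to your setup.
import { initializeAgentExecutorWithOptions } from "@langchain/classic/agents";
// Model and tool choices assumed purely for the example.
import { ChatOpenAI } from "@langchain/openai";
import { Calculator } from "@langchain/community/tools/calculator";

const llm = new ChatOpenAI({ temperature: 0 });
const tools = [new Calculator()];

// Structured overload: tools, then llm, then required options selecting the agent type.
const executor = await initializeAgentExecutorWithOptions(tools, llm, {
  agentType: "structured-chat-zero-shot-react-description",
});

const result = await executor.invoke({ input: "What is 2 ** 10?" });
console.log(result.output);
```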
package/dist/agents/initialize.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"initialize.d.ts","names":["_langchain_core_language_models_base2","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
+ {"version":3,"file":"initialize.d.ts","names":["_langchain_core_language_models_base0","BaseLanguageModelInterface","StructuredToolInterface","ToolInterface","CallbackManager","ChatAgent","ChatConversationalAgent","StructuredChatAgent","AgentExecutor","AgentExecutorInput","ZeroShotAgent","OpenAIAgent","XMLAgent","AgentType","initializeAgentExecutor","_________langchain_core_dist_tools_types_js0","StringInputToolSchema","BaseLanguageModelCallOptions","Promise","InitializeAgentExecutorOptions","fromLLMAndTools","Parameters","Omit","InitializeAgentExecutorOptionsStructured","initializeAgentExecutorWithOptions"],"sources":["../../src/agents/initialize.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport type { StructuredToolInterface, ToolInterface } from \"@langchain/core/tools\";\nimport { CallbackManager } from \"@langchain/core/callbacks/manager\";\nimport { ChatAgent } from \"./chat/index.js\";\nimport { ChatConversationalAgent } from \"./chat_convo/index.js\";\nimport { StructuredChatAgent } from \"./structured_chat/index.js\";\nimport { AgentExecutor, AgentExecutorInput } from \"./executor.js\";\nimport { ZeroShotAgent } from \"./mrkl/index.js\";\nimport { OpenAIAgent } from \"./openai_functions/index.js\";\nimport { XMLAgent } from \"./xml/index.js\";\n/**\n * Represents the type of an agent in LangChain. It can be\n * \"zero-shot-react-description\", \"chat-zero-shot-react-description\", or\n * \"chat-conversational-react-description\".\n */\ntype AgentType = \"zero-shot-react-description\" | \"chat-zero-shot-react-description\" | \"chat-conversational-react-description\";\nexport declare const initializeAgentExecutor: (tools: ToolInterface<import(\"../../../langchain-core/dist/tools/types.js\").StringInputToolSchema, any, any>[], llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, _agentType?: AgentType | undefined, _verbose?: boolean | undefined, _callbackManager?: CallbackManager | undefined) => Promise<AgentExecutor>;\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptions = ({\n agentType: \"zero-shot-react-description\";\n agentArgs?: Parameters<typeof ZeroShotAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof ChatAgent.fromLLMAndTools>[2];\n memory?: never;\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"chat-conversational-react-description\";\n agentArgs?: Parameters<typeof ChatConversationalAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"xml\";\n agentArgs?: Parameters<typeof XMLAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * @interface\n */\nexport type InitializeAgentExecutorOptionsStructured = ({\n agentType: \"structured-chat-zero-shot-react-description\";\n agentArgs?: Parameters<typeof StructuredChatAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">) | ({\n agentType: \"openai-functions\";\n agentArgs?: Parameters<typeof OpenAIAgent.fromLLMAndTools>[2];\n} & Omit<AgentExecutorInput, \"agent\" | \"tools\">);\n/**\n * Initialize an agent executor with options.\n * @param tools Array of tools to use in the agent\n * @param llm LLM or ChatModel to use in the agent\n * @param options Options for the agent, including agentType, agentArgs, and 
other options for AgentExecutor.fromAgentAndTools\n * @returns AgentExecutor\n */\nexport declare function initializeAgentExecutorWithOptions(tools: StructuredToolInterface[], llm: BaseLanguageModelInterface, options: InitializeAgentExecutorOptionsStructured): Promise<AgentExecutor>;\nexport declare function initializeAgentExecutorWithOptions(tools: ToolInterface[], llm: BaseLanguageModelInterface, options?: InitializeAgentExecutorOptions): Promise<AgentExecutor>;\nexport {};\n"],"mappings":";;;;;;;;;;;;;;;;;;;AAS0C;AAO1C,KADKa,SAAAA,GACgBC,6BAAyY,GAAA,kCAAA,GAAA,uCAAA;AAAA,cAAzYA,uBAAyY,EAAA,CAAA,KAAA,EAAxWX,aAAwW,CAAA,qBAAA,EAAA,GAAA,EAAA,GAAA,CAAA,EAAA,EAAA,GAAA,EAA3PF,0BAA2P,CAAA,GAAA,EAA3VD,qCAAAA,CAA+KiB,4BAAAA,CAA4K,EAAA,UAAA,CAAA,EAAhIJ,SAAgI,GAAA,SAAA,EAAA,QAAA,CAAA,EAAA,OAAA,GAAA,SAAA,EAAA,gBAAA,CAAA,EAAtDT,eAAsD,GAAA,SAAA,EAAA,GAAtBc,OAAsB,CAAdV,aAAc,CAAA;;;;AAA3PP,KAIvJkB,8BAAAA,GAJuJlB,CAAAA;EAA0B,SAAiGY,EAAAA,6BAAAA;EAAS,SAAiET,CAAAA,EAMxViB,UANwVjB,CAAAA,OAMtUM,aAAAA,CAAcU,eANwThB,CAAAA,CAAAA,CAAAA,CAAAA;EAAe,MAAyBI,CAAAA,EAAAA,KAAAA;CAAa,GAQzZc,IARoYJ,CAQ/XT,kBAR+XS,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAO,SAAA,EAAA,kCAAA;EAInYC,SAAAA,CAAAA,EAMIE,UANJF,CAAAA,OAMsBd,SAAAA,CAAUe,eANF,CAAA,CAAA,CAAA,CAAA;EAAA,MAAA,CAAA,EAAA,KAAA;CAAA,GAQtCE,IAN8BZ,CAMzBD,kBANuCW,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAe,SAA/CC,EAAAA,uCAAAA;EAAU,SAEjBZ,CAAAA,EAMOY,UANPZ,CAAAA,OAMyBH,uBAAAA,CAAwBc,eANjDX,CAAAA,CAAAA,CAAAA,CAAAA;CAAkB,GAOvBa,IAPAA,CAOKb,kBAPLa,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAI,SAE0BjB,EAAAA,KAAUe;EAAe,SAA3CC,CAAAA,EAOAA,UAPAA,CAAAA,OAOkBT,QAAAA,CAASQ,eAP3BC,CAAAA,CAAAA,CAAAA,CAAAA;CAAU,GAQtBC,IANKb,CAMAA,kBANAA,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA;;;;AAGAA,KAOGc,wCAAAA,GAPHd,CAAAA;EAAkB,SAAvBa,EAAAA,6CAAAA;EAAI,SAE0BV,CAAAA,EAOlBS,UAP2BD,CAAAA,OAOTb,mBAAAA,CAAoBa,eAPXA,CAAAA,CAAAA,CAAAA,CAAAA;CAAe,GAQtDE,IARYD,CAQPZ,kBAROY,EAAAA,OAAAA,GAAAA,OAAAA,CAAAA,CAAAA,GAAAA,CAAAA;EAAU,SACjBZ,EAAAA,kBAAAA;EAAkB,SAAvBa,CAAAA,EASYD,UATZC,CAAAA,OAS8BX,WAAAA,CAAYS,eAT1CE,CAAAA,CAAAA,CAAAA,CAAAA;AAAI,CAAA,GAUJA,IAVI,CAUCb,kBAVD,EAAA,OAAA,GAAA,OAAA,CAAA,CAAA;AAIR;;;;;;;AAKgBY,iBASQG,kCAAAA,CATRH,KAAAA,EASkDnB,uBATlDmB,EAAAA,EAAAA,GAAAA,EASkFpB,0BATlFoB,EAAAA,OAAAA,EASuHE,wCATvHF,CAAAA,EASkKH,OATlKG,CAS0Kb,aAT1Ka,CAAAA;AACPZ,iBASee,kCAAAA,CATff,KAAAA,EASyDN,aATzDM,EAAAA,EAAAA,GAAAA,EAS+ER,0BAT/EQ,EAAAA,OAAAA,CAAAA,EASqHU,8BATrHV,CAAAA,EASsJS,OATtJT,CAS8JD,aAT9JC,CAAAA"}
package/dist/agents/mrkl/outputParser.d.ts CHANGED
@@ -25,10 +25,10 @@ declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {
  tool?: undefined;
  toolInput?: undefined;
  } | {
+ returnValues?: undefined;
  tool: string;
  toolInput: string;
  log: string;
- returnValues?: undefined;
  }>;
  /**
  * Returns the format instructions for parsing the output of an agent
package/dist/agents/mrkl/outputParser.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"outputParser.d.ts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n tool: string;\n toolInput: string;\n log: string;\n returnValues?: undefined;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
+ {"version":3,"file":"outputParser.d.ts","names":["OutputParserArgs","AgentActionOutputParser","FINAL_ANSWER_ACTION","ZeroShotAgentOutputParser","Promise"],"sources":["../../../src/agents/mrkl/outputParser.d.ts"],"sourcesContent":["import { OutputParserArgs } from \"../agent.js\";\nimport { AgentActionOutputParser } from \"../types.js\";\nexport declare const FINAL_ANSWER_ACTION = \"Final Answer:\";\n/**\n * A class that extends `AgentActionOutputParser` to provide a custom\n * implementation for parsing the output of a ZeroShotAgent action.\n */\nexport declare class ZeroShotAgentOutputParser extends AgentActionOutputParser {\n lc_namespace: string[];\n finishToolName: string;\n constructor(fields?: OutputParserArgs);\n /**\n * Parses the text output of an agent action, extracting the tool, tool\n * input, and output.\n * @param text The text output of an agent action.\n * @returns An object containing the tool, tool input, and output extracted from the text, along with the original text as a log.\n */\n parse(text: string): Promise<{\n returnValues: {\n output: string;\n };\n log: string;\n tool?: undefined;\n toolInput?: undefined;\n } | {\n returnValues?: undefined;\n tool: string;\n toolInput: string;\n log: string;\n }>;\n /**\n * Returns the format instructions for parsing the output of an agent\n * action in the style of the ZeroShotAgent.\n * @returns The format instructions for parsing the output.\n */\n getFormatInstructions(): string;\n}\n"],"mappings":";;;;;;AAOA;;;AAUyBI,cAVJD,yBAAAA,SAAkCF,uBAAAA,CAU9BG;EAAO,YAVuBH,EAAAA,MAAAA,EAAAA;EAAuB,cAAA,EAAA,MAAA;uBAGrDD;;;;;;;uBAOAI"}
package/dist/agents/openai_tools/index.d.ts CHANGED
@@ -5,8 +5,8 @@ import { AgentAction, AgentFinish } from "../index.js";
  import { ChatPromptTemplate } from "@langchain/core/prompts";
  import { StructuredToolInterface } from "@langchain/core/tools";
  import { ToolDefinition } from "@langchain/core/language_models/base";
- import { BaseChatModel, BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
  import { OpenAIClient } from "@langchain/openai";
+ import { BaseChatModel, BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
 
  //#region src/agents/openai_tools/index.d.ts
  /**
package/dist/agents/openai_tools/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
+ {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseChatModel","BaseChatModelCallOptions","ChatPromptTemplate","OpenAIClient","ToolDefinition","OpenAIToolsAgentOutputParser","ToolsAgentStep","AgentRunnableSequence","CreateOpenAIToolsAgentParams","ChatCompletionTool","createOpenAIToolsAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction","Promise"],"sources":["../../../src/agents/openai_tools/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport type { BaseChatModel, BaseChatModelCallOptions } from \"@langchain/core/language_models/chat_models\";\nimport { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { OpenAIClient } from \"@langchain/openai\";\nimport { ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { OpenAIToolsAgentOutputParser, type ToolsAgentStep } from \"./output_parser.js\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nexport { OpenAIToolsAgentOutputParser, type ToolsAgentStep };\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateOpenAIToolsAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: BaseChatModel<BaseChatModelCallOptions & {\n tools?: StructuredToolInterface[] | OpenAIClient.ChatCompletionTool[]\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n | any[];\n }>;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses OpenAI-style tool calling.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createOpenAIToolsAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/openai-tools-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/openai-tools-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createOpenAIToolsAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createOpenAIToolsAgent({ llm, tools, prompt, streamRunnable }: CreateOpenAIToolsAgentParams): Promise<AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>>;\n"],"mappings":";;;;;;;;;;;;;;AAWYS,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAMlB,GAMXT,EANFC,aAMED,CANYE,wBAMZF,GAAAA;IAA4BK,KAAAA,CAAAA,EALvBL,uBAKuBK,EAAAA,GALKD,YAAAA,CAAaM,kBAKlBL;IAE3BF;IAAAA,EAAkB,GAAA,EAAA;EA+DNQ,CAAAA,CAAAA;EAAsB;EAAA,KAAGC,EAjEtCZ,uBAiEsCY,EAAAA,GAjEVP,cAiEUO,EAAAA;EAAG;EAAO,MAAEE,EA/DjDX,kBA+DiDW;EAAM;;;;EAE/B,cAAA,CAAA,EAAA,OAAA;CAAoC;;AAFqD;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAArGH,sBAAAA;;;;;GAA+DF,+BAA+BU,QAAQX;SACnHD;GAAc,WAAA,GACW,WAAA"}
package/dist/agents/react/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { AgentRunnableSequence } from "../agent.js";
  import { BasePromptTemplate } from "@langchain/core/prompts";
  import { ToolInterface } from "@langchain/core/tools";
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents1 from "@langchain/core/agents";
+ import * as _langchain_core_agents0 from "@langchain/core/agents";
  import { AgentStep } from "@langchain/core/agents";
 
  //#region src/agents/react/index.d.ts
@@ -75,7 +75,7 @@ declare function createReactAgent({
  streamRunnable
  }: CreateReactAgentParams): Promise<AgentRunnableSequence<{
  steps: AgentStep[];
- }, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
+ }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
  //#endregion
  export { CreateReactAgentParams, createReactAgent };
  //# sourceMappingURL=index.d.ts.map
package/dist/agents/react/index.d.ts.map CHANGED
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAAA;EAAA,GAEzBH,EAAAA,0BAAAA;EAA0B;EAEX,KAKZD,EALDD,aAKCC,EAAAA;EAAkB;AAkD9B;;;EAA8C,MAAEO,EAlDpCP,kBAkDoCO;EAAK;;;;EACjC,cAAAG,CAAAA,EAAAA,OAAAA;CAC2B;;;AAFkE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFL,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
+ {"version":3,"file":"index.d.ts","names":["ToolInterface","BasePromptTemplate","BaseLanguageModelInterface","AgentStep","AgentRunnableSequence","CreateReactAgentParams","createReactAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish","Promise"],"sources":["../../../src/agents/react/index.d.ts"],"sourcesContent":["import type { ToolInterface } from \"@langchain/core/tools\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { AgentRunnableSequence } from \"../agent.js\";\n/**\n * Params used by the createXmlAgent function.\n */\nexport type CreateReactAgentParams = {\n /** LLM to use for the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: ToolInterface[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses ReAct prompting.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createReactAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { PromptTemplate } from \"@langchain/core/prompts\";\n *\n * import { OpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/react\n * const prompt = await pull<PromptTemplate>(\"hwchase17/react\");\n *\n * const llm = new OpenAI({\n * temperature: 0,\n * });\n *\n * const agent = await createReactAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n * ```\n */\nexport declare function createReactAgent({ llm, tools, prompt, streamRunnable }: CreateReactAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;AAQYK,KAAAA,sBAAAA,GAAsB;EAAA;EAAA,GAEzBH,EAAAA,0BAAAA;EAA0B;EAEX,KAKZD,EALDD,aAKCC,EAAAA;EAAkB;AAkD9B;;;EAA8C,MAAEO,EAlDpCP,kBAkDoCO;EAAK;;;;EACjC,cAAAG,CAAAA,EAAAA,OAAAA;CAC2B;;;AAFkE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAzFL,gBAAAA;;;;;GAAyDD,yBAAyBS,QAAQV;SACvGD;GAASQ,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -5,7 +5,7 @@ import { StructuredChatOutputParserWithRetries } from "./outputParser.js";
  import { BaseMessagePromptTemplate, BasePromptTemplate, ChatPromptTemplate } from "@langchain/core/prompts";
  import { StructuredToolInterface } from "@langchain/core/tools";
  import { BaseLanguageModelInterface, ToolDefinition } from "@langchain/core/language_models/base";
- import * as _langchain_core_agents0 from "@langchain/core/agents";
+ import * as _langchain_core_agents1 from "@langchain/core/agents";
  import { AgentStep } from "@langchain/core/agents";
 
  //#region src/agents/structured_chat/index.d.ts
@@ -176,7 +176,7 @@ declare function createStructuredChatAgent({
  streamRunnable
  }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{
  steps: AgentStep[];
- }, _langchain_core_agents0.AgentAction | _langchain_core_agents0.AgentFinish>>;
+ }, _langchain_core_agents1.AgentAction | _langchain_core_agents1.AgentFinish>>;
  //#endregion
  export { CreateStructuredChatAgentParams, StructuredChatAgent, StructuredChatAgentInput, StructuredChatCreatePromptArgs, createStructuredChatAgent };
  //# sourceMappingURL=index.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents0","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAAoC,UAhBnBc,8BAAAA,CAgBmB;EAAA;EAAsB,MAAnBP,CAAAA,EAAAA,MAAAA;EAAQ;EAK1BS,MAAAA,CAAAA,EAAAA,MAAAA;EAAmB;EAAA,oBAGjBD,CAAAA,EAAAA,MAAAA;EAAwB;EAUQ,cAOZJ,CAAAA,EAAAA,MAAAA,EAAAA;EAAgB;EAEd,aAQdL,CAAAA,EAzCXF,yBAyCWE,EAAAA;;;;;;AA2BCL,KA9DpBc,wBAAAA,GAA2BR,QA8DPN,CA9DgBW,UA8DhBX,EAAAA,cAAAA,CAAAA;;;;;AAzDiBO,cAA5BQ,mBAAAA,SAA4BR,KAAAA,CAAAA;EAAK,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EA8D1CU,YAAAA,EAAAA,MAAAA,EAAAA;EAA+B,WAAA,CAAA,KAAA,EA3DpBH,wBA2DoB;EAAA,UAElCd,CAAAA,CAAAA,EAAAA,6CAAAA;EAA0B,iBAEvBD,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,SAAGE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,KAKxCC,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkB;AA+D9B;;;;EAA8D,OAAEmB,aAAAA,CAAAA,KAAAA,EAzHhCtB,uBAyHgCsB,EAAAA,CAAAA,EAAAA,IAAAA;EAAM;;;;;;EAAmF,OAA7BL,sBAAAA,CAAAA,OAAAA,EAlHjFN,gBAkHiFM,GAAAA;IAAO,SAAA,EAAA,MAAA,EAAA;MAhH3HJ;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
+ {"version":3,"file":"index.d.ts","names":["StructuredToolInterface","BaseLanguageModelInterface","ToolDefinition","BasePromptTemplate","BaseMessagePromptTemplate","ChatPromptTemplate","AgentStep","Optional","Agent","AgentArgs","AgentRunnableSequence","OutputParserArgs","AgentInput","StructuredChatOutputParserWithRetries","StructuredChatCreatePromptArgs","StructuredChatAgentInput","StructuredChatAgent","Promise","CreateStructuredChatAgentParams","createStructuredChatAgent","llm","tools","prompt","streamRunnable","_langchain_core_agents1","AgentAction","AgentFinish"],"sources":["../../../src/agents/structured_chat/index.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { type BaseLanguageModelInterface, type ToolDefinition } from \"@langchain/core/language_models/base\";\nimport type { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessagePromptTemplate, ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { Optional } from \"../../types/type-utils.js\";\nimport { Agent, AgentArgs, AgentRunnableSequence, OutputParserArgs } from \"../agent.js\";\nimport { AgentInput } from \"../types.js\";\nimport { StructuredChatOutputParserWithRetries } from \"./outputParser.js\";\n/**\n * Interface for arguments used to create a prompt for a\n * StructuredChatAgent.\n */\nexport interface StructuredChatCreatePromptArgs {\n /** String to put after the list of tools. */\n suffix?: string;\n /** String to put before the list of tools. */\n prefix?: string;\n /** String to use directly as the human message template. */\n humanMessageTemplate?: string;\n /** List of input variables the final prompt will expect. */\n inputVariables?: string[];\n /** List of historical prompts from memory. */\n memoryPrompts?: BaseMessagePromptTemplate[];\n}\n/**\n * Type for input data for creating a StructuredChatAgent, with the\n * 'outputParser' property made optional.\n */\nexport type StructuredChatAgentInput = Optional<AgentInput, \"outputParser\">;\n/**\n * Agent that interoperates with Structured Tools using React logic.\n * @augments Agent\n */\nexport declare class StructuredChatAgent extends Agent {\n static lc_name(): string;\n lc_namespace: string[];\n constructor(input: StructuredChatAgentInput);\n _agentType(): \"structured-chat-zero-shot-react-description\";\n observationPrefix(): string;\n llmPrefix(): string;\n _stop(): string[];\n /**\n * Validates that all provided tools have a description. Throws an error\n * if any tool lacks a description.\n * @param tools Array of StructuredTool instances to validate.\n */\n static validateTools(tools: StructuredToolInterface[]): void;\n /**\n * Returns a default output parser for the StructuredChatAgent. If an LLM\n * is provided, it creates an output parser with retry logic from the LLM.\n * @param fields Optional fields to customize the output parser. Can include an LLM and a list of tool names.\n * @returns An instance of StructuredChatOutputParserWithRetries.\n */\n static getDefaultOutputParser(fields?: OutputParserArgs & {\n toolNames: string[];\n }): StructuredChatOutputParserWithRetries;\n /**\n * Constructs the agent's scratchpad from a list of steps. 
If the agent's\n * scratchpad is not empty, it prepends a message indicating that the\n * agent has not seen any previous work.\n * @param steps Array of AgentStep instances to construct the scratchpad from.\n * @returns A Promise that resolves to a string representing the agent's scratchpad.\n */\n constructScratchPad(steps: AgentStep[]): Promise<string>;\n /**\n * Creates a string representation of the schemas of the provided tools.\n * @param tools Array of StructuredTool instances to create the schemas string from.\n * @returns A string representing the schemas of the provided tools.\n */\n static createToolSchemasString(tools: StructuredToolInterface[]): string;\n /**\n * Create prompt in the style of the agent.\n *\n * @param tools - List of tools the agent will have access to, used to format the prompt.\n * @param args - Arguments to create the prompt with.\n * @param args.suffix - String to put after the list of tools.\n * @param args.prefix - String to put before the list of tools.\n * @param args.inputVariables List of input variables the final prompt will expect.\n * @param args.memoryPrompts List of historical prompts from memory.\n */\n static createPrompt(tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs): ChatPromptTemplate<any, any>;\n /**\n * Creates a StructuredChatAgent from an LLM and a list of tools.\n * Validates the tools, creates a prompt, and sets up an LLM chain for the\n * agent.\n * @param llm BaseLanguageModel instance to create the agent from.\n * @param tools Array of StructuredTool instances to create the agent from.\n * @param args Optional arguments to customize the creation of the agent. Can include arguments for creating the prompt and AgentArgs.\n * @returns A new instance of StructuredChatAgent.\n */\n static fromLLMAndTools(llm: BaseLanguageModelInterface, tools: StructuredToolInterface[], args?: StructuredChatCreatePromptArgs & AgentArgs): StructuredChatAgent;\n}\n/**\n * Params used by the createStructuredChatAgent function.\n */\nexport type CreateStructuredChatAgentParams = {\n /** LLM to use as the agent. */\n llm: BaseLanguageModelInterface;\n /** Tools this agent has access to. */\n tools: (StructuredToolInterface | ToolDefinition)[];\n /**\n * The prompt to use. Must have input keys for\n * `tools`, `tool_names`, and `agent_scratchpad`.\n */\n prompt: BasePromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent aimed at supporting tools with multiple inputs.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. 
It returns as output either an\n * AgentAction or AgentFinish.\n *\n * @example\n * ```typescript\n * import { AgentExecutor, createStructuredChatAgent } from \"langchain/agents\";\n * import { pull } from \"langchain/hub\";\n * import type { ChatPromptTemplate } from \"@langchain/core/prompts\";\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * import { ChatOpenAI } from \"@langchain/openai\";\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * // Get the prompt to use - you can modify this!\n * // If you want to see the prompt in full, you can at:\n * // https://smith.langchain.com/hub/hwchase17/structured-chat-agent\n * const prompt = await pull<ChatPromptTemplate>(\n * \"hwchase17/structured-chat-agent\"\n * );\n *\n * const llm = new ChatOpenAI({\n * temperature: 0,\n * model: \"gpt-3.5-turbo-1106\",\n * });\n *\n * const agent = await createStructuredChatAgent({\n * llm,\n * tools,\n * prompt,\n * });\n *\n * const agentExecutor = new AgentExecutor({\n * agent,\n * tools,\n * });\n *\n * const result = await agentExecutor.invoke({\n * input: \"what is LangChain?\",\n * });\n *\n * // With chat history\n * const result2 = await agentExecutor.invoke({\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage(\"hi! my name is cob\"),\n * new AIMessage(\"Hello Cob! How can I assist you today?\"),\n * ],\n * });\n * ```\n */\nexport declare function createStructuredChatAgent({ llm, tools, prompt, streamRunnable }: CreateStructuredChatAgentParams): Promise<AgentRunnableSequence<{\n steps: AgentStep[];\n}, import(\"@langchain/core/agents\").AgentAction | import(\"@langchain/core/agents\").AgentFinish>>;\n"],"mappings":";;;;;;;;;;;;;;AAaA;AAgBA;AAAoC,UAhBnBc,8BAAAA,CAgBmB;EAAA;EAAsB,MAAnBP,CAAAA,EAAAA,MAAAA;EAAQ;EAK1BS,MAAAA,CAAAA,EAAAA,MAAAA;EAAmB;EAAA,oBAGjBD,CAAAA,EAAAA,MAAAA;EAAwB;EAUQ,cAOZJ,CAAAA,EAAAA,MAAAA,EAAAA;EAAgB;EAEd,aAQdL,CAAAA,EAzCXF,yBAyCWE,EAAAA;;;;;;AA2BCL,KA9DpBc,wBAAAA,GAA2BR,QA8DPN,CA9DgBW,UA8DhBX,EAAAA,cAAAA,CAAAA;;;;;AAzDiBO,cAA5BQ,mBAAAA,SAA4BR,KAAAA,CAAAA;EAAK,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EA8D1CU,YAAAA,EAAAA,MAAAA,EAAAA;EAA+B,WAAA,CAAA,KAAA,EA3DpBH,wBA2DoB;EAAA,UAElCd,CAAAA,CAAAA,EAAAA,6CAAAA;EAA0B,iBAEvBD,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,SAAGE,CAAAA,CAAAA,EAAAA,MAAAA;EAAc,KAKxCC,CAAAA,CAAAA,EAAAA,MAAAA,EAAAA;EAAkB;AA+D9B;;;;EAA8D,OAAEmB,aAAAA,CAAAA,KAAAA,EAzHhCtB,uBAyHgCsB,EAAAA,CAAAA,EAAAA,IAAAA;EAAM;;;;;;EAAmF,OAA7BL,sBAAAA,CAAAA,OAAAA,EAlHjFN,gBAkHiFM,GAAAA;IAAO,SAAA,EAAA,MAAA,EAAA;MAhH3HJ;;;;;;;;6BAQuBP,cAAcW;;;;;;wCAMHjB;;;;;;;;;;;6BAWXA,kCAAkCc,iCAAiCT;;;;;;;;;;8BAUlEJ,mCAAmCD,kCAAkCc,iCAAiCL,YAAYO;;;;;KAKtIE,+BAAAA;;OAEHjB;;UAEGD,0BAA0BE;;;;;UAK1BC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBA+DYgB,yBAAAA;;;;;GAAkED,kCAAkCD,QAAQP;SACzHJ;GAASkB,uBAAAA,CACgBC,WAAAA,GAAWD,uBAAAA,CAAoCE"}
@@ -1 +1 @@
- {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js1","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
+ {"version":3,"file":"index.d.ts","names":["ChatPromptTemplate","StructuredToolInterface","LanguageModelLike","ToolDefinition","AgentRunnableSequence","ToolsAgentStep","CreateToolCallingAgentParams","createToolCallingAgent","llm","tools","prompt","streamRunnable","___index_js0","AgentFinish","AgentAction"],"sources":["../../../src/agents/tool_calling/index.d.ts"],"sourcesContent":["import { ChatPromptTemplate } from \"@langchain/core/prompts\";\nimport { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { LanguageModelLike, ToolDefinition } from \"@langchain/core/language_models/base\";\nimport { AgentRunnableSequence } from \"../agent.js\";\nimport { ToolsAgentStep } from \"./output_parser.js\";\n/**\n * Params used by the createOpenAIToolsAgent function.\n */\nexport type CreateToolCallingAgentParams = {\n /**\n * LLM to use as the agent. Should work with OpenAI tool calling,\n * so must either be an OpenAI model that supports that or a wrapper of\n * a different model that adds in equivalent support.\n */\n llm: LanguageModelLike;\n /** Tools this agent has access to. */\n tools: StructuredToolInterface[] | ToolDefinition[];\n /** The prompt to use, must have an input key of `agent_scratchpad`. */\n prompt: ChatPromptTemplate;\n /**\n * Whether to invoke the underlying model in streaming mode,\n * allowing streaming of intermediate steps. Defaults to true.\n */\n streamRunnable?: boolean;\n};\n/**\n * Create an agent that uses tools.\n * @param params Params required to create the agent. Includes an LLM, tools, and prompt.\n * @returns A runnable sequence representing an agent. It takes as input all the same input\n * variables as the prompt passed in does. It returns as output either an\n * AgentAction or AgentFinish.\n * @example\n * ```typescript\n * import { ChatAnthropic } from \"@langchain/anthropic\";\n * import { ChatPromptTemplate, MessagesPlaceholder } from \"@langchain/core/prompts\";\n * import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n *\n * const prompt = ChatPromptTemplate.fromMessages(\n * [\n * [\"system\", \"You are a helpful assistant\"],\n * [\"placeholder\", \"{chat_history}\"],\n * [\"human\", \"{input}\"],\n * [\"placeholder\", \"{agent_scratchpad}\"],\n * ]\n * );\n *\n *\n * const llm = new ChatAnthropic({\n * modelName: \"claude-3-opus-20240229\",\n * temperature: 0,\n * });\n *\n * // Define the tools the agent will have access to.\n * const tools = [...];\n *\n * const agent = createToolCallingAgent({ llm, tools, prompt });\n *\n * const agentExecutor = new AgentExecutor({ agent, tools });\n *\n * const result = await agentExecutor.invoke({input: \"what is LangChain?\"});\n *\n * // Using with chat history\n * import { AIMessage, HumanMessage } from \"@langchain/core/messages\";\n *\n * const result2 = await agentExecutor.invoke(\n * {\n * input: \"what's my name?\",\n * chat_history: [\n * new HumanMessage({content: \"hi! my name is bob\"}),\n * new AIMessage({content: \"Hello Bob! 
How can I assist you today?\"}),\n * ],\n * }\n * );\n * ```\n */\nexport declare function createToolCallingAgent({ llm, tools, prompt, streamRunnable }: CreateToolCallingAgentParams): AgentRunnableSequence<{\n steps: ToolsAgentStep[];\n}, import(\"../index.js\").AgentFinish | import(\"../index.js\").AgentAction[]>;\n"],"mappings":";;;;;;;;;;;AAQYM,KAAAA,4BAAAA,GAA4B;EAAA;;;;;EAUV,GAAA,EAJrBJ,iBAIqB;EAyDNK;EAAsB,KAAA,EA3DnCN,uBA2DmC,EAAA,GA3DPE,cA2DO,EAAA;EAAA;EAAM,MAAEM,EAzD1CT,kBAyD0CS;EAAK;;;;EAClC,cAAA,CAAA,EAAA,OAAA;CACW;;AAFuG;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;iBAAnHF,sBAAAA;;;;;GAA+DD,+BAA+BF;SAC3GC;GAAc,WAAA,GACW,WAAA"}
@@ -1,5 +1,5 @@
  import { BaseChatMemory, BaseChatMemoryInput } from "../../../memory/chat_memory.cjs";
- import * as _langchain_core_messages0 from "@langchain/core/messages";
+ import * as _langchain_core_messages1 from "@langchain/core/messages";
  import { InputValues, MemoryVariables, OutputValues } from "@langchain/core/memory";
  import { ChatOpenAI } from "@langchain/openai";
 
@@ -37,7 +37,7 @@ declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {
  * Retrieves the messages from the chat history.
  * @returns Promise that resolves with the messages from the chat history.
  */
- getMessages(): Promise<_langchain_core_messages0.BaseMessage<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>[]>;
+ getMessages(): Promise<_langchain_core_messages1.BaseMessage<_langchain_core_messages1.MessageStructure, _langchain_core_messages1.MessageType>[]>;
  /**
  * Loads memory variables from the input values.
  * @param _values Input values.
@@ -1 +1 @@
- {"version":3,"file":"token_buffer_memory.d.cts","names":["ChatOpenAI","InputValues","MemoryVariables","OutputValues","BaseChatMemory","BaseChatMemoryInput","OpenAIAgentTokenBufferMemoryFields","OpenAIAgentTokenBufferMemory","_langchain_core_messages0","MessageStructure","MessageType","BaseMessage","Promise"],"sources":["../../../../src/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts"],"sourcesContent":["import { ChatOpenAI } from \"@langchain/openai\";\nimport { InputValues, MemoryVariables, OutputValues } from \"@langchain/core/memory\";\nimport { BaseChatMemory, BaseChatMemoryInput } from \"../../../memory/chat_memory.js\";\n/**\n * Type definition for the fields required to initialize an instance of\n * OpenAIAgentTokenBufferMemory.\n */\nexport type OpenAIAgentTokenBufferMemoryFields = BaseChatMemoryInput & {\n llm: ChatOpenAI;\n humanPrefix?: string;\n aiPrefix?: string;\n memoryKey?: string;\n maxTokenLimit?: number;\n returnMessages?: boolean;\n outputKey?: string;\n intermediateStepsKey?: string;\n};\n/**\n * Memory used to save agent output and intermediate steps.\n */\nexport declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {\n humanPrefix: string;\n aiPrefix: string;\n llm: ChatOpenAI;\n memoryKey: string;\n maxTokenLimit: number;\n returnMessages: boolean;\n outputKey: string;\n intermediateStepsKey: string;\n constructor(fields: OpenAIAgentTokenBufferMemoryFields);\n get memoryKeys(): string[];\n /**\n * Retrieves the messages from the chat history.\n * @returns Promise that resolves with the messages from the chat history.\n */\n getMessages(): Promise<import(\"@langchain/core/messages\").BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * Loads memory variables from the input values.\n * @param _values Input values.\n * @returns Promise that resolves with the loaded memory variables.\n */\n loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;\n /**\n * Saves the context of the chat, including user input, AI output, and\n * intermediate steps. Prunes the chat history if the total token count\n * exceeds the maximum limit.\n * @param inputValues Input values.\n * @param outputValues Output values.\n * @returns Promise that resolves when the context has been saved.\n */\n saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;\n}\n"],"mappings":";;;;;;;;;;AAOA;AAA8C,KAAlCM,kCAAAA,GAAqCD,mBAAH,GAAA;EAAA,GAAGA,EACxCL,UADwCK;EAAmB,WAC3DL,CAAAA,EAAAA,MAAAA;EAAU,QAAA,CAAA,EAAA,MAAA;EAYEO,SAAAA,CAAAA,EAAAA,MAAAA;EAA4B,aAAA,CAAA,EAAA,MAAA;EAAA,cAGxCP,CAAAA,EAAAA,OAAAA;EAAU,SAMKM,CAAAA,EAAAA,MAAAA;EAAkC,oBAAAE,CAAAA,EAAAA,MAMmDC;CAAgB;;;;AAMtEP,cArBlCK,4BAAAA,SAAqCH,cAAAA,CAqBHF;EAAe,WAAvBU,EAAAA,MAAAA;EAAO,QASzBX,EAAAA,MAAAA;EAAW,GAAgBE,EA3B/CH,UA2B+CG;EAAY,SAAGS,EAAAA,MAAAA;EAAO,aA9BpBR,EAAAA,MAAAA;EAAc,cAAA,EAAA,OAAA;;;sBAShDE;;;;;;iBAMLM,QAA0JJ,yBAAAA,CAA/GG,YANJH,yBAAAA,CAMmDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;+BAMjIT,cAAcW,QAAQV;;;;;;;;;2BAS1BD,2BAA2BE,eAAeS"}
+ {"version":3,"file":"token_buffer_memory.d.cts","names":["ChatOpenAI","InputValues","MemoryVariables","OutputValues","BaseChatMemory","BaseChatMemoryInput","OpenAIAgentTokenBufferMemoryFields","OpenAIAgentTokenBufferMemory","_langchain_core_messages1","MessageStructure","MessageType","BaseMessage","Promise"],"sources":["../../../../src/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts"],"sourcesContent":["import { ChatOpenAI } from \"@langchain/openai\";\nimport { InputValues, MemoryVariables, OutputValues } from \"@langchain/core/memory\";\nimport { BaseChatMemory, BaseChatMemoryInput } from \"../../../memory/chat_memory.js\";\n/**\n * Type definition for the fields required to initialize an instance of\n * OpenAIAgentTokenBufferMemory.\n */\nexport type OpenAIAgentTokenBufferMemoryFields = BaseChatMemoryInput & {\n llm: ChatOpenAI;\n humanPrefix?: string;\n aiPrefix?: string;\n memoryKey?: string;\n maxTokenLimit?: number;\n returnMessages?: boolean;\n outputKey?: string;\n intermediateStepsKey?: string;\n};\n/**\n * Memory used to save agent output and intermediate steps.\n */\nexport declare class OpenAIAgentTokenBufferMemory extends BaseChatMemory {\n humanPrefix: string;\n aiPrefix: string;\n llm: ChatOpenAI;\n memoryKey: string;\n maxTokenLimit: number;\n returnMessages: boolean;\n outputKey: string;\n intermediateStepsKey: string;\n constructor(fields: OpenAIAgentTokenBufferMemoryFields);\n get memoryKeys(): string[];\n /**\n * Retrieves the messages from the chat history.\n * @returns Promise that resolves with the messages from the chat history.\n */\n getMessages(): Promise<import(\"@langchain/core/messages\").BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * Loads memory variables from the input values.\n * @param _values Input values.\n * @returns Promise that resolves with the loaded memory variables.\n */\n loadMemoryVariables(_values: InputValues): Promise<MemoryVariables>;\n /**\n * Saves the context of the chat, including user input, AI output, and\n * intermediate steps. Prunes the chat history if the total token count\n * exceeds the maximum limit.\n * @param inputValues Input values.\n * @param outputValues Output values.\n * @returns Promise that resolves when the context has been saved.\n */\n saveContext(inputValues: InputValues, outputValues: OutputValues): Promise<void>;\n}\n"],"mappings":";;;;;;;;;;AAOA;AAA8C,KAAlCM,kCAAAA,GAAqCD,mBAAH,GAAA;EAAA,GAAGA,EACxCL,UADwCK;EAAmB,WAC3DL,CAAAA,EAAAA,MAAAA;EAAU,QAAA,CAAA,EAAA,MAAA;EAYEO,SAAAA,CAAAA,EAAAA,MAAAA;EAA4B,aAAA,CAAA,EAAA,MAAA;EAAA,cAGxCP,CAAAA,EAAAA,OAAAA;EAAU,SAMKM,CAAAA,EAAAA,MAAAA;EAAkC,oBAAAE,CAAAA,EAAAA,MAMmDC;CAAgB;;;;AAMtEP,cArBlCK,4BAAAA,SAAqCH,cAAAA,CAqBHF;EAAe,WAAvBU,EAAAA,MAAAA;EAAO,QASzBX,EAAAA,MAAAA;EAAW,GAAgBE,EA3B/CH,UA2B+CG;EAAY,SAAGS,EAAAA,MAAAA;EAAO,aA9BpBR,EAAAA,MAAAA;EAAc,cAAA,EAAA,OAAA;;;sBAShDE;;;;;;iBAMLM,QAA0JJ,yBAAAA,CAA/GG,YANJH,yBAAAA,CAMmDC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;+BAMjIT,cAAcW,QAAQV;;;;;;;;;2BAS1BD,2BAA2BE,eAAeS"}
@@ -92,7 +92,7 @@ function convertOpenAPISchemaToJSONSchema(schema, spec) {
  const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
  if (openAPIProperty.type === void 0) return jsonSchema;
  jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
- if ((openAPIProperty.required || schema.required?.includes(propertyName)) && jsonSchema.required !== void 0) jsonSchema.required.push(propertyName);
+ if (schema.required?.includes(propertyName) && jsonSchema.required !== void 0) jsonSchema.required.push(propertyName);
  return jsonSchema;
  }, {
  type: "object",
@@ -1 +1 @@
- {"version":3,"file":"openapi.cjs","names":["url: string","pathParams: Record<string, string>","newParams: Record<string, string>","params: OpenAPIV3_1.ParameterObject[]","spec: OpenAPISpec","jsonSchema: JsonSchema7ObjectType","schema: OpenAPIV3_1.SchemaObject","nameToCallMap: Record<string, { method: string; url: string }>","operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>","paramLocationToRequestArgNameMap: Record<string, string>","requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n }","requestBodySchemas: Record<string, JsonSchema7ObjectType>","openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function","OpenAPISpec","name: string","requestArgs: Record<string, any>","options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }","filteredArgs: Record<string, any>","headers: Record<string, string>","BaseChain","config: { requestMethod: SimpleRequestChainExecutionMethod }","values: ChainValues","_runManager?: CallbackManagerForChainRun","spec: OpenAPIV3_1.Document | string","options: OpenAPIChainOptions","ChatPromptTemplate","HumanMessagePromptTemplate","LLMChain","JsonOutputFunctionsParser","SequentialChain"],"sources":["../../../src/chains/openai_functions/openapi.ts"],"sourcesContent":["import type { OpenAIClient } from \"@langchain/openai\";\nimport {\n type JsonSchema7ObjectType,\n type JsonSchema7ArrayType,\n type JsonSchema7Type,\n} from \"@langchain/core/utils/json_schema\";\nimport type { OpenAPIV3_1 } from \"openapi-types\";\n\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { OpenAPISpec } from \"../../util/openapi.js\";\nimport { BaseChain } from \"../base.js\";\nimport { LLMChain, LLMChainInput } from \"../llm_chain.js\";\nimport { SequentialChain } from \"../sequential_chain.js\";\nimport { JsonOutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing a function for executing OpenAPI requests.\n */\ntype OpenAPIExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n) => Promise<string>;\n\n/**\n * Formats a URL by replacing path parameters with their corresponding\n * values.\n * @param url The URL to format.\n * @param pathParams The path parameters to replace in the URL.\n * @returns The formatted URL.\n */\nfunction formatURL(url: string, pathParams: Record<string, string>): string {\n const expectedPathParamNames = [...url.matchAll(/{(.*?)}/g)].map(\n (match) => match[1]\n );\n const newParams: Record<string, string> = {};\n for (const paramName of expectedPathParamNames) {\n const cleanParamName = paramName.replace(/^\\.;/, \"\").replace(/\\*$/, \"\");\n const value = pathParams[cleanParamName];\n let formattedValue;\n if (Array.isArray(value)) {\n if (paramName.startsWith(\".\")) {\n const separator = paramName.endsWith(\"*\") ? 
\".\" : \",\";\n formattedValue = `.${value.join(separator)}`;\n } else if (paramName.startsWith(\",\")) {\n const separator = paramName.endsWith(\"*\") ? `${cleanParamName}=` : \",\";\n formattedValue = `${cleanParamName}=${value.join(separator)}`;\n } else {\n formattedValue = value.join(\",\");\n }\n } else if (typeof value === \"object\") {\n const kvSeparator = paramName.endsWith(\"*\") ? \"=\" : \",\";\n const kvStrings = Object.entries(value).map(\n ([k, v]) => k + kvSeparator + v\n );\n let entrySeparator;\n if (paramName.startsWith(\".\")) {\n entrySeparator = \".\";\n formattedValue = \".\";\n } else if (paramName.startsWith(\";\")) {\n entrySeparator = \";\";\n formattedValue = \";\";\n } else {\n entrySeparator = \",\";\n formattedValue = \"\";\n }\n formattedValue += kvStrings.join(entrySeparator);\n } else {\n if (paramName.startsWith(\".\")) {\n formattedValue = `.${value}`;\n } else if (paramName.startsWith(\";\")) {\n formattedValue = `;${cleanParamName}=${value}`;\n } else {\n formattedValue = value;\n }\n }\n newParams[paramName] = formattedValue;\n }\n let formattedUrl = url;\n for (const [key, newValue] of Object.entries(newParams)) {\n formattedUrl = formattedUrl.replace(`{${key}}`, newValue);\n }\n return formattedUrl;\n}\n\n/**\n * Converts OpenAPI parameters to JSON schema format.\n * @param params The OpenAPI parameters to convert.\n * @param spec The OpenAPI specification that contains the parameters.\n * @returns The JSON schema representation of the OpenAPI parameters.\n */\nfunction convertOpenAPIParamsToJSONSchema(\n params: OpenAPIV3_1.ParameterObject[],\n spec: OpenAPISpec\n) {\n return params.reduce(\n (jsonSchema: JsonSchema7ObjectType, param) => {\n let schema;\n if (param.schema) {\n schema = spec.getSchema(param.schema);\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else if (param.content) {\n const mediaTypeSchema = Object.values(param.content)[0].schema;\n if (mediaTypeSchema) {\n schema = spec.getSchema(mediaTypeSchema);\n }\n if (!schema) {\n return jsonSchema;\n }\n if (schema.description === undefined) {\n schema.description = param.description ?? \"\";\n }\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else {\n return jsonSchema;\n }\n if (param.required && Array.isArray(jsonSchema.required)) {\n jsonSchema.required.push(param.name);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n}\n\n// OpenAI throws errors on extraneous schema properties, e.g. if \"required\" is set on individual ones\n/**\n * Converts OpenAPI schemas to JSON schema format.\n * @param schema The OpenAPI schema to convert.\n * @param spec The OpenAPI specification that contains the schema.\n * @returns The JSON schema representation of the OpenAPI schema.\n */\nexport function convertOpenAPISchemaToJSONSchema(\n schema: OpenAPIV3_1.SchemaObject,\n spec: OpenAPISpec\n): JsonSchema7Type {\n if (schema.type === \"object\") {\n return Object.keys(schema.properties ?? 
{}).reduce(\n (jsonSchema: JsonSchema7ObjectType, propertyName) => {\n if (!schema.properties) {\n return jsonSchema;\n }\n const openAPIProperty = spec.getSchema(schema.properties[propertyName]);\n if (openAPIProperty.type === undefined) {\n return jsonSchema;\n }\n jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(\n openAPIProperty,\n spec\n );\n if (\n (openAPIProperty.required ||\n schema.required?.includes(propertyName)) &&\n jsonSchema.required !== undefined\n ) {\n jsonSchema.required.push(propertyName);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n }\n if (schema.type === \"array\") {\n const openAPIItems = spec.getSchema(schema.items ?? {});\n return {\n type: \"array\",\n items: convertOpenAPISchemaToJSONSchema(openAPIItems, spec),\n minItems: schema.minItems,\n maxItems: schema.maxItems,\n } as JsonSchema7ArrayType;\n }\n return {\n type: schema.type ?? \"string\",\n } as JsonSchema7Type;\n}\n\n/**\n * Converts an OpenAPI specification to OpenAI functions.\n * @param spec The OpenAPI specification to convert.\n * @returns An object containing the OpenAI functions derived from the OpenAPI specification and a default execution method.\n */\nexport function convertOpenAPISpecToOpenAIFunctions(spec: OpenAPISpec): {\n openAIFunctions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];\n defaultExecutionMethod?: OpenAPIExecutionMethod;\n} {\n if (!spec.document.paths) {\n return { openAIFunctions: [] };\n }\n const openAIFunctions = [];\n const nameToCallMap: Record<string, { method: string; url: string }> = {};\n for (const path of Object.keys(spec.document.paths)) {\n const pathParameters = spec.getParametersForPath(path);\n for (const method of spec.getMethodsForPath(path)) {\n const operation = spec.getOperation(path, method);\n if (!operation) {\n return { openAIFunctions: [] };\n }\n const operationParametersByLocation = pathParameters\n .concat(spec.getParametersForOperation(operation))\n .reduce(\n (\n operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>,\n param\n ) => {\n if (!operationParams[param.in]) {\n operationParams[param.in] = [];\n }\n operationParams[param.in].push(param);\n return operationParams;\n },\n {}\n );\n const paramLocationToRequestArgNameMap: Record<string, string> = {\n query: \"params\",\n header: \"headers\",\n cookie: \"cookies\",\n path: \"path_params\",\n };\n const requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n } = {};\n for (const paramLocation of Object.keys(\n paramLocationToRequestArgNameMap\n )) {\n if (operationParametersByLocation[paramLocation]) {\n requestArgsSchema[paramLocationToRequestArgNameMap[paramLocation]] =\n convertOpenAPIParamsToJSONSchema(\n operationParametersByLocation[paramLocation],\n spec\n );\n }\n }\n const requestBody = spec.getRequestBodyForOperation(operation);\n if (requestBody?.content !== undefined) {\n const requestBodySchemas: Record<string, JsonSchema7ObjectType> = {};\n for (const [mediaType, mediaTypeObject] of Object.entries(\n requestBody.content\n )) {\n if (mediaTypeObject.schema !== undefined) {\n const schema = spec.getSchema(mediaTypeObject.schema);\n requestBodySchemas[mediaType] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n ) as JsonSchema7ObjectType;\n }\n }\n const mediaTypes = Object.keys(requestBodySchemas);\n if (mediaTypes.length === 1) {\n requestArgsSchema.data = 
requestBodySchemas[mediaTypes[0]];\n } else if (mediaTypes.length > 1) {\n requestArgsSchema.data = {\n anyOf: Object.values(requestBodySchemas),\n };\n }\n }\n const openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function =\n {\n name: OpenAPISpec.getCleanedOperationId(operation, path, method),\n description: operation.description ?? operation.summary ?? \"\",\n parameters: {\n type: \"object\",\n properties: requestArgsSchema,\n // All remaining top-level parameters are required\n required: Object.keys(requestArgsSchema),\n },\n };\n\n openAIFunctions.push(openAIFunction);\n const baseUrl = (spec.baseUrl ?? \"\").endsWith(\"/\")\n ? (spec.baseUrl ?? \"\").slice(0, -1)\n : spec.baseUrl ?? \"\";\n nameToCallMap[openAIFunction.name] = {\n method,\n url: baseUrl + path,\n };\n }\n }\n return {\n openAIFunctions,\n defaultExecutionMethod: async (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n ) => {\n const {\n headers: customHeaders,\n params: customParams,\n ...rest\n } = options ?? {};\n const { method, url } = nameToCallMap[name];\n const requestParams = requestArgs.params ?? {};\n const nonEmptyParams = Object.keys(requestParams).reduce(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (filteredArgs: Record<string, any>, argName) => {\n if (\n requestParams[argName] !== \"\" &&\n requestParams[argName] !== null &&\n requestParams[argName] !== undefined\n ) {\n filteredArgs[argName] = requestParams[argName];\n }\n return filteredArgs;\n },\n {}\n );\n const queryString = new URLSearchParams({\n ...nonEmptyParams,\n ...customParams,\n }).toString();\n const pathParams = requestArgs.path_params;\n const formattedUrl =\n formatURL(url, pathParams) +\n (queryString.length ? 
`?${queryString}` : \"\");\n const headers: Record<string, string> = {};\n let body;\n if (requestArgs.data !== undefined) {\n let contentType = \"text/plain\";\n if (typeof requestArgs.data !== \"string\") {\n if (typeof requestArgs.data === \"object\") {\n contentType = \"application/json\";\n }\n body = JSON.stringify(requestArgs.data);\n } else {\n body = requestArgs.data;\n }\n headers[\"content-type\"] = contentType;\n }\n const response = await fetch(formattedUrl, {\n ...requestArgs,\n method,\n headers: {\n ...headers,\n ...requestArgs.headers,\n ...customHeaders,\n },\n body,\n ...rest,\n });\n let output;\n if (response.status < 200 || response.status > 299) {\n output = `${response.status}: ${\n response.statusText\n } for ${name} called with ${JSON.stringify(queryString)}`;\n } else {\n output = await response.text();\n }\n return output;\n },\n };\n}\n\n/**\n * Type representing a function for executing simple requests.\n */\ntype SimpleRequestChainExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>\n) => Promise<string>;\n\n/**\n * A chain for making simple API requests.\n */\nclass SimpleRequestChain extends BaseChain {\n static lc_name() {\n return \"SimpleRequestChain\";\n }\n\n private requestMethod: SimpleRequestChainExecutionMethod;\n\n inputKey = \"function\";\n\n outputKey = \"response\";\n\n constructor(config: { requestMethod: SimpleRequestChainExecutionMethod }) {\n super();\n this.requestMethod = config.requestMethod;\n }\n\n get inputKeys() {\n return [this.inputKey];\n }\n\n get outputKeys() {\n return [this.outputKey];\n }\n\n _chainType() {\n return \"simple_request_chain\" as const;\n }\n\n /** @ignore */\n async _call(\n values: ChainValues,\n _runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n const inputKeyValue = values[this.inputKey];\n const methodName = inputKeyValue.name;\n const args = inputKeyValue.arguments;\n const response = await this.requestMethod(methodName, args);\n return { [this.outputKey]: response };\n }\n}\n\n/**\n * Type representing the options for creating an OpenAPI chain.\n */\nexport type OpenAPIChainOptions = {\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n prompt?: BasePromptTemplate;\n requestChain?: BaseChain;\n llmChainInputs?: LLMChainInput;\n headers?: Record<string, string>;\n params?: Record<string, string>;\n verbose?: boolean;\n};\n\n/**\n * Create a chain for querying an API from a OpenAPI spec.\n * @param spec OpenAPISpec or url/file/text string corresponding to one.\n * @param options Custom options passed into the chain\n * @returns OpenAPIChain\n */\nexport async function createOpenAPIChain(\n spec: OpenAPIV3_1.Document | string,\n options: OpenAPIChainOptions = {}\n) {\n let convertedSpec;\n if (typeof spec === \"string\") {\n try {\n convertedSpec = await OpenAPISpec.fromURL(spec);\n } catch {\n try {\n convertedSpec = OpenAPISpec.fromString(spec);\n } catch {\n throw new Error(`Unable to parse spec from source ${spec}.`);\n }\n }\n } else {\n convertedSpec = OpenAPISpec.fromObject(spec);\n }\n const { openAIFunctions, defaultExecutionMethod } =\n convertOpenAPISpecToOpenAIFunctions(convertedSpec);\n if (defaultExecutionMethod === undefined) {\n throw new Error(\n `Could not parse any valid operations from the provided spec.`\n );\n }\n\n if (!options.llm) {\n throw new Error(\"`llm` option is required\");\n }\n\n const {\n llm = options.llm,\n prompt = ChatPromptTemplate.fromMessages([\n 
HumanMessagePromptTemplate.fromTemplate(\n \"Use the provided API's to respond to this user query:\\n\\n{query}\"\n ),\n ]),\n requestChain = new SimpleRequestChain({\n requestMethod: async (name, args) =>\n defaultExecutionMethod(name, args, {\n headers: options.headers,\n params: options.params,\n }),\n }),\n llmChainInputs = {},\n verbose,\n ...rest\n } = options;\n const formatChain = new LLMChain({\n llm,\n prompt,\n outputParser: new JsonOutputFunctionsParser({ argsOnly: false }),\n outputKey: \"function\",\n llmKwargs: { functions: openAIFunctions },\n ...llmChainInputs,\n });\n return new SequentialChain({\n chains: [formatChain, requestChain],\n outputVariables: [\"response\"],\n inputVariables: formatChain.inputKeys,\n verbose,\n ...rest,\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;;AA2CA,SAAS,UAAUA,KAAaC,YAA4C;CAC1E,MAAM,yBAAyB,CAAC,GAAG,IAAI,SAAS,WAAW,AAAC,EAAC,IAC3D,CAAC,UAAU,MAAM,GAClB;CACD,MAAMC,YAAoC,CAAE;AAC5C,MAAK,MAAM,aAAa,wBAAwB;EAC9C,MAAM,iBAAiB,UAAU,QAAQ,QAAQ,GAAG,CAAC,QAAQ,OAAO,GAAG;EACvE,MAAM,QAAQ,WAAW;EACzB,IAAI;AACJ,MAAI,MAAM,QAAQ,MAAM,CACtB,KAAI,UAAU,WAAW,IAAI,EAAE;GAC7B,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,MAAM;GAClD,iBAAiB,CAAC,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC7C,WAAU,UAAU,WAAW,IAAI,EAAE;GACpC,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,GAAG,eAAe,CAAC,CAAC,GAAG;GACnE,iBAAiB,GAAG,eAAe,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC9D,OACC,iBAAiB,MAAM,KAAK,IAAI;WAEzB,OAAO,UAAU,UAAU;GACpC,MAAM,cAAc,UAAU,SAAS,IAAI,GAAG,MAAM;GACpD,MAAM,YAAY,OAAO,QAAQ,MAAM,CAAC,IACtC,CAAC,CAAC,GAAG,EAAE,KAAK,IAAI,cAAc,EAC/B;GACD,IAAI;AACJ,OAAI,UAAU,WAAW,IAAI,EAAE;IAC7B,iBAAiB;IACjB,iBAAiB;GAClB,WAAU,UAAU,WAAW,IAAI,EAAE;IACpC,iBAAiB;IACjB,iBAAiB;GAClB,OAAM;IACL,iBAAiB;IACjB,iBAAiB;GAClB;GACD,kBAAkB,UAAU,KAAK,eAAe;EACjD,WACK,UAAU,WAAW,IAAI,EAC3B,iBAAiB,CAAC,CAAC,EAAE,OAAO;WACnB,UAAU,WAAW,IAAI,EAClC,iBAAiB,CAAC,CAAC,EAAE,eAAe,CAAC,EAAE,OAAO;OAE9C,iBAAiB;EAGrB,UAAU,aAAa;CACxB;CACD,IAAI,eAAe;AACnB,MAAK,MAAM,CAAC,KAAK,SAAS,IAAI,OAAO,QAAQ,UAAU,EACrD,eAAe,aAAa,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,SAAS;AAE3D,QAAO;AACR;;;;;;;AAQD,SAAS,iCACPC,QACAC,MACA;AACA,QAAO,OAAO,OACZ,CAACC,YAAmC,UAAU;EAC5C,IAAI;AACJ,MAAI,MAAM,QAAQ;GAChB,SAAS,KAAK,UAAU,MAAM,OAAO;GACrC,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,WAAU,MAAM,SAAS;GACxB,MAAM,kBAAkB,OAAO,OAAO,MAAM,QAAQ,CAAC,GAAG;AACxD,OAAI,iBACF,SAAS,KAAK,UAAU,gBAAgB;AAE1C,OAAI,CAAC,OACH,QAAO;AAET,OAAI,OAAO,gBAAgB,QACzB,OAAO,cAAc,MAAM,eAAe;GAE5C,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,MACC,QAAO;AAET,MAAI,MAAM,YAAY,MAAM,QAAQ,WAAW,SAAS,EACtD,WAAW,SAAS,KAAK,MAAM,KAAK;AAEtC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AACF;;;;;;;AASD,SAAgB,iCACdC,QACAF,MACiB;AACjB,KAAI,OAAO,SAAS,SAClB,QAAO,OAAO,KAAK,OAAO,cAAc,CAAE,EAAC,CAAC,OAC1C,CAACC,YAAmC,iBAAiB;AACnD,MAAI,CAAC,OAAO,WACV,QAAO;EAET,MAAM,kBAAkB,KAAK,UAAU,OAAO,WAAW,cAAc;AACvE,MAAI,gBAAgB,SAAS,OAC3B,QAAO;EAET,WAAW,WAAW,gBAAgB,iCACpC,iBACA,KACD;AACD,OACG,gBAAgB,YACf,OAAO,UAAU,SAAS,aAAa,KACzC,WAAW,aAAa,QAExB,WAAW,SAAS,KAAK,aAAa;AAExC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AAEH,KAAI,OAAO,SAAS,SAAS;EAC3B,MAAM,eAAe,KAAK,UAAU,OAAO,SAAS,CAAE,EAAC;AACvD,SAAO;GACL,MAAM;GACN,OAAO,iCAAiC,cAAc,KAAK;GAC3D,UAAU,OAAO;GACjB,UAAU,OAAO;EAClB;CACF;AACD,QAAO,EACL,MAAM,OAAO,QAAQ,SACtB;AACF;;;;;;AAOD,SAAgB,oCAAoCD,MAGlD;AACA,KAAI,CAAC,KAAK,SAAS,MACjB,QAAO,EAAE,iBAAiB,CAAE,EAAE;CAEhC,MAAM,kBAAkB,CAAE;CAC1B,MAAMG,gBAAiE,CAAE;AACzE,MAAK,MAAM,QAAQ,OAAO,KAAK,KAAK,SAAS,MAAM,EAAE;EACnD,MAAM,iBAAiB,KAAK,qBAAqB,KAAK;AACtD,OAAK,MAAM,UAAU,KAAK,kBAAkB,KAAK,EAAE;GACjD,MAAM,YAAY,KAAK,aAAa,MAAM,OAAO;AACjD,OAA
I,CAAC,UACH,QAAO,EAAE,iBAAiB,CAAE,EAAE;GAEhC,MAAM,gCAAgC,eACnC,OAAO,KAAK,0BAA0B,UAAU,CAAC,CACjD,OACC,CACEC,iBACA,UACG;AACH,QAAI,CAAC,gBAAgB,MAAM,KACzB,gBAAgB,MAAM,MAAM,CAAE;IAEhC,gBAAgB,MAAM,IAAI,KAAK,MAAM;AACrC,WAAO;GACR,GACD,CAAE,EACH;GACH,MAAMC,mCAA2D;IAC/D,OAAO;IACP,QAAQ;IACR,QAAQ;IACR,MAAM;GACP;GACD,MAAMC,oBAMF,CAAE;AACN,QAAK,MAAM,iBAAiB,OAAO,KACjC,iCACD,CACC,KAAI,8BAA8B,gBAChC,kBAAkB,iCAAiC,kBACjD,iCACE,8BAA8B,gBAC9B,KACD;GAGP,MAAM,cAAc,KAAK,2BAA2B,UAAU;AAC9D,OAAI,aAAa,YAAY,QAAW;IACtC,MAAMC,qBAA4D,CAAE;AACpE,SAAK,MAAM,CAAC,WAAW,gBAAgB,IAAI,OAAO,QAChD,YAAY,QACb,CACC,KAAI,gBAAgB,WAAW,QAAW;KACxC,MAAM,SAAS,KAAK,UAAU,gBAAgB,OAAO;KACrD,mBAAmB,aAAa,iCAC9B,QACA,KACD;IACF;IAEH,MAAM,aAAa,OAAO,KAAK,mBAAmB;AAClD,QAAI,WAAW,WAAW,GACxB,kBAAkB,OAAO,mBAAmB,WAAW;aAC9C,WAAW,SAAS,GAC7B,kBAAkB,OAAO,EACvB,OAAO,OAAO,OAAO,mBAAmB,CACzC;GAEJ;GACD,MAAMC,iBACJ;IACE,MAAMC,4BAAY,sBAAsB,WAAW,MAAM,OAAO;IAChE,aAAa,UAAU,eAAe,UAAU,WAAW;IAC3D,YAAY;KACV,MAAM;KACN,YAAY;KAEZ,UAAU,OAAO,KAAK,kBAAkB;IACzC;GACF;GAEH,gBAAgB,KAAK,eAAe;GACpC,MAAM,WAAW,KAAK,WAAW,IAAI,SAAS,IAAI,IAC7C,KAAK,WAAW,IAAI,MAAM,GAAG,GAAG,GACjC,KAAK,WAAW;GACpB,cAAc,eAAe,QAAQ;IACnC;IACA,KAAK,UAAU;GAChB;EACF;CACF;AACD,QAAO;EACL;EACA,wBAAwB,OACtBC,MAEAC,aACAC,YAIG;GACH,MAAM,EACJ,SAAS,eACT,QAAQ,aACR,GAAG,MACJ,GAAG,WAAW,CAAE;GACjB,MAAM,EAAE,QAAQ,KAAK,GAAG,cAAc;GACtC,MAAM,gBAAgB,YAAY,UAAU,CAAE;GAC9C,MAAM,iBAAiB,OAAO,KAAK,cAAc,CAAC,OAEhD,CAACC,cAAmC,YAAY;AAC9C,QACE,cAAc,aAAa,MAC3B,cAAc,aAAa,QAC3B,cAAc,aAAa,QAE3B,aAAa,WAAW,cAAc;AAExC,WAAO;GACR,GACD,CAAE,EACH;GACD,MAAM,cAAc,IAAI,gBAAgB;IACtC,GAAG;IACH,GAAG;GACJ,GAAE,UAAU;GACb,MAAM,aAAa,YAAY;GAC/B,MAAM,eACJ,UAAU,KAAK,WAAW,IACzB,YAAY,SAAS,CAAC,CAAC,EAAE,aAAa,GAAG;GAC5C,MAAMC,UAAkC,CAAE;GAC1C,IAAI;AACJ,OAAI,YAAY,SAAS,QAAW;IAClC,IAAI,cAAc;AAClB,QAAI,OAAO,YAAY,SAAS,UAAU;AACxC,SAAI,OAAO,YAAY,SAAS,UAC9B,cAAc;KAEhB,OAAO,KAAK,UAAU,YAAY,KAAK;IACxC,OACC,OAAO,YAAY;IAErB,QAAQ,kBAAkB;GAC3B;GACD,MAAM,WAAW,MAAM,MAAM,cAAc;IACzC,GAAG;IACH;IACA,SAAS;KACP,GAAG;KACH,GAAG,YAAY;KACf,GAAG;IACJ;IACD;IACA,GAAG;GACJ,EAAC;GACF,IAAI;AACJ,OAAI,SAAS,SAAS,OAAO,SAAS,SAAS,KAC7C,SAAS,GAAG,SAAS,OAAO,EAAE,EAC5B,SAAS,WACV,KAAK,EAAE,KAAK,aAAa,EAAE,KAAK,UAAU,YAAY,EAAE;QAEzD,SAAS,MAAM,SAAS,MAAM;AAEhC,UAAO;EACR;CACF;AACF;;;;AAcD,IAAM,qBAAN,cAAiCC,uBAAU;CACzC,OAAO,UAAU;AACf,SAAO;CACR;CAED,AAAQ;CAER,WAAW;CAEX,YAAY;CAEZ,YAAYC,QAA8D;EACxE,OAAO;EACP,KAAK,gBAAgB,OAAO;CAC7B;CAED,IAAI,YAAY;AACd,SAAO,CAAC,KAAK,QAAS;CACvB;CAED,IAAI,aAAa;AACf,SAAO,CAAC,KAAK,SAAU;CACxB;CAED,aAAa;AACX,SAAO;CACR;;CAGD,MAAM,MACJC,QACAC,aACsB;EACtB,MAAM,gBAAgB,OAAO,KAAK;EAClC,MAAM,aAAa,cAAc;EACjC,MAAM,OAAO,cAAc;EAC3B,MAAM,WAAW,MAAM,KAAK,cAAc,YAAY,KAAK;AAC3D,SAAO,GAAG,KAAK,YAAY,SAAU;CACtC;AACF;;;;;;;AAqBD,eAAsB,mBACpBC,MACAC,UAA+B,CAAE,GACjC;CACA,IAAI;AACJ,KAAI,OAAO,SAAS,SAClB,KAAI;EACF,gBAAgB,MAAMX,4BAAY,QAAQ,KAAK;CAChD,QAAO;AACN,MAAI;GACF,gBAAgBA,4BAAY,WAAW,KAAK;EAC7C,QAAO;AACN,SAAM,IAAI,MAAM,CAAC,iCAAiC,EAAE,KAAK,CAAC,CAAC;EAC5D;CACF;MAED,gBAAgBA,4BAAY,WAAW,KAAK;CAE9C,MAAM,EAAE,iBAAiB,wBAAwB,GAC/C,oCAAoC,cAAc;AACpD,KAAI,2BAA2B,OAC7B,OAAM,IAAI,MACR,CAAC,4DAA4D,CAAC;AAIlE,KAAI,CAAC,QAAQ,IACX,OAAM,IAAI,MAAM;CAGlB,MAAM,EACJ,MAAM,QAAQ,KACd,SAASY,4CAAmB,aAAa,CACvCC,oDAA2B,aACzB,mEACD,AACF,EAAC,EACF,eAAe,IAAI,mBAAmB,EACpC,eAAe,OAAO,MAAM,SAC1B,uBAAuB,MAAM,MAAM;EACjC,SAAS,QAAQ;EACjB,QAAQ,QAAQ;CACjB,EAAC,CACL,IACD,iBAAiB,CAAE,GACnB,QACA,GAAG,MACJ,GAAG;CACJ,MAAM,cAAc,IAAIC,2BAAS;EAC/B;EACA;EACA,cAAc,IAAIC,mDAA0B,EAAE,UAAU,MAAO;EAC/D,WAAW;EACX,WAAW,EAAE,WAAW,gBAAiB;EACzC,GAAG;CACJ;AACD,QAAO,IAAIC,yCAAgB;EACzB,QAAQ,CAAC,aAAa,YAAa;EACnC,iBAAiB,CAAC,UAAW;EAC7B,gBAAgB,YAAY;EAC5B;EACA,GAAG;CACJ;AACF"}
1
+ {"version":3,"file":"openapi.cjs","names":["url: string","pathParams: Record<string, string>","newParams: Record<string, string>","params: OpenAPIV3_1.ParameterObject[]","spec: OpenAPISpec","jsonSchema: JsonSchema7ObjectType","schema: OpenAPIV3_1.SchemaObject","nameToCallMap: Record<string, { method: string; url: string }>","operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>","paramLocationToRequestArgNameMap: Record<string, string>","requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n }","requestBodySchemas: Record<string, JsonSchema7ObjectType>","openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function","OpenAPISpec","name: string","requestArgs: Record<string, any>","options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }","filteredArgs: Record<string, any>","headers: Record<string, string>","BaseChain","config: { requestMethod: SimpleRequestChainExecutionMethod }","values: ChainValues","_runManager?: CallbackManagerForChainRun","spec: OpenAPIV3_1.Document | string","options: OpenAPIChainOptions","ChatPromptTemplate","HumanMessagePromptTemplate","LLMChain","JsonOutputFunctionsParser","SequentialChain"],"sources":["../../../src/chains/openai_functions/openapi.ts"],"sourcesContent":["import type { OpenAIClient } from \"@langchain/openai\";\nimport {\n type JsonSchema7ObjectType,\n type JsonSchema7ArrayType,\n type JsonSchema7Type,\n} from \"@langchain/core/utils/json_schema\";\nimport type { OpenAPIV3_1 } from \"openapi-types\";\n\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { OpenAPISpec } from \"../../util/openapi.js\";\nimport { BaseChain } from \"../base.js\";\nimport { LLMChain, LLMChainInput } from \"../llm_chain.js\";\nimport { SequentialChain } from \"../sequential_chain.js\";\nimport { JsonOutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing a function for executing OpenAPI requests.\n */\ntype OpenAPIExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n) => Promise<string>;\n\n/**\n * Formats a URL by replacing path parameters with their corresponding\n * values.\n * @param url The URL to format.\n * @param pathParams The path parameters to replace in the URL.\n * @returns The formatted URL.\n */\nfunction formatURL(url: string, pathParams: Record<string, string>): string {\n const expectedPathParamNames = [...url.matchAll(/{(.*?)}/g)].map(\n (match) => match[1]\n );\n const newParams: Record<string, string> = {};\n for (const paramName of expectedPathParamNames) {\n const cleanParamName = paramName.replace(/^\\.;/, \"\").replace(/\\*$/, \"\");\n const value = pathParams[cleanParamName];\n let formattedValue;\n if (Array.isArray(value)) {\n if (paramName.startsWith(\".\")) {\n const separator = paramName.endsWith(\"*\") ? 
\".\" : \",\";\n formattedValue = `.${value.join(separator)}`;\n } else if (paramName.startsWith(\",\")) {\n const separator = paramName.endsWith(\"*\") ? `${cleanParamName}=` : \",\";\n formattedValue = `${cleanParamName}=${value.join(separator)}`;\n } else {\n formattedValue = value.join(\",\");\n }\n } else if (typeof value === \"object\") {\n const kvSeparator = paramName.endsWith(\"*\") ? \"=\" : \",\";\n const kvStrings = Object.entries(value).map(\n ([k, v]) => k + kvSeparator + v\n );\n let entrySeparator;\n if (paramName.startsWith(\".\")) {\n entrySeparator = \".\";\n formattedValue = \".\";\n } else if (paramName.startsWith(\";\")) {\n entrySeparator = \";\";\n formattedValue = \";\";\n } else {\n entrySeparator = \",\";\n formattedValue = \"\";\n }\n formattedValue += kvStrings.join(entrySeparator);\n } else {\n if (paramName.startsWith(\".\")) {\n formattedValue = `.${value}`;\n } else if (paramName.startsWith(\";\")) {\n formattedValue = `;${cleanParamName}=${value}`;\n } else {\n formattedValue = value;\n }\n }\n newParams[paramName] = formattedValue;\n }\n let formattedUrl = url;\n for (const [key, newValue] of Object.entries(newParams)) {\n formattedUrl = formattedUrl.replace(`{${key}}`, newValue);\n }\n return formattedUrl;\n}\n\n/**\n * Converts OpenAPI parameters to JSON schema format.\n * @param params The OpenAPI parameters to convert.\n * @param spec The OpenAPI specification that contains the parameters.\n * @returns The JSON schema representation of the OpenAPI parameters.\n */\nfunction convertOpenAPIParamsToJSONSchema(\n params: OpenAPIV3_1.ParameterObject[],\n spec: OpenAPISpec\n) {\n return params.reduce(\n (jsonSchema: JsonSchema7ObjectType, param) => {\n let schema;\n if (param.schema) {\n schema = spec.getSchema(param.schema);\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else if (param.content) {\n const mediaTypeSchema = Object.values(param.content)[0].schema;\n if (mediaTypeSchema) {\n schema = spec.getSchema(mediaTypeSchema);\n }\n if (!schema) {\n return jsonSchema;\n }\n if (schema.description === undefined) {\n schema.description = param.description ?? \"\";\n }\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else {\n return jsonSchema;\n }\n if (param.required && Array.isArray(jsonSchema.required)) {\n jsonSchema.required.push(param.name);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n}\n\n// OpenAI throws errors on extraneous schema properties, e.g. if \"required\" is set on individual ones\n/**\n * Converts OpenAPI schemas to JSON schema format.\n * @param schema The OpenAPI schema to convert.\n * @param spec The OpenAPI specification that contains the schema.\n * @returns The JSON schema representation of the OpenAPI schema.\n */\nexport function convertOpenAPISchemaToJSONSchema(\n schema: OpenAPIV3_1.SchemaObject,\n spec: OpenAPISpec\n): JsonSchema7Type {\n if (schema.type === \"object\") {\n return Object.keys(schema.properties ?? 
{}).reduce(\n (jsonSchema: JsonSchema7ObjectType, propertyName) => {\n if (!schema.properties) {\n return jsonSchema;\n }\n const openAPIProperty = spec.getSchema(schema.properties[propertyName]);\n if (openAPIProperty.type === undefined) {\n return jsonSchema;\n }\n jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(\n openAPIProperty,\n spec\n );\n if (\n schema.required?.includes(propertyName) &&\n jsonSchema.required !== undefined\n ) {\n jsonSchema.required.push(propertyName);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n }\n if (schema.type === \"array\") {\n const openAPIItems = spec.getSchema(schema.items ?? {});\n return {\n type: \"array\",\n items: convertOpenAPISchemaToJSONSchema(openAPIItems, spec),\n minItems: schema.minItems,\n maxItems: schema.maxItems,\n } as JsonSchema7ArrayType;\n }\n return {\n type: schema.type ?? \"string\",\n } as JsonSchema7Type;\n}\n\n/**\n * Converts an OpenAPI specification to OpenAI functions.\n * @param spec The OpenAPI specification to convert.\n * @returns An object containing the OpenAI functions derived from the OpenAPI specification and a default execution method.\n */\nexport function convertOpenAPISpecToOpenAIFunctions(spec: OpenAPISpec): {\n openAIFunctions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];\n defaultExecutionMethod?: OpenAPIExecutionMethod;\n} {\n if (!spec.document.paths) {\n return { openAIFunctions: [] };\n }\n const openAIFunctions = [];\n const nameToCallMap: Record<string, { method: string; url: string }> = {};\n for (const path of Object.keys(spec.document.paths)) {\n const pathParameters = spec.getParametersForPath(path);\n for (const method of spec.getMethodsForPath(path)) {\n const operation = spec.getOperation(path, method);\n if (!operation) {\n return { openAIFunctions: [] };\n }\n const operationParametersByLocation = pathParameters\n .concat(spec.getParametersForOperation(operation))\n .reduce(\n (\n operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>,\n param\n ) => {\n if (!operationParams[param.in]) {\n operationParams[param.in] = [];\n }\n operationParams[param.in].push(param);\n return operationParams;\n },\n {}\n );\n const paramLocationToRequestArgNameMap: Record<string, string> = {\n query: \"params\",\n header: \"headers\",\n cookie: \"cookies\",\n path: \"path_params\",\n };\n const requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n } = {};\n for (const paramLocation of Object.keys(\n paramLocationToRequestArgNameMap\n )) {\n if (operationParametersByLocation[paramLocation]) {\n requestArgsSchema[paramLocationToRequestArgNameMap[paramLocation]] =\n convertOpenAPIParamsToJSONSchema(\n operationParametersByLocation[paramLocation],\n spec\n );\n }\n }\n const requestBody = spec.getRequestBodyForOperation(operation);\n if (requestBody?.content !== undefined) {\n const requestBodySchemas: Record<string, JsonSchema7ObjectType> = {};\n for (const [mediaType, mediaTypeObject] of Object.entries(\n requestBody.content\n )) {\n if (mediaTypeObject.schema !== undefined) {\n const schema = spec.getSchema(mediaTypeObject.schema);\n requestBodySchemas[mediaType] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n ) as JsonSchema7ObjectType;\n }\n }\n const mediaTypes = Object.keys(requestBodySchemas);\n if (mediaTypes.length === 1) {\n requestArgsSchema.data = requestBodySchemas[mediaTypes[0]];\n } 
else if (mediaTypes.length > 1) {\n requestArgsSchema.data = {\n anyOf: Object.values(requestBodySchemas),\n };\n }\n }\n const openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function =\n {\n name: OpenAPISpec.getCleanedOperationId(operation, path, method),\n description: operation.description ?? operation.summary ?? \"\",\n parameters: {\n type: \"object\",\n properties: requestArgsSchema,\n // All remaining top-level parameters are required\n required: Object.keys(requestArgsSchema),\n },\n };\n\n openAIFunctions.push(openAIFunction);\n const baseUrl = (spec.baseUrl ?? \"\").endsWith(\"/\")\n ? (spec.baseUrl ?? \"\").slice(0, -1)\n : spec.baseUrl ?? \"\";\n nameToCallMap[openAIFunction.name] = {\n method,\n url: baseUrl + path,\n };\n }\n }\n return {\n openAIFunctions,\n defaultExecutionMethod: async (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n ) => {\n const {\n headers: customHeaders,\n params: customParams,\n ...rest\n } = options ?? {};\n const { method, url } = nameToCallMap[name];\n const requestParams = requestArgs.params ?? {};\n const nonEmptyParams = Object.keys(requestParams).reduce(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (filteredArgs: Record<string, any>, argName) => {\n if (\n requestParams[argName] !== \"\" &&\n requestParams[argName] !== null &&\n requestParams[argName] !== undefined\n ) {\n filteredArgs[argName] = requestParams[argName];\n }\n return filteredArgs;\n },\n {}\n );\n const queryString = new URLSearchParams({\n ...nonEmptyParams,\n ...customParams,\n }).toString();\n const pathParams = requestArgs.path_params;\n const formattedUrl =\n formatURL(url, pathParams) +\n (queryString.length ? 
`?${queryString}` : \"\");\n const headers: Record<string, string> = {};\n let body;\n if (requestArgs.data !== undefined) {\n let contentType = \"text/plain\";\n if (typeof requestArgs.data !== \"string\") {\n if (typeof requestArgs.data === \"object\") {\n contentType = \"application/json\";\n }\n body = JSON.stringify(requestArgs.data);\n } else {\n body = requestArgs.data;\n }\n headers[\"content-type\"] = contentType;\n }\n const response = await fetch(formattedUrl, {\n ...requestArgs,\n method,\n headers: {\n ...headers,\n ...requestArgs.headers,\n ...customHeaders,\n },\n body,\n ...rest,\n });\n let output;\n if (response.status < 200 || response.status > 299) {\n output = `${response.status}: ${\n response.statusText\n } for ${name} called with ${JSON.stringify(queryString)}`;\n } else {\n output = await response.text();\n }\n return output;\n },\n };\n}\n\n/**\n * Type representing a function for executing simple requests.\n */\ntype SimpleRequestChainExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>\n) => Promise<string>;\n\n/**\n * A chain for making simple API requests.\n */\nclass SimpleRequestChain extends BaseChain {\n static lc_name() {\n return \"SimpleRequestChain\";\n }\n\n private requestMethod: SimpleRequestChainExecutionMethod;\n\n inputKey = \"function\";\n\n outputKey = \"response\";\n\n constructor(config: { requestMethod: SimpleRequestChainExecutionMethod }) {\n super();\n this.requestMethod = config.requestMethod;\n }\n\n get inputKeys() {\n return [this.inputKey];\n }\n\n get outputKeys() {\n return [this.outputKey];\n }\n\n _chainType() {\n return \"simple_request_chain\" as const;\n }\n\n /** @ignore */\n async _call(\n values: ChainValues,\n _runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n const inputKeyValue = values[this.inputKey];\n const methodName = inputKeyValue.name;\n const args = inputKeyValue.arguments;\n const response = await this.requestMethod(methodName, args);\n return { [this.outputKey]: response };\n }\n}\n\n/**\n * Type representing the options for creating an OpenAPI chain.\n */\nexport type OpenAPIChainOptions = {\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n prompt?: BasePromptTemplate;\n requestChain?: BaseChain;\n llmChainInputs?: LLMChainInput;\n headers?: Record<string, string>;\n params?: Record<string, string>;\n verbose?: boolean;\n};\n\n/**\n * Create a chain for querying an API from a OpenAPI spec.\n * @param spec OpenAPISpec or url/file/text string corresponding to one.\n * @param options Custom options passed into the chain\n * @returns OpenAPIChain\n */\nexport async function createOpenAPIChain(\n spec: OpenAPIV3_1.Document | string,\n options: OpenAPIChainOptions = {}\n) {\n let convertedSpec;\n if (typeof spec === \"string\") {\n try {\n convertedSpec = await OpenAPISpec.fromURL(spec);\n } catch {\n try {\n convertedSpec = OpenAPISpec.fromString(spec);\n } catch {\n throw new Error(`Unable to parse spec from source ${spec}.`);\n }\n }\n } else {\n convertedSpec = OpenAPISpec.fromObject(spec);\n }\n const { openAIFunctions, defaultExecutionMethod } =\n convertOpenAPISpecToOpenAIFunctions(convertedSpec);\n if (defaultExecutionMethod === undefined) {\n throw new Error(\n `Could not parse any valid operations from the provided spec.`\n );\n }\n\n if (!options.llm) {\n throw new Error(\"`llm` option is required\");\n }\n\n const {\n llm = options.llm,\n prompt = ChatPromptTemplate.fromMessages([\n 
HumanMessagePromptTemplate.fromTemplate(\n \"Use the provided API's to respond to this user query:\\n\\n{query}\"\n ),\n ]),\n requestChain = new SimpleRequestChain({\n requestMethod: async (name, args) =>\n defaultExecutionMethod(name, args, {\n headers: options.headers,\n params: options.params,\n }),\n }),\n llmChainInputs = {},\n verbose,\n ...rest\n } = options;\n const formatChain = new LLMChain({\n llm,\n prompt,\n outputParser: new JsonOutputFunctionsParser({ argsOnly: false }),\n outputKey: \"function\",\n llmKwargs: { functions: openAIFunctions },\n ...llmChainInputs,\n });\n return new SequentialChain({\n chains: [formatChain, requestChain],\n outputVariables: [\"response\"],\n inputVariables: formatChain.inputKeys,\n verbose,\n ...rest,\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;;AA2CA,SAAS,UAAUA,KAAaC,YAA4C;CAC1E,MAAM,yBAAyB,CAAC,GAAG,IAAI,SAAS,WAAW,AAAC,EAAC,IAC3D,CAAC,UAAU,MAAM,GAClB;CACD,MAAMC,YAAoC,CAAE;AAC5C,MAAK,MAAM,aAAa,wBAAwB;EAC9C,MAAM,iBAAiB,UAAU,QAAQ,QAAQ,GAAG,CAAC,QAAQ,OAAO,GAAG;EACvE,MAAM,QAAQ,WAAW;EACzB,IAAI;AACJ,MAAI,MAAM,QAAQ,MAAM,CACtB,KAAI,UAAU,WAAW,IAAI,EAAE;GAC7B,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,MAAM;GAClD,iBAAiB,CAAC,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC7C,WAAU,UAAU,WAAW,IAAI,EAAE;GACpC,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,GAAG,eAAe,CAAC,CAAC,GAAG;GACnE,iBAAiB,GAAG,eAAe,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC9D,OACC,iBAAiB,MAAM,KAAK,IAAI;WAEzB,OAAO,UAAU,UAAU;GACpC,MAAM,cAAc,UAAU,SAAS,IAAI,GAAG,MAAM;GACpD,MAAM,YAAY,OAAO,QAAQ,MAAM,CAAC,IACtC,CAAC,CAAC,GAAG,EAAE,KAAK,IAAI,cAAc,EAC/B;GACD,IAAI;AACJ,OAAI,UAAU,WAAW,IAAI,EAAE;IAC7B,iBAAiB;IACjB,iBAAiB;GAClB,WAAU,UAAU,WAAW,IAAI,EAAE;IACpC,iBAAiB;IACjB,iBAAiB;GAClB,OAAM;IACL,iBAAiB;IACjB,iBAAiB;GAClB;GACD,kBAAkB,UAAU,KAAK,eAAe;EACjD,WACK,UAAU,WAAW,IAAI,EAC3B,iBAAiB,CAAC,CAAC,EAAE,OAAO;WACnB,UAAU,WAAW,IAAI,EAClC,iBAAiB,CAAC,CAAC,EAAE,eAAe,CAAC,EAAE,OAAO;OAE9C,iBAAiB;EAGrB,UAAU,aAAa;CACxB;CACD,IAAI,eAAe;AACnB,MAAK,MAAM,CAAC,KAAK,SAAS,IAAI,OAAO,QAAQ,UAAU,EACrD,eAAe,aAAa,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,SAAS;AAE3D,QAAO;AACR;;;;;;;AAQD,SAAS,iCACPC,QACAC,MACA;AACA,QAAO,OAAO,OACZ,CAACC,YAAmC,UAAU;EAC5C,IAAI;AACJ,MAAI,MAAM,QAAQ;GAChB,SAAS,KAAK,UAAU,MAAM,OAAO;GACrC,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,WAAU,MAAM,SAAS;GACxB,MAAM,kBAAkB,OAAO,OAAO,MAAM,QAAQ,CAAC,GAAG;AACxD,OAAI,iBACF,SAAS,KAAK,UAAU,gBAAgB;AAE1C,OAAI,CAAC,OACH,QAAO;AAET,OAAI,OAAO,gBAAgB,QACzB,OAAO,cAAc,MAAM,eAAe;GAE5C,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,MACC,QAAO;AAET,MAAI,MAAM,YAAY,MAAM,QAAQ,WAAW,SAAS,EACtD,WAAW,SAAS,KAAK,MAAM,KAAK;AAEtC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AACF;;;;;;;AASD,SAAgB,iCACdC,QACAF,MACiB;AACjB,KAAI,OAAO,SAAS,SAClB,QAAO,OAAO,KAAK,OAAO,cAAc,CAAE,EAAC,CAAC,OAC1C,CAACC,YAAmC,iBAAiB;AACnD,MAAI,CAAC,OAAO,WACV,QAAO;EAET,MAAM,kBAAkB,KAAK,UAAU,OAAO,WAAW,cAAc;AACvE,MAAI,gBAAgB,SAAS,OAC3B,QAAO;EAET,WAAW,WAAW,gBAAgB,iCACpC,iBACA,KACD;AACD,MACE,OAAO,UAAU,SAAS,aAAa,IACvC,WAAW,aAAa,QAExB,WAAW,SAAS,KAAK,aAAa;AAExC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AAEH,KAAI,OAAO,SAAS,SAAS;EAC3B,MAAM,eAAe,KAAK,UAAU,OAAO,SAAS,CAAE,EAAC;AACvD,SAAO;GACL,MAAM;GACN,OAAO,iCAAiC,cAAc,KAAK;GAC3D,UAAU,OAAO;GACjB,UAAU,OAAO;EAClB;CACF;AACD,QAAO,EACL,MAAM,OAAO,QAAQ,SACtB;AACF;;;;;;AAOD,SAAgB,oCAAoCD,MAGlD;AACA,KAAI,CAAC,KAAK,SAAS,MACjB,QAAO,EAAE,iBAAiB,CAAE,EAAE;CAEhC,MAAM,kBAAkB,CAAE;CAC1B,MAAMG,gBAAiE,CAAE;AACzE,MAAK,MAAM,QAAQ,OAAO,KAAK,KAAK,SAAS,MAAM,EAAE;EACnD,MAAM,iBAAiB,KAAK,qBAAqB,KAAK;AACtD,OAAK,MAAM,UAAU,KAAK,kBAAkB,KAAK,EAAE;GACjD,MAAM,YAAY,KAAK,aAAa,MAAM,OAAO;AACjD,OAAI,CAAC,UACH,
QAAO,EAAE,iBAAiB,CAAE,EAAE;GAEhC,MAAM,gCAAgC,eACnC,OAAO,KAAK,0BAA0B,UAAU,CAAC,CACjD,OACC,CACEC,iBACA,UACG;AACH,QAAI,CAAC,gBAAgB,MAAM,KACzB,gBAAgB,MAAM,MAAM,CAAE;IAEhC,gBAAgB,MAAM,IAAI,KAAK,MAAM;AACrC,WAAO;GACR,GACD,CAAE,EACH;GACH,MAAMC,mCAA2D;IAC/D,OAAO;IACP,QAAQ;IACR,QAAQ;IACR,MAAM;GACP;GACD,MAAMC,oBAMF,CAAE;AACN,QAAK,MAAM,iBAAiB,OAAO,KACjC,iCACD,CACC,KAAI,8BAA8B,gBAChC,kBAAkB,iCAAiC,kBACjD,iCACE,8BAA8B,gBAC9B,KACD;GAGP,MAAM,cAAc,KAAK,2BAA2B,UAAU;AAC9D,OAAI,aAAa,YAAY,QAAW;IACtC,MAAMC,qBAA4D,CAAE;AACpE,SAAK,MAAM,CAAC,WAAW,gBAAgB,IAAI,OAAO,QAChD,YAAY,QACb,CACC,KAAI,gBAAgB,WAAW,QAAW;KACxC,MAAM,SAAS,KAAK,UAAU,gBAAgB,OAAO;KACrD,mBAAmB,aAAa,iCAC9B,QACA,KACD;IACF;IAEH,MAAM,aAAa,OAAO,KAAK,mBAAmB;AAClD,QAAI,WAAW,WAAW,GACxB,kBAAkB,OAAO,mBAAmB,WAAW;aAC9C,WAAW,SAAS,GAC7B,kBAAkB,OAAO,EACvB,OAAO,OAAO,OAAO,mBAAmB,CACzC;GAEJ;GACD,MAAMC,iBACJ;IACE,MAAMC,4BAAY,sBAAsB,WAAW,MAAM,OAAO;IAChE,aAAa,UAAU,eAAe,UAAU,WAAW;IAC3D,YAAY;KACV,MAAM;KACN,YAAY;KAEZ,UAAU,OAAO,KAAK,kBAAkB;IACzC;GACF;GAEH,gBAAgB,KAAK,eAAe;GACpC,MAAM,WAAW,KAAK,WAAW,IAAI,SAAS,IAAI,IAC7C,KAAK,WAAW,IAAI,MAAM,GAAG,GAAG,GACjC,KAAK,WAAW;GACpB,cAAc,eAAe,QAAQ;IACnC;IACA,KAAK,UAAU;GAChB;EACF;CACF;AACD,QAAO;EACL;EACA,wBAAwB,OACtBC,MAEAC,aACAC,YAIG;GACH,MAAM,EACJ,SAAS,eACT,QAAQ,aACR,GAAG,MACJ,GAAG,WAAW,CAAE;GACjB,MAAM,EAAE,QAAQ,KAAK,GAAG,cAAc;GACtC,MAAM,gBAAgB,YAAY,UAAU,CAAE;GAC9C,MAAM,iBAAiB,OAAO,KAAK,cAAc,CAAC,OAEhD,CAACC,cAAmC,YAAY;AAC9C,QACE,cAAc,aAAa,MAC3B,cAAc,aAAa,QAC3B,cAAc,aAAa,QAE3B,aAAa,WAAW,cAAc;AAExC,WAAO;GACR,GACD,CAAE,EACH;GACD,MAAM,cAAc,IAAI,gBAAgB;IACtC,GAAG;IACH,GAAG;GACJ,GAAE,UAAU;GACb,MAAM,aAAa,YAAY;GAC/B,MAAM,eACJ,UAAU,KAAK,WAAW,IACzB,YAAY,SAAS,CAAC,CAAC,EAAE,aAAa,GAAG;GAC5C,MAAMC,UAAkC,CAAE;GAC1C,IAAI;AACJ,OAAI,YAAY,SAAS,QAAW;IAClC,IAAI,cAAc;AAClB,QAAI,OAAO,YAAY,SAAS,UAAU;AACxC,SAAI,OAAO,YAAY,SAAS,UAC9B,cAAc;KAEhB,OAAO,KAAK,UAAU,YAAY,KAAK;IACxC,OACC,OAAO,YAAY;IAErB,QAAQ,kBAAkB;GAC3B;GACD,MAAM,WAAW,MAAM,MAAM,cAAc;IACzC,GAAG;IACH;IACA,SAAS;KACP,GAAG;KACH,GAAG,YAAY;KACf,GAAG;IACJ;IACD;IACA,GAAG;GACJ,EAAC;GACF,IAAI;AACJ,OAAI,SAAS,SAAS,OAAO,SAAS,SAAS,KAC7C,SAAS,GAAG,SAAS,OAAO,EAAE,EAC5B,SAAS,WACV,KAAK,EAAE,KAAK,aAAa,EAAE,KAAK,UAAU,YAAY,EAAE;QAEzD,SAAS,MAAM,SAAS,MAAM;AAEhC,UAAO;EACR;CACF;AACF;;;;AAcD,IAAM,qBAAN,cAAiCC,uBAAU;CACzC,OAAO,UAAU;AACf,SAAO;CACR;CAED,AAAQ;CAER,WAAW;CAEX,YAAY;CAEZ,YAAYC,QAA8D;EACxE,OAAO;EACP,KAAK,gBAAgB,OAAO;CAC7B;CAED,IAAI,YAAY;AACd,SAAO,CAAC,KAAK,QAAS;CACvB;CAED,IAAI,aAAa;AACf,SAAO,CAAC,KAAK,SAAU;CACxB;CAED,aAAa;AACX,SAAO;CACR;;CAGD,MAAM,MACJC,QACAC,aACsB;EACtB,MAAM,gBAAgB,OAAO,KAAK;EAClC,MAAM,aAAa,cAAc;EACjC,MAAM,OAAO,cAAc;EAC3B,MAAM,WAAW,MAAM,KAAK,cAAc,YAAY,KAAK;AAC3D,SAAO,GAAG,KAAK,YAAY,SAAU;CACtC;AACF;;;;;;;AAqBD,eAAsB,mBACpBC,MACAC,UAA+B,CAAE,GACjC;CACA,IAAI;AACJ,KAAI,OAAO,SAAS,SAClB,KAAI;EACF,gBAAgB,MAAMX,4BAAY,QAAQ,KAAK;CAChD,QAAO;AACN,MAAI;GACF,gBAAgBA,4BAAY,WAAW,KAAK;EAC7C,QAAO;AACN,SAAM,IAAI,MAAM,CAAC,iCAAiC,EAAE,KAAK,CAAC,CAAC;EAC5D;CACF;MAED,gBAAgBA,4BAAY,WAAW,KAAK;CAE9C,MAAM,EAAE,iBAAiB,wBAAwB,GAC/C,oCAAoC,cAAc;AACpD,KAAI,2BAA2B,OAC7B,OAAM,IAAI,MACR,CAAC,4DAA4D,CAAC;AAIlE,KAAI,CAAC,QAAQ,IACX,OAAM,IAAI,MAAM;CAGlB,MAAM,EACJ,MAAM,QAAQ,KACd,SAASY,4CAAmB,aAAa,CACvCC,oDAA2B,aACzB,mEACD,AACF,EAAC,EACF,eAAe,IAAI,mBAAmB,EACpC,eAAe,OAAO,MAAM,SAC1B,uBAAuB,MAAM,MAAM;EACjC,SAAS,QAAQ;EACjB,QAAQ,QAAQ;CACjB,EAAC,CACL,IACD,iBAAiB,CAAE,GACnB,QACA,GAAG,MACJ,GAAG;CACJ,MAAM,cAAc,IAAIC,2BAAS;EAC/B;EACA;EACA,cAAc,IAAIC,mDAA0B,EAAE,UAAU,MAAO;EAC/D,WAAW;EACX,WAAW,EAAE,WAAW,gBAAiB;EACzC,GAAG;CACJ;AACD,QAAO,IAAIC,yCAAgB;EACzB,QAAQ,CAAC,aAAa,YAAa;EACnC,iBAAiB,CAAC,UAAW;EAC7B,gBAAgB,YAAY;EAC5B;EACA,GAAG;CACJ;AACF"}
@@ -5,8 +5,8 @@ import { OpenAPISpec } from "../../util/openapi.js";
5
5
  import { BasePromptTemplate } from "@langchain/core/prompts";
6
6
  import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
7
7
  import { JsonSchema7Type } from "@langchain/core/utils/json_schema";
8
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
9
8
  import { OpenAIClient } from "@langchain/openai";
9
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models";
10
10
  import { OpenAPIV3_1 } from "openapi-types";
11
11
 
12
12
  //#region src/chains/openai_functions/openapi.d.ts
@@ -91,7 +91,7 @@ function convertOpenAPISchemaToJSONSchema(schema, spec) {
91
91
  const openAPIProperty = spec.getSchema(schema.properties[propertyName]);
92
92
  if (openAPIProperty.type === void 0) return jsonSchema;
93
93
  jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(openAPIProperty, spec);
94
- if ((openAPIProperty.required || schema.required?.includes(propertyName)) && jsonSchema.required !== void 0) jsonSchema.required.push(propertyName);
94
+ if (schema.required?.includes(propertyName) && jsonSchema.required !== void 0) jsonSchema.required.push(propertyName);
95
95
  return jsonSchema;
96
96
  }, {
97
97
  type: "object",
@@ -1 +1 @@
1
- {"version":3,"file":"openapi.js","names":["url: string","pathParams: Record<string, string>","newParams: Record<string, string>","params: OpenAPIV3_1.ParameterObject[]","spec: OpenAPISpec","jsonSchema: JsonSchema7ObjectType","schema: OpenAPIV3_1.SchemaObject","nameToCallMap: Record<string, { method: string; url: string }>","operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>","paramLocationToRequestArgNameMap: Record<string, string>","requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n }","requestBodySchemas: Record<string, JsonSchema7ObjectType>","openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function","name: string","requestArgs: Record<string, any>","options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }","filteredArgs: Record<string, any>","headers: Record<string, string>","config: { requestMethod: SimpleRequestChainExecutionMethod }","values: ChainValues","_runManager?: CallbackManagerForChainRun","spec: OpenAPIV3_1.Document | string","options: OpenAPIChainOptions"],"sources":["../../../src/chains/openai_functions/openapi.ts"],"sourcesContent":["import type { OpenAIClient } from \"@langchain/openai\";\nimport {\n type JsonSchema7ObjectType,\n type JsonSchema7ArrayType,\n type JsonSchema7Type,\n} from \"@langchain/core/utils/json_schema\";\nimport type { OpenAPIV3_1 } from \"openapi-types\";\n\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { OpenAPISpec } from \"../../util/openapi.js\";\nimport { BaseChain } from \"../base.js\";\nimport { LLMChain, LLMChainInput } from \"../llm_chain.js\";\nimport { SequentialChain } from \"../sequential_chain.js\";\nimport { JsonOutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing a function for executing OpenAPI requests.\n */\ntype OpenAPIExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n) => Promise<string>;\n\n/**\n * Formats a URL by replacing path parameters with their corresponding\n * values.\n * @param url The URL to format.\n * @param pathParams The path parameters to replace in the URL.\n * @returns The formatted URL.\n */\nfunction formatURL(url: string, pathParams: Record<string, string>): string {\n const expectedPathParamNames = [...url.matchAll(/{(.*?)}/g)].map(\n (match) => match[1]\n );\n const newParams: Record<string, string> = {};\n for (const paramName of expectedPathParamNames) {\n const cleanParamName = paramName.replace(/^\\.;/, \"\").replace(/\\*$/, \"\");\n const value = pathParams[cleanParamName];\n let formattedValue;\n if (Array.isArray(value)) {\n if (paramName.startsWith(\".\")) {\n const separator = paramName.endsWith(\"*\") ? \".\" : \",\";\n formattedValue = `.${value.join(separator)}`;\n } else if (paramName.startsWith(\",\")) {\n const separator = paramName.endsWith(\"*\") ? 
`${cleanParamName}=` : \",\";\n formattedValue = `${cleanParamName}=${value.join(separator)}`;\n } else {\n formattedValue = value.join(\",\");\n }\n } else if (typeof value === \"object\") {\n const kvSeparator = paramName.endsWith(\"*\") ? \"=\" : \",\";\n const kvStrings = Object.entries(value).map(\n ([k, v]) => k + kvSeparator + v\n );\n let entrySeparator;\n if (paramName.startsWith(\".\")) {\n entrySeparator = \".\";\n formattedValue = \".\";\n } else if (paramName.startsWith(\";\")) {\n entrySeparator = \";\";\n formattedValue = \";\";\n } else {\n entrySeparator = \",\";\n formattedValue = \"\";\n }\n formattedValue += kvStrings.join(entrySeparator);\n } else {\n if (paramName.startsWith(\".\")) {\n formattedValue = `.${value}`;\n } else if (paramName.startsWith(\";\")) {\n formattedValue = `;${cleanParamName}=${value}`;\n } else {\n formattedValue = value;\n }\n }\n newParams[paramName] = formattedValue;\n }\n let formattedUrl = url;\n for (const [key, newValue] of Object.entries(newParams)) {\n formattedUrl = formattedUrl.replace(`{${key}}`, newValue);\n }\n return formattedUrl;\n}\n\n/**\n * Converts OpenAPI parameters to JSON schema format.\n * @param params The OpenAPI parameters to convert.\n * @param spec The OpenAPI specification that contains the parameters.\n * @returns The JSON schema representation of the OpenAPI parameters.\n */\nfunction convertOpenAPIParamsToJSONSchema(\n params: OpenAPIV3_1.ParameterObject[],\n spec: OpenAPISpec\n) {\n return params.reduce(\n (jsonSchema: JsonSchema7ObjectType, param) => {\n let schema;\n if (param.schema) {\n schema = spec.getSchema(param.schema);\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else if (param.content) {\n const mediaTypeSchema = Object.values(param.content)[0].schema;\n if (mediaTypeSchema) {\n schema = spec.getSchema(mediaTypeSchema);\n }\n if (!schema) {\n return jsonSchema;\n }\n if (schema.description === undefined) {\n schema.description = param.description ?? \"\";\n }\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else {\n return jsonSchema;\n }\n if (param.required && Array.isArray(jsonSchema.required)) {\n jsonSchema.required.push(param.name);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n}\n\n// OpenAI throws errors on extraneous schema properties, e.g. if \"required\" is set on individual ones\n/**\n * Converts OpenAPI schemas to JSON schema format.\n * @param schema The OpenAPI schema to convert.\n * @param spec The OpenAPI specification that contains the schema.\n * @returns The JSON schema representation of the OpenAPI schema.\n */\nexport function convertOpenAPISchemaToJSONSchema(\n schema: OpenAPIV3_1.SchemaObject,\n spec: OpenAPISpec\n): JsonSchema7Type {\n if (schema.type === \"object\") {\n return Object.keys(schema.properties ?? 
{}).reduce(\n (jsonSchema: JsonSchema7ObjectType, propertyName) => {\n if (!schema.properties) {\n return jsonSchema;\n }\n const openAPIProperty = spec.getSchema(schema.properties[propertyName]);\n if (openAPIProperty.type === undefined) {\n return jsonSchema;\n }\n jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(\n openAPIProperty,\n spec\n );\n if (\n (openAPIProperty.required ||\n schema.required?.includes(propertyName)) &&\n jsonSchema.required !== undefined\n ) {\n jsonSchema.required.push(propertyName);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n }\n if (schema.type === \"array\") {\n const openAPIItems = spec.getSchema(schema.items ?? {});\n return {\n type: \"array\",\n items: convertOpenAPISchemaToJSONSchema(openAPIItems, spec),\n minItems: schema.minItems,\n maxItems: schema.maxItems,\n } as JsonSchema7ArrayType;\n }\n return {\n type: schema.type ?? \"string\",\n } as JsonSchema7Type;\n}\n\n/**\n * Converts an OpenAPI specification to OpenAI functions.\n * @param spec The OpenAPI specification to convert.\n * @returns An object containing the OpenAI functions derived from the OpenAPI specification and a default execution method.\n */\nexport function convertOpenAPISpecToOpenAIFunctions(spec: OpenAPISpec): {\n openAIFunctions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];\n defaultExecutionMethod?: OpenAPIExecutionMethod;\n} {\n if (!spec.document.paths) {\n return { openAIFunctions: [] };\n }\n const openAIFunctions = [];\n const nameToCallMap: Record<string, { method: string; url: string }> = {};\n for (const path of Object.keys(spec.document.paths)) {\n const pathParameters = spec.getParametersForPath(path);\n for (const method of spec.getMethodsForPath(path)) {\n const operation = spec.getOperation(path, method);\n if (!operation) {\n return { openAIFunctions: [] };\n }\n const operationParametersByLocation = pathParameters\n .concat(spec.getParametersForOperation(operation))\n .reduce(\n (\n operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>,\n param\n ) => {\n if (!operationParams[param.in]) {\n operationParams[param.in] = [];\n }\n operationParams[param.in].push(param);\n return operationParams;\n },\n {}\n );\n const paramLocationToRequestArgNameMap: Record<string, string> = {\n query: \"params\",\n header: \"headers\",\n cookie: \"cookies\",\n path: \"path_params\",\n };\n const requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n } = {};\n for (const paramLocation of Object.keys(\n paramLocationToRequestArgNameMap\n )) {\n if (operationParametersByLocation[paramLocation]) {\n requestArgsSchema[paramLocationToRequestArgNameMap[paramLocation]] =\n convertOpenAPIParamsToJSONSchema(\n operationParametersByLocation[paramLocation],\n spec\n );\n }\n }\n const requestBody = spec.getRequestBodyForOperation(operation);\n if (requestBody?.content !== undefined) {\n const requestBodySchemas: Record<string, JsonSchema7ObjectType> = {};\n for (const [mediaType, mediaTypeObject] of Object.entries(\n requestBody.content\n )) {\n if (mediaTypeObject.schema !== undefined) {\n const schema = spec.getSchema(mediaTypeObject.schema);\n requestBodySchemas[mediaType] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n ) as JsonSchema7ObjectType;\n }\n }\n const mediaTypes = Object.keys(requestBodySchemas);\n if (mediaTypes.length === 1) {\n requestArgsSchema.data = 
requestBodySchemas[mediaTypes[0]];\n } else if (mediaTypes.length > 1) {\n requestArgsSchema.data = {\n anyOf: Object.values(requestBodySchemas),\n };\n }\n }\n const openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function =\n {\n name: OpenAPISpec.getCleanedOperationId(operation, path, method),\n description: operation.description ?? operation.summary ?? \"\",\n parameters: {\n type: \"object\",\n properties: requestArgsSchema,\n // All remaining top-level parameters are required\n required: Object.keys(requestArgsSchema),\n },\n };\n\n openAIFunctions.push(openAIFunction);\n const baseUrl = (spec.baseUrl ?? \"\").endsWith(\"/\")\n ? (spec.baseUrl ?? \"\").slice(0, -1)\n : spec.baseUrl ?? \"\";\n nameToCallMap[openAIFunction.name] = {\n method,\n url: baseUrl + path,\n };\n }\n }\n return {\n openAIFunctions,\n defaultExecutionMethod: async (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n ) => {\n const {\n headers: customHeaders,\n params: customParams,\n ...rest\n } = options ?? {};\n const { method, url } = nameToCallMap[name];\n const requestParams = requestArgs.params ?? {};\n const nonEmptyParams = Object.keys(requestParams).reduce(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (filteredArgs: Record<string, any>, argName) => {\n if (\n requestParams[argName] !== \"\" &&\n requestParams[argName] !== null &&\n requestParams[argName] !== undefined\n ) {\n filteredArgs[argName] = requestParams[argName];\n }\n return filteredArgs;\n },\n {}\n );\n const queryString = new URLSearchParams({\n ...nonEmptyParams,\n ...customParams,\n }).toString();\n const pathParams = requestArgs.path_params;\n const formattedUrl =\n formatURL(url, pathParams) +\n (queryString.length ? 
`?${queryString}` : \"\");\n const headers: Record<string, string> = {};\n let body;\n if (requestArgs.data !== undefined) {\n let contentType = \"text/plain\";\n if (typeof requestArgs.data !== \"string\") {\n if (typeof requestArgs.data === \"object\") {\n contentType = \"application/json\";\n }\n body = JSON.stringify(requestArgs.data);\n } else {\n body = requestArgs.data;\n }\n headers[\"content-type\"] = contentType;\n }\n const response = await fetch(formattedUrl, {\n ...requestArgs,\n method,\n headers: {\n ...headers,\n ...requestArgs.headers,\n ...customHeaders,\n },\n body,\n ...rest,\n });\n let output;\n if (response.status < 200 || response.status > 299) {\n output = `${response.status}: ${\n response.statusText\n } for ${name} called with ${JSON.stringify(queryString)}`;\n } else {\n output = await response.text();\n }\n return output;\n },\n };\n}\n\n/**\n * Type representing a function for executing simple requests.\n */\ntype SimpleRequestChainExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>\n) => Promise<string>;\n\n/**\n * A chain for making simple API requests.\n */\nclass SimpleRequestChain extends BaseChain {\n static lc_name() {\n return \"SimpleRequestChain\";\n }\n\n private requestMethod: SimpleRequestChainExecutionMethod;\n\n inputKey = \"function\";\n\n outputKey = \"response\";\n\n constructor(config: { requestMethod: SimpleRequestChainExecutionMethod }) {\n super();\n this.requestMethod = config.requestMethod;\n }\n\n get inputKeys() {\n return [this.inputKey];\n }\n\n get outputKeys() {\n return [this.outputKey];\n }\n\n _chainType() {\n return \"simple_request_chain\" as const;\n }\n\n /** @ignore */\n async _call(\n values: ChainValues,\n _runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n const inputKeyValue = values[this.inputKey];\n const methodName = inputKeyValue.name;\n const args = inputKeyValue.arguments;\n const response = await this.requestMethod(methodName, args);\n return { [this.outputKey]: response };\n }\n}\n\n/**\n * Type representing the options for creating an OpenAPI chain.\n */\nexport type OpenAPIChainOptions = {\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n prompt?: BasePromptTemplate;\n requestChain?: BaseChain;\n llmChainInputs?: LLMChainInput;\n headers?: Record<string, string>;\n params?: Record<string, string>;\n verbose?: boolean;\n};\n\n/**\n * Create a chain for querying an API from a OpenAPI spec.\n * @param spec OpenAPISpec or url/file/text string corresponding to one.\n * @param options Custom options passed into the chain\n * @returns OpenAPIChain\n */\nexport async function createOpenAPIChain(\n spec: OpenAPIV3_1.Document | string,\n options: OpenAPIChainOptions = {}\n) {\n let convertedSpec;\n if (typeof spec === \"string\") {\n try {\n convertedSpec = await OpenAPISpec.fromURL(spec);\n } catch {\n try {\n convertedSpec = OpenAPISpec.fromString(spec);\n } catch {\n throw new Error(`Unable to parse spec from source ${spec}.`);\n }\n }\n } else {\n convertedSpec = OpenAPISpec.fromObject(spec);\n }\n const { openAIFunctions, defaultExecutionMethod } =\n convertOpenAPISpecToOpenAIFunctions(convertedSpec);\n if (defaultExecutionMethod === undefined) {\n throw new Error(\n `Could not parse any valid operations from the provided spec.`\n );\n }\n\n if (!options.llm) {\n throw new Error(\"`llm` option is required\");\n }\n\n const {\n llm = options.llm,\n prompt = ChatPromptTemplate.fromMessages([\n 
HumanMessagePromptTemplate.fromTemplate(\n \"Use the provided API's to respond to this user query:\\n\\n{query}\"\n ),\n ]),\n requestChain = new SimpleRequestChain({\n requestMethod: async (name, args) =>\n defaultExecutionMethod(name, args, {\n headers: options.headers,\n params: options.params,\n }),\n }),\n llmChainInputs = {},\n verbose,\n ...rest\n } = options;\n const formatChain = new LLMChain({\n llm,\n prompt,\n outputParser: new JsonOutputFunctionsParser({ argsOnly: false }),\n outputKey: \"function\",\n llmKwargs: { functions: openAIFunctions },\n ...llmChainInputs,\n });\n return new SequentialChain({\n chains: [formatChain, requestChain],\n outputVariables: [\"response\"],\n inputVariables: formatChain.inputKeys,\n verbose,\n ...rest,\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;AA2CA,SAAS,UAAUA,KAAaC,YAA4C;CAC1E,MAAM,yBAAyB,CAAC,GAAG,IAAI,SAAS,WAAW,AAAC,EAAC,IAC3D,CAAC,UAAU,MAAM,GAClB;CACD,MAAMC,YAAoC,CAAE;AAC5C,MAAK,MAAM,aAAa,wBAAwB;EAC9C,MAAM,iBAAiB,UAAU,QAAQ,QAAQ,GAAG,CAAC,QAAQ,OAAO,GAAG;EACvE,MAAM,QAAQ,WAAW;EACzB,IAAI;AACJ,MAAI,MAAM,QAAQ,MAAM,CACtB,KAAI,UAAU,WAAW,IAAI,EAAE;GAC7B,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,MAAM;GAClD,iBAAiB,CAAC,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC7C,WAAU,UAAU,WAAW,IAAI,EAAE;GACpC,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,GAAG,eAAe,CAAC,CAAC,GAAG;GACnE,iBAAiB,GAAG,eAAe,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC9D,OACC,iBAAiB,MAAM,KAAK,IAAI;WAEzB,OAAO,UAAU,UAAU;GACpC,MAAM,cAAc,UAAU,SAAS,IAAI,GAAG,MAAM;GACpD,MAAM,YAAY,OAAO,QAAQ,MAAM,CAAC,IACtC,CAAC,CAAC,GAAG,EAAE,KAAK,IAAI,cAAc,EAC/B;GACD,IAAI;AACJ,OAAI,UAAU,WAAW,IAAI,EAAE;IAC7B,iBAAiB;IACjB,iBAAiB;GAClB,WAAU,UAAU,WAAW,IAAI,EAAE;IACpC,iBAAiB;IACjB,iBAAiB;GAClB,OAAM;IACL,iBAAiB;IACjB,iBAAiB;GAClB;GACD,kBAAkB,UAAU,KAAK,eAAe;EACjD,WACK,UAAU,WAAW,IAAI,EAC3B,iBAAiB,CAAC,CAAC,EAAE,OAAO;WACnB,UAAU,WAAW,IAAI,EAClC,iBAAiB,CAAC,CAAC,EAAE,eAAe,CAAC,EAAE,OAAO;OAE9C,iBAAiB;EAGrB,UAAU,aAAa;CACxB;CACD,IAAI,eAAe;AACnB,MAAK,MAAM,CAAC,KAAK,SAAS,IAAI,OAAO,QAAQ,UAAU,EACrD,eAAe,aAAa,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,SAAS;AAE3D,QAAO;AACR;;;;;;;AAQD,SAAS,iCACPC,QACAC,MACA;AACA,QAAO,OAAO,OACZ,CAACC,YAAmC,UAAU;EAC5C,IAAI;AACJ,MAAI,MAAM,QAAQ;GAChB,SAAS,KAAK,UAAU,MAAM,OAAO;GACrC,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,WAAU,MAAM,SAAS;GACxB,MAAM,kBAAkB,OAAO,OAAO,MAAM,QAAQ,CAAC,GAAG;AACxD,OAAI,iBACF,SAAS,KAAK,UAAU,gBAAgB;AAE1C,OAAI,CAAC,OACH,QAAO;AAET,OAAI,OAAO,gBAAgB,QACzB,OAAO,cAAc,MAAM,eAAe;GAE5C,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,MACC,QAAO;AAET,MAAI,MAAM,YAAY,MAAM,QAAQ,WAAW,SAAS,EACtD,WAAW,SAAS,KAAK,MAAM,KAAK;AAEtC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AACF;;;;;;;AASD,SAAgB,iCACdC,QACAF,MACiB;AACjB,KAAI,OAAO,SAAS,SAClB,QAAO,OAAO,KAAK,OAAO,cAAc,CAAE,EAAC,CAAC,OAC1C,CAACC,YAAmC,iBAAiB;AACnD,MAAI,CAAC,OAAO,WACV,QAAO;EAET,MAAM,kBAAkB,KAAK,UAAU,OAAO,WAAW,cAAc;AACvE,MAAI,gBAAgB,SAAS,OAC3B,QAAO;EAET,WAAW,WAAW,gBAAgB,iCACpC,iBACA,KACD;AACD,OACG,gBAAgB,YACf,OAAO,UAAU,SAAS,aAAa,KACzC,WAAW,aAAa,QAExB,WAAW,SAAS,KAAK,aAAa;AAExC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AAEH,KAAI,OAAO,SAAS,SAAS;EAC3B,MAAM,eAAe,KAAK,UAAU,OAAO,SAAS,CAAE,EAAC;AACvD,SAAO;GACL,MAAM;GACN,OAAO,iCAAiC,cAAc,KAAK;GAC3D,UAAU,OAAO;GACjB,UAAU,OAAO;EAClB;CACF;AACD,QAAO,EACL,MAAM,OAAO,QAAQ,SACtB;AACF;;;;;;AAOD,SAAgB,oCAAoCD,MAGlD;AACA,KAAI,CAAC,KAAK,SAAS,MACjB,QAAO,EAAE,iBAAiB,CAAE,EAAE;CAEhC,MAAM,kBAAkB,CAAE;CAC1B,MAAMG,gBAAiE,CAAE;AACzE,MAAK,MAAM,QAAQ,OAAO,KAAK,KAAK,SAAS,MAAM,EAAE;EACnD,MAAM,iBAAiB,KAAK,qBAAqB,KAAK;AACtD,OAAK,MAAM,UAAU,KAAK,kBAAkB,KAAK,EAAE;GACjD,MAAM,YAAY,KAAK,aAAa,MAAM,OAAO;AACjD,OAAI
,CAAC,UACH,QAAO,EAAE,iBAAiB,CAAE,EAAE;GAEhC,MAAM,gCAAgC,eACnC,OAAO,KAAK,0BAA0B,UAAU,CAAC,CACjD,OACC,CACEC,iBACA,UACG;AACH,QAAI,CAAC,gBAAgB,MAAM,KACzB,gBAAgB,MAAM,MAAM,CAAE;IAEhC,gBAAgB,MAAM,IAAI,KAAK,MAAM;AACrC,WAAO;GACR,GACD,CAAE,EACH;GACH,MAAMC,mCAA2D;IAC/D,OAAO;IACP,QAAQ;IACR,QAAQ;IACR,MAAM;GACP;GACD,MAAMC,oBAMF,CAAE;AACN,QAAK,MAAM,iBAAiB,OAAO,KACjC,iCACD,CACC,KAAI,8BAA8B,gBAChC,kBAAkB,iCAAiC,kBACjD,iCACE,8BAA8B,gBAC9B,KACD;GAGP,MAAM,cAAc,KAAK,2BAA2B,UAAU;AAC9D,OAAI,aAAa,YAAY,QAAW;IACtC,MAAMC,qBAA4D,CAAE;AACpE,SAAK,MAAM,CAAC,WAAW,gBAAgB,IAAI,OAAO,QAChD,YAAY,QACb,CACC,KAAI,gBAAgB,WAAW,QAAW;KACxC,MAAM,SAAS,KAAK,UAAU,gBAAgB,OAAO;KACrD,mBAAmB,aAAa,iCAC9B,QACA,KACD;IACF;IAEH,MAAM,aAAa,OAAO,KAAK,mBAAmB;AAClD,QAAI,WAAW,WAAW,GACxB,kBAAkB,OAAO,mBAAmB,WAAW;aAC9C,WAAW,SAAS,GAC7B,kBAAkB,OAAO,EACvB,OAAO,OAAO,OAAO,mBAAmB,CACzC;GAEJ;GACD,MAAMC,iBACJ;IACE,MAAM,YAAY,sBAAsB,WAAW,MAAM,OAAO;IAChE,aAAa,UAAU,eAAe,UAAU,WAAW;IAC3D,YAAY;KACV,MAAM;KACN,YAAY;KAEZ,UAAU,OAAO,KAAK,kBAAkB;IACzC;GACF;GAEH,gBAAgB,KAAK,eAAe;GACpC,MAAM,WAAW,KAAK,WAAW,IAAI,SAAS,IAAI,IAC7C,KAAK,WAAW,IAAI,MAAM,GAAG,GAAG,GACjC,KAAK,WAAW;GACpB,cAAc,eAAe,QAAQ;IACnC;IACA,KAAK,UAAU;GAChB;EACF;CACF;AACD,QAAO;EACL;EACA,wBAAwB,OACtBC,MAEAC,aACAC,YAIG;GACH,MAAM,EACJ,SAAS,eACT,QAAQ,aACR,GAAG,MACJ,GAAG,WAAW,CAAE;GACjB,MAAM,EAAE,QAAQ,KAAK,GAAG,cAAc;GACtC,MAAM,gBAAgB,YAAY,UAAU,CAAE;GAC9C,MAAM,iBAAiB,OAAO,KAAK,cAAc,CAAC,OAEhD,CAACC,cAAmC,YAAY;AAC9C,QACE,cAAc,aAAa,MAC3B,cAAc,aAAa,QAC3B,cAAc,aAAa,QAE3B,aAAa,WAAW,cAAc;AAExC,WAAO;GACR,GACD,CAAE,EACH;GACD,MAAM,cAAc,IAAI,gBAAgB;IACtC,GAAG;IACH,GAAG;GACJ,GAAE,UAAU;GACb,MAAM,aAAa,YAAY;GAC/B,MAAM,eACJ,UAAU,KAAK,WAAW,IACzB,YAAY,SAAS,CAAC,CAAC,EAAE,aAAa,GAAG;GAC5C,MAAMC,UAAkC,CAAE;GAC1C,IAAI;AACJ,OAAI,YAAY,SAAS,QAAW;IAClC,IAAI,cAAc;AAClB,QAAI,OAAO,YAAY,SAAS,UAAU;AACxC,SAAI,OAAO,YAAY,SAAS,UAC9B,cAAc;KAEhB,OAAO,KAAK,UAAU,YAAY,KAAK;IACxC,OACC,OAAO,YAAY;IAErB,QAAQ,kBAAkB;GAC3B;GACD,MAAM,WAAW,MAAM,MAAM,cAAc;IACzC,GAAG;IACH;IACA,SAAS;KACP,GAAG;KACH,GAAG,YAAY;KACf,GAAG;IACJ;IACD;IACA,GAAG;GACJ,EAAC;GACF,IAAI;AACJ,OAAI,SAAS,SAAS,OAAO,SAAS,SAAS,KAC7C,SAAS,GAAG,SAAS,OAAO,EAAE,EAC5B,SAAS,WACV,KAAK,EAAE,KAAK,aAAa,EAAE,KAAK,UAAU,YAAY,EAAE;QAEzD,SAAS,MAAM,SAAS,MAAM;AAEhC,UAAO;EACR;CACF;AACF;;;;AAcD,IAAM,qBAAN,cAAiC,UAAU;CACzC,OAAO,UAAU;AACf,SAAO;CACR;CAED,AAAQ;CAER,WAAW;CAEX,YAAY;CAEZ,YAAYC,QAA8D;EACxE,OAAO;EACP,KAAK,gBAAgB,OAAO;CAC7B;CAED,IAAI,YAAY;AACd,SAAO,CAAC,KAAK,QAAS;CACvB;CAED,IAAI,aAAa;AACf,SAAO,CAAC,KAAK,SAAU;CACxB;CAED,aAAa;AACX,SAAO;CACR;;CAGD,MAAM,MACJC,QACAC,aACsB;EACtB,MAAM,gBAAgB,OAAO,KAAK;EAClC,MAAM,aAAa,cAAc;EACjC,MAAM,OAAO,cAAc;EAC3B,MAAM,WAAW,MAAM,KAAK,cAAc,YAAY,KAAK;AAC3D,SAAO,GAAG,KAAK,YAAY,SAAU;CACtC;AACF;;;;;;;AAqBD,eAAsB,mBACpBC,MACAC,UAA+B,CAAE,GACjC;CACA,IAAI;AACJ,KAAI,OAAO,SAAS,SAClB,KAAI;EACF,gBAAgB,MAAM,YAAY,QAAQ,KAAK;CAChD,QAAO;AACN,MAAI;GACF,gBAAgB,YAAY,WAAW,KAAK;EAC7C,QAAO;AACN,SAAM,IAAI,MAAM,CAAC,iCAAiC,EAAE,KAAK,CAAC,CAAC;EAC5D;CACF;MAED,gBAAgB,YAAY,WAAW,KAAK;CAE9C,MAAM,EAAE,iBAAiB,wBAAwB,GAC/C,oCAAoC,cAAc;AACpD,KAAI,2BAA2B,OAC7B,OAAM,IAAI,MACR,CAAC,4DAA4D,CAAC;AAIlE,KAAI,CAAC,QAAQ,IACX,OAAM,IAAI,MAAM;CAGlB,MAAM,EACJ,MAAM,QAAQ,KACd,SAAS,mBAAmB,aAAa,CACvC,2BAA2B,aACzB,mEACD,AACF,EAAC,EACF,eAAe,IAAI,mBAAmB,EACpC,eAAe,OAAO,MAAM,SAC1B,uBAAuB,MAAM,MAAM;EACjC,SAAS,QAAQ;EACjB,QAAQ,QAAQ;CACjB,EAAC,CACL,IACD,iBAAiB,CAAE,GACnB,QACA,GAAG,MACJ,GAAG;CACJ,MAAM,cAAc,IAAI,SAAS;EAC/B;EACA;EACA,cAAc,IAAI,0BAA0B,EAAE,UAAU,MAAO;EAC/D,WAAW;EACX,WAAW,EAAE,WAAW,gBAAiB;EACzC,GAAG;CACJ;AACD,QAAO,IAAI,gBAAgB;EACzB,QAAQ,CAAC,aAAa,YAAa;EACnC,iBAAiB,CAAC,UAAW;EAC7B,gBAAgB,YAAY;EAC5B;EACA,GAAG;CACJ;AACF"}
1
+ {"version":3,"file":"openapi.js","names":["url: string","pathParams: Record<string, string>","newParams: Record<string, string>","params: OpenAPIV3_1.ParameterObject[]","spec: OpenAPISpec","jsonSchema: JsonSchema7ObjectType","schema: OpenAPIV3_1.SchemaObject","nameToCallMap: Record<string, { method: string; url: string }>","operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>","paramLocationToRequestArgNameMap: Record<string, string>","requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n }","requestBodySchemas: Record<string, JsonSchema7ObjectType>","openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function","name: string","requestArgs: Record<string, any>","options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }","filteredArgs: Record<string, any>","headers: Record<string, string>","config: { requestMethod: SimpleRequestChainExecutionMethod }","values: ChainValues","_runManager?: CallbackManagerForChainRun","spec: OpenAPIV3_1.Document | string","options: OpenAPIChainOptions"],"sources":["../../../src/chains/openai_functions/openapi.ts"],"sourcesContent":["import type { OpenAIClient } from \"@langchain/openai\";\nimport {\n type JsonSchema7ObjectType,\n type JsonSchema7ArrayType,\n type JsonSchema7Type,\n} from \"@langchain/core/utils/json_schema\";\nimport type { OpenAPIV3_1 } from \"openapi-types\";\n\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n ChatPromptTemplate,\n HumanMessagePromptTemplate,\n BasePromptTemplate,\n} from \"@langchain/core/prompts\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { OpenAPISpec } from \"../../util/openapi.js\";\nimport { BaseChain } from \"../base.js\";\nimport { LLMChain, LLMChainInput } from \"../llm_chain.js\";\nimport { SequentialChain } from \"../sequential_chain.js\";\nimport { JsonOutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing a function for executing OpenAPI requests.\n */\ntype OpenAPIExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n) => Promise<string>;\n\n/**\n * Formats a URL by replacing path parameters with their corresponding\n * values.\n * @param url The URL to format.\n * @param pathParams The path parameters to replace in the URL.\n * @returns The formatted URL.\n */\nfunction formatURL(url: string, pathParams: Record<string, string>): string {\n const expectedPathParamNames = [...url.matchAll(/{(.*?)}/g)].map(\n (match) => match[1]\n );\n const newParams: Record<string, string> = {};\n for (const paramName of expectedPathParamNames) {\n const cleanParamName = paramName.replace(/^\\.;/, \"\").replace(/\\*$/, \"\");\n const value = pathParams[cleanParamName];\n let formattedValue;\n if (Array.isArray(value)) {\n if (paramName.startsWith(\".\")) {\n const separator = paramName.endsWith(\"*\") ? \".\" : \",\";\n formattedValue = `.${value.join(separator)}`;\n } else if (paramName.startsWith(\",\")) {\n const separator = paramName.endsWith(\"*\") ? 
`${cleanParamName}=` : \",\";\n formattedValue = `${cleanParamName}=${value.join(separator)}`;\n } else {\n formattedValue = value.join(\",\");\n }\n } else if (typeof value === \"object\") {\n const kvSeparator = paramName.endsWith(\"*\") ? \"=\" : \",\";\n const kvStrings = Object.entries(value).map(\n ([k, v]) => k + kvSeparator + v\n );\n let entrySeparator;\n if (paramName.startsWith(\".\")) {\n entrySeparator = \".\";\n formattedValue = \".\";\n } else if (paramName.startsWith(\";\")) {\n entrySeparator = \";\";\n formattedValue = \";\";\n } else {\n entrySeparator = \",\";\n formattedValue = \"\";\n }\n formattedValue += kvStrings.join(entrySeparator);\n } else {\n if (paramName.startsWith(\".\")) {\n formattedValue = `.${value}`;\n } else if (paramName.startsWith(\";\")) {\n formattedValue = `;${cleanParamName}=${value}`;\n } else {\n formattedValue = value;\n }\n }\n newParams[paramName] = formattedValue;\n }\n let formattedUrl = url;\n for (const [key, newValue] of Object.entries(newParams)) {\n formattedUrl = formattedUrl.replace(`{${key}}`, newValue);\n }\n return formattedUrl;\n}\n\n/**\n * Converts OpenAPI parameters to JSON schema format.\n * @param params The OpenAPI parameters to convert.\n * @param spec The OpenAPI specification that contains the parameters.\n * @returns The JSON schema representation of the OpenAPI parameters.\n */\nfunction convertOpenAPIParamsToJSONSchema(\n params: OpenAPIV3_1.ParameterObject[],\n spec: OpenAPISpec\n) {\n return params.reduce(\n (jsonSchema: JsonSchema7ObjectType, param) => {\n let schema;\n if (param.schema) {\n schema = spec.getSchema(param.schema);\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else if (param.content) {\n const mediaTypeSchema = Object.values(param.content)[0].schema;\n if (mediaTypeSchema) {\n schema = spec.getSchema(mediaTypeSchema);\n }\n if (!schema) {\n return jsonSchema;\n }\n if (schema.description === undefined) {\n schema.description = param.description ?? \"\";\n }\n jsonSchema.properties[param.name] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n );\n } else {\n return jsonSchema;\n }\n if (param.required && Array.isArray(jsonSchema.required)) {\n jsonSchema.required.push(param.name);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n}\n\n// OpenAI throws errors on extraneous schema properties, e.g. if \"required\" is set on individual ones\n/**\n * Converts OpenAPI schemas to JSON schema format.\n * @param schema The OpenAPI schema to convert.\n * @param spec The OpenAPI specification that contains the schema.\n * @returns The JSON schema representation of the OpenAPI schema.\n */\nexport function convertOpenAPISchemaToJSONSchema(\n schema: OpenAPIV3_1.SchemaObject,\n spec: OpenAPISpec\n): JsonSchema7Type {\n if (schema.type === \"object\") {\n return Object.keys(schema.properties ?? 
{}).reduce(\n (jsonSchema: JsonSchema7ObjectType, propertyName) => {\n if (!schema.properties) {\n return jsonSchema;\n }\n const openAPIProperty = spec.getSchema(schema.properties[propertyName]);\n if (openAPIProperty.type === undefined) {\n return jsonSchema;\n }\n jsonSchema.properties[propertyName] = convertOpenAPISchemaToJSONSchema(\n openAPIProperty,\n spec\n );\n if (\n schema.required?.includes(propertyName) &&\n jsonSchema.required !== undefined\n ) {\n jsonSchema.required.push(propertyName);\n }\n return jsonSchema;\n },\n {\n type: \"object\",\n properties: {},\n required: [],\n additionalProperties: {},\n }\n );\n }\n if (schema.type === \"array\") {\n const openAPIItems = spec.getSchema(schema.items ?? {});\n return {\n type: \"array\",\n items: convertOpenAPISchemaToJSONSchema(openAPIItems, spec),\n minItems: schema.minItems,\n maxItems: schema.maxItems,\n } as JsonSchema7ArrayType;\n }\n return {\n type: schema.type ?? \"string\",\n } as JsonSchema7Type;\n}\n\n/**\n * Converts an OpenAPI specification to OpenAI functions.\n * @param spec The OpenAPI specification to convert.\n * @returns An object containing the OpenAI functions derived from the OpenAPI specification and a default execution method.\n */\nexport function convertOpenAPISpecToOpenAIFunctions(spec: OpenAPISpec): {\n openAIFunctions: OpenAIClient.Chat.ChatCompletionCreateParams.Function[];\n defaultExecutionMethod?: OpenAPIExecutionMethod;\n} {\n if (!spec.document.paths) {\n return { openAIFunctions: [] };\n }\n const openAIFunctions = [];\n const nameToCallMap: Record<string, { method: string; url: string }> = {};\n for (const path of Object.keys(spec.document.paths)) {\n const pathParameters = spec.getParametersForPath(path);\n for (const method of spec.getMethodsForPath(path)) {\n const operation = spec.getOperation(path, method);\n if (!operation) {\n return { openAIFunctions: [] };\n }\n const operationParametersByLocation = pathParameters\n .concat(spec.getParametersForOperation(operation))\n .reduce(\n (\n operationParams: Record<string, OpenAPIV3_1.ParameterObject[]>,\n param\n ) => {\n if (!operationParams[param.in]) {\n operationParams[param.in] = [];\n }\n operationParams[param.in].push(param);\n return operationParams;\n },\n {}\n );\n const paramLocationToRequestArgNameMap: Record<string, string> = {\n query: \"params\",\n header: \"headers\",\n cookie: \"cookies\",\n path: \"path_params\",\n };\n const requestArgsSchema: Record<string, JsonSchema7ObjectType> & {\n data?:\n | JsonSchema7ObjectType\n | {\n anyOf?: JsonSchema7ObjectType[];\n };\n } = {};\n for (const paramLocation of Object.keys(\n paramLocationToRequestArgNameMap\n )) {\n if (operationParametersByLocation[paramLocation]) {\n requestArgsSchema[paramLocationToRequestArgNameMap[paramLocation]] =\n convertOpenAPIParamsToJSONSchema(\n operationParametersByLocation[paramLocation],\n spec\n );\n }\n }\n const requestBody = spec.getRequestBodyForOperation(operation);\n if (requestBody?.content !== undefined) {\n const requestBodySchemas: Record<string, JsonSchema7ObjectType> = {};\n for (const [mediaType, mediaTypeObject] of Object.entries(\n requestBody.content\n )) {\n if (mediaTypeObject.schema !== undefined) {\n const schema = spec.getSchema(mediaTypeObject.schema);\n requestBodySchemas[mediaType] = convertOpenAPISchemaToJSONSchema(\n schema,\n spec\n ) as JsonSchema7ObjectType;\n }\n }\n const mediaTypes = Object.keys(requestBodySchemas);\n if (mediaTypes.length === 1) {\n requestArgsSchema.data = requestBodySchemas[mediaTypes[0]];\n } 
else if (mediaTypes.length > 1) {\n requestArgsSchema.data = {\n anyOf: Object.values(requestBodySchemas),\n };\n }\n }\n const openAIFunction: OpenAIClient.Chat.ChatCompletionCreateParams.Function =\n {\n name: OpenAPISpec.getCleanedOperationId(operation, path, method),\n description: operation.description ?? operation.summary ?? \"\",\n parameters: {\n type: \"object\",\n properties: requestArgsSchema,\n // All remaining top-level parameters are required\n required: Object.keys(requestArgsSchema),\n },\n };\n\n openAIFunctions.push(openAIFunction);\n const baseUrl = (spec.baseUrl ?? \"\").endsWith(\"/\")\n ? (spec.baseUrl ?? \"\").slice(0, -1)\n : spec.baseUrl ?? \"\";\n nameToCallMap[openAIFunction.name] = {\n method,\n url: baseUrl + path,\n };\n }\n }\n return {\n openAIFunctions,\n defaultExecutionMethod: async (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>,\n options?: {\n headers?: Record<string, string>;\n params?: Record<string, string>;\n }\n ) => {\n const {\n headers: customHeaders,\n params: customParams,\n ...rest\n } = options ?? {};\n const { method, url } = nameToCallMap[name];\n const requestParams = requestArgs.params ?? {};\n const nonEmptyParams = Object.keys(requestParams).reduce(\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n (filteredArgs: Record<string, any>, argName) => {\n if (\n requestParams[argName] !== \"\" &&\n requestParams[argName] !== null &&\n requestParams[argName] !== undefined\n ) {\n filteredArgs[argName] = requestParams[argName];\n }\n return filteredArgs;\n },\n {}\n );\n const queryString = new URLSearchParams({\n ...nonEmptyParams,\n ...customParams,\n }).toString();\n const pathParams = requestArgs.path_params;\n const formattedUrl =\n formatURL(url, pathParams) +\n (queryString.length ? 
`?${queryString}` : \"\");\n const headers: Record<string, string> = {};\n let body;\n if (requestArgs.data !== undefined) {\n let contentType = \"text/plain\";\n if (typeof requestArgs.data !== \"string\") {\n if (typeof requestArgs.data === \"object\") {\n contentType = \"application/json\";\n }\n body = JSON.stringify(requestArgs.data);\n } else {\n body = requestArgs.data;\n }\n headers[\"content-type\"] = contentType;\n }\n const response = await fetch(formattedUrl, {\n ...requestArgs,\n method,\n headers: {\n ...headers,\n ...requestArgs.headers,\n ...customHeaders,\n },\n body,\n ...rest,\n });\n let output;\n if (response.status < 200 || response.status > 299) {\n output = `${response.status}: ${\n response.statusText\n } for ${name} called with ${JSON.stringify(queryString)}`;\n } else {\n output = await response.text();\n }\n return output;\n },\n };\n}\n\n/**\n * Type representing a function for executing simple requests.\n */\ntype SimpleRequestChainExecutionMethod = (\n name: string,\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n requestArgs: Record<string, any>\n) => Promise<string>;\n\n/**\n * A chain for making simple API requests.\n */\nclass SimpleRequestChain extends BaseChain {\n static lc_name() {\n return \"SimpleRequestChain\";\n }\n\n private requestMethod: SimpleRequestChainExecutionMethod;\n\n inputKey = \"function\";\n\n outputKey = \"response\";\n\n constructor(config: { requestMethod: SimpleRequestChainExecutionMethod }) {\n super();\n this.requestMethod = config.requestMethod;\n }\n\n get inputKeys() {\n return [this.inputKey];\n }\n\n get outputKeys() {\n return [this.outputKey];\n }\n\n _chainType() {\n return \"simple_request_chain\" as const;\n }\n\n /** @ignore */\n async _call(\n values: ChainValues,\n _runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n const inputKeyValue = values[this.inputKey];\n const methodName = inputKeyValue.name;\n const args = inputKeyValue.arguments;\n const response = await this.requestMethod(methodName, args);\n return { [this.outputKey]: response };\n }\n}\n\n/**\n * Type representing the options for creating an OpenAPI chain.\n */\nexport type OpenAPIChainOptions = {\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n prompt?: BasePromptTemplate;\n requestChain?: BaseChain;\n llmChainInputs?: LLMChainInput;\n headers?: Record<string, string>;\n params?: Record<string, string>;\n verbose?: boolean;\n};\n\n/**\n * Create a chain for querying an API from a OpenAPI spec.\n * @param spec OpenAPISpec or url/file/text string corresponding to one.\n * @param options Custom options passed into the chain\n * @returns OpenAPIChain\n */\nexport async function createOpenAPIChain(\n spec: OpenAPIV3_1.Document | string,\n options: OpenAPIChainOptions = {}\n) {\n let convertedSpec;\n if (typeof spec === \"string\") {\n try {\n convertedSpec = await OpenAPISpec.fromURL(spec);\n } catch {\n try {\n convertedSpec = OpenAPISpec.fromString(spec);\n } catch {\n throw new Error(`Unable to parse spec from source ${spec}.`);\n }\n }\n } else {\n convertedSpec = OpenAPISpec.fromObject(spec);\n }\n const { openAIFunctions, defaultExecutionMethod } =\n convertOpenAPISpecToOpenAIFunctions(convertedSpec);\n if (defaultExecutionMethod === undefined) {\n throw new Error(\n `Could not parse any valid operations from the provided spec.`\n );\n }\n\n if (!options.llm) {\n throw new Error(\"`llm` option is required\");\n }\n\n const {\n llm = options.llm,\n prompt = ChatPromptTemplate.fromMessages([\n 
HumanMessagePromptTemplate.fromTemplate(\n \"Use the provided API's to respond to this user query:\\n\\n{query}\"\n ),\n ]),\n requestChain = new SimpleRequestChain({\n requestMethod: async (name, args) =>\n defaultExecutionMethod(name, args, {\n headers: options.headers,\n params: options.params,\n }),\n }),\n llmChainInputs = {},\n verbose,\n ...rest\n } = options;\n const formatChain = new LLMChain({\n llm,\n prompt,\n outputParser: new JsonOutputFunctionsParser({ argsOnly: false }),\n outputKey: \"function\",\n llmKwargs: { functions: openAIFunctions },\n ...llmChainInputs,\n });\n return new SequentialChain({\n chains: [formatChain, requestChain],\n outputVariables: [\"response\"],\n inputVariables: formatChain.inputKeys,\n verbose,\n ...rest,\n });\n}\n"],"mappings":";;;;;;;;;;;;;;;AA2CA,SAAS,UAAUA,KAAaC,YAA4C;CAC1E,MAAM,yBAAyB,CAAC,GAAG,IAAI,SAAS,WAAW,AAAC,EAAC,IAC3D,CAAC,UAAU,MAAM,GAClB;CACD,MAAMC,YAAoC,CAAE;AAC5C,MAAK,MAAM,aAAa,wBAAwB;EAC9C,MAAM,iBAAiB,UAAU,QAAQ,QAAQ,GAAG,CAAC,QAAQ,OAAO,GAAG;EACvE,MAAM,QAAQ,WAAW;EACzB,IAAI;AACJ,MAAI,MAAM,QAAQ,MAAM,CACtB,KAAI,UAAU,WAAW,IAAI,EAAE;GAC7B,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,MAAM;GAClD,iBAAiB,CAAC,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC7C,WAAU,UAAU,WAAW,IAAI,EAAE;GACpC,MAAM,YAAY,UAAU,SAAS,IAAI,GAAG,GAAG,eAAe,CAAC,CAAC,GAAG;GACnE,iBAAiB,GAAG,eAAe,CAAC,EAAE,MAAM,KAAK,UAAU,EAAE;EAC9D,OACC,iBAAiB,MAAM,KAAK,IAAI;WAEzB,OAAO,UAAU,UAAU;GACpC,MAAM,cAAc,UAAU,SAAS,IAAI,GAAG,MAAM;GACpD,MAAM,YAAY,OAAO,QAAQ,MAAM,CAAC,IACtC,CAAC,CAAC,GAAG,EAAE,KAAK,IAAI,cAAc,EAC/B;GACD,IAAI;AACJ,OAAI,UAAU,WAAW,IAAI,EAAE;IAC7B,iBAAiB;IACjB,iBAAiB;GAClB,WAAU,UAAU,WAAW,IAAI,EAAE;IACpC,iBAAiB;IACjB,iBAAiB;GAClB,OAAM;IACL,iBAAiB;IACjB,iBAAiB;GAClB;GACD,kBAAkB,UAAU,KAAK,eAAe;EACjD,WACK,UAAU,WAAW,IAAI,EAC3B,iBAAiB,CAAC,CAAC,EAAE,OAAO;WACnB,UAAU,WAAW,IAAI,EAClC,iBAAiB,CAAC,CAAC,EAAE,eAAe,CAAC,EAAE,OAAO;OAE9C,iBAAiB;EAGrB,UAAU,aAAa;CACxB;CACD,IAAI,eAAe;AACnB,MAAK,MAAM,CAAC,KAAK,SAAS,IAAI,OAAO,QAAQ,UAAU,EACrD,eAAe,aAAa,QAAQ,CAAC,CAAC,EAAE,IAAI,CAAC,CAAC,EAAE,SAAS;AAE3D,QAAO;AACR;;;;;;;AAQD,SAAS,iCACPC,QACAC,MACA;AACA,QAAO,OAAO,OACZ,CAACC,YAAmC,UAAU;EAC5C,IAAI;AACJ,MAAI,MAAM,QAAQ;GAChB,SAAS,KAAK,UAAU,MAAM,OAAO;GACrC,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,WAAU,MAAM,SAAS;GACxB,MAAM,kBAAkB,OAAO,OAAO,MAAM,QAAQ,CAAC,GAAG;AACxD,OAAI,iBACF,SAAS,KAAK,UAAU,gBAAgB;AAE1C,OAAI,CAAC,OACH,QAAO;AAET,OAAI,OAAO,gBAAgB,QACzB,OAAO,cAAc,MAAM,eAAe;GAE5C,WAAW,WAAW,MAAM,QAAQ,iCAClC,QACA,KACD;EACF,MACC,QAAO;AAET,MAAI,MAAM,YAAY,MAAM,QAAQ,WAAW,SAAS,EACtD,WAAW,SAAS,KAAK,MAAM,KAAK;AAEtC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AACF;;;;;;;AASD,SAAgB,iCACdC,QACAF,MACiB;AACjB,KAAI,OAAO,SAAS,SAClB,QAAO,OAAO,KAAK,OAAO,cAAc,CAAE,EAAC,CAAC,OAC1C,CAACC,YAAmC,iBAAiB;AACnD,MAAI,CAAC,OAAO,WACV,QAAO;EAET,MAAM,kBAAkB,KAAK,UAAU,OAAO,WAAW,cAAc;AACvE,MAAI,gBAAgB,SAAS,OAC3B,QAAO;EAET,WAAW,WAAW,gBAAgB,iCACpC,iBACA,KACD;AACD,MACE,OAAO,UAAU,SAAS,aAAa,IACvC,WAAW,aAAa,QAExB,WAAW,SAAS,KAAK,aAAa;AAExC,SAAO;CACR,GACD;EACE,MAAM;EACN,YAAY,CAAE;EACd,UAAU,CAAE;EACZ,sBAAsB,CAAE;CACzB,EACF;AAEH,KAAI,OAAO,SAAS,SAAS;EAC3B,MAAM,eAAe,KAAK,UAAU,OAAO,SAAS,CAAE,EAAC;AACvD,SAAO;GACL,MAAM;GACN,OAAO,iCAAiC,cAAc,KAAK;GAC3D,UAAU,OAAO;GACjB,UAAU,OAAO;EAClB;CACF;AACD,QAAO,EACL,MAAM,OAAO,QAAQ,SACtB;AACF;;;;;;AAOD,SAAgB,oCAAoCD,MAGlD;AACA,KAAI,CAAC,KAAK,SAAS,MACjB,QAAO,EAAE,iBAAiB,CAAE,EAAE;CAEhC,MAAM,kBAAkB,CAAE;CAC1B,MAAMG,gBAAiE,CAAE;AACzE,MAAK,MAAM,QAAQ,OAAO,KAAK,KAAK,SAAS,MAAM,EAAE;EACnD,MAAM,iBAAiB,KAAK,qBAAqB,KAAK;AACtD,OAAK,MAAM,UAAU,KAAK,kBAAkB,KAAK,EAAE;GACjD,MAAM,YAAY,KAAK,aAAa,MAAM,OAAO;AACjD,OAAI,CAAC,UACH,Q
AAO,EAAE,iBAAiB,CAAE,EAAE;GAEhC,MAAM,gCAAgC,eACnC,OAAO,KAAK,0BAA0B,UAAU,CAAC,CACjD,OACC,CACEC,iBACA,UACG;AACH,QAAI,CAAC,gBAAgB,MAAM,KACzB,gBAAgB,MAAM,MAAM,CAAE;IAEhC,gBAAgB,MAAM,IAAI,KAAK,MAAM;AACrC,WAAO;GACR,GACD,CAAE,EACH;GACH,MAAMC,mCAA2D;IAC/D,OAAO;IACP,QAAQ;IACR,QAAQ;IACR,MAAM;GACP;GACD,MAAMC,oBAMF,CAAE;AACN,QAAK,MAAM,iBAAiB,OAAO,KACjC,iCACD,CACC,KAAI,8BAA8B,gBAChC,kBAAkB,iCAAiC,kBACjD,iCACE,8BAA8B,gBAC9B,KACD;GAGP,MAAM,cAAc,KAAK,2BAA2B,UAAU;AAC9D,OAAI,aAAa,YAAY,QAAW;IACtC,MAAMC,qBAA4D,CAAE;AACpE,SAAK,MAAM,CAAC,WAAW,gBAAgB,IAAI,OAAO,QAChD,YAAY,QACb,CACC,KAAI,gBAAgB,WAAW,QAAW;KACxC,MAAM,SAAS,KAAK,UAAU,gBAAgB,OAAO;KACrD,mBAAmB,aAAa,iCAC9B,QACA,KACD;IACF;IAEH,MAAM,aAAa,OAAO,KAAK,mBAAmB;AAClD,QAAI,WAAW,WAAW,GACxB,kBAAkB,OAAO,mBAAmB,WAAW;aAC9C,WAAW,SAAS,GAC7B,kBAAkB,OAAO,EACvB,OAAO,OAAO,OAAO,mBAAmB,CACzC;GAEJ;GACD,MAAMC,iBACJ;IACE,MAAM,YAAY,sBAAsB,WAAW,MAAM,OAAO;IAChE,aAAa,UAAU,eAAe,UAAU,WAAW;IAC3D,YAAY;KACV,MAAM;KACN,YAAY;KAEZ,UAAU,OAAO,KAAK,kBAAkB;IACzC;GACF;GAEH,gBAAgB,KAAK,eAAe;GACpC,MAAM,WAAW,KAAK,WAAW,IAAI,SAAS,IAAI,IAC7C,KAAK,WAAW,IAAI,MAAM,GAAG,GAAG,GACjC,KAAK,WAAW;GACpB,cAAc,eAAe,QAAQ;IACnC;IACA,KAAK,UAAU;GAChB;EACF;CACF;AACD,QAAO;EACL;EACA,wBAAwB,OACtBC,MAEAC,aACAC,YAIG;GACH,MAAM,EACJ,SAAS,eACT,QAAQ,aACR,GAAG,MACJ,GAAG,WAAW,CAAE;GACjB,MAAM,EAAE,QAAQ,KAAK,GAAG,cAAc;GACtC,MAAM,gBAAgB,YAAY,UAAU,CAAE;GAC9C,MAAM,iBAAiB,OAAO,KAAK,cAAc,CAAC,OAEhD,CAACC,cAAmC,YAAY;AAC9C,QACE,cAAc,aAAa,MAC3B,cAAc,aAAa,QAC3B,cAAc,aAAa,QAE3B,aAAa,WAAW,cAAc;AAExC,WAAO;GACR,GACD,CAAE,EACH;GACD,MAAM,cAAc,IAAI,gBAAgB;IACtC,GAAG;IACH,GAAG;GACJ,GAAE,UAAU;GACb,MAAM,aAAa,YAAY;GAC/B,MAAM,eACJ,UAAU,KAAK,WAAW,IACzB,YAAY,SAAS,CAAC,CAAC,EAAE,aAAa,GAAG;GAC5C,MAAMC,UAAkC,CAAE;GAC1C,IAAI;AACJ,OAAI,YAAY,SAAS,QAAW;IAClC,IAAI,cAAc;AAClB,QAAI,OAAO,YAAY,SAAS,UAAU;AACxC,SAAI,OAAO,YAAY,SAAS,UAC9B,cAAc;KAEhB,OAAO,KAAK,UAAU,YAAY,KAAK;IACxC,OACC,OAAO,YAAY;IAErB,QAAQ,kBAAkB;GAC3B;GACD,MAAM,WAAW,MAAM,MAAM,cAAc;IACzC,GAAG;IACH;IACA,SAAS;KACP,GAAG;KACH,GAAG,YAAY;KACf,GAAG;IACJ;IACD;IACA,GAAG;GACJ,EAAC;GACF,IAAI;AACJ,OAAI,SAAS,SAAS,OAAO,SAAS,SAAS,KAC7C,SAAS,GAAG,SAAS,OAAO,EAAE,EAC5B,SAAS,WACV,KAAK,EAAE,KAAK,aAAa,EAAE,KAAK,UAAU,YAAY,EAAE;QAEzD,SAAS,MAAM,SAAS,MAAM;AAEhC,UAAO;EACR;CACF;AACF;;;;AAcD,IAAM,qBAAN,cAAiC,UAAU;CACzC,OAAO,UAAU;AACf,SAAO;CACR;CAED,AAAQ;CAER,WAAW;CAEX,YAAY;CAEZ,YAAYC,QAA8D;EACxE,OAAO;EACP,KAAK,gBAAgB,OAAO;CAC7B;CAED,IAAI,YAAY;AACd,SAAO,CAAC,KAAK,QAAS;CACvB;CAED,IAAI,aAAa;AACf,SAAO,CAAC,KAAK,SAAU;CACxB;CAED,aAAa;AACX,SAAO;CACR;;CAGD,MAAM,MACJC,QACAC,aACsB;EACtB,MAAM,gBAAgB,OAAO,KAAK;EAClC,MAAM,aAAa,cAAc;EACjC,MAAM,OAAO,cAAc;EAC3B,MAAM,WAAW,MAAM,KAAK,cAAc,YAAY,KAAK;AAC3D,SAAO,GAAG,KAAK,YAAY,SAAU;CACtC;AACF;;;;;;;AAqBD,eAAsB,mBACpBC,MACAC,UAA+B,CAAE,GACjC;CACA,IAAI;AACJ,KAAI,OAAO,SAAS,SAClB,KAAI;EACF,gBAAgB,MAAM,YAAY,QAAQ,KAAK;CAChD,QAAO;AACN,MAAI;GACF,gBAAgB,YAAY,WAAW,KAAK;EAC7C,QAAO;AACN,SAAM,IAAI,MAAM,CAAC,iCAAiC,EAAE,KAAK,CAAC,CAAC;EAC5D;CACF;MAED,gBAAgB,YAAY,WAAW,KAAK;CAE9C,MAAM,EAAE,iBAAiB,wBAAwB,GAC/C,oCAAoC,cAAc;AACpD,KAAI,2BAA2B,OAC7B,OAAM,IAAI,MACR,CAAC,4DAA4D,CAAC;AAIlE,KAAI,CAAC,QAAQ,IACX,OAAM,IAAI,MAAM;CAGlB,MAAM,EACJ,MAAM,QAAQ,KACd,SAAS,mBAAmB,aAAa,CACvC,2BAA2B,aACzB,mEACD,AACF,EAAC,EACF,eAAe,IAAI,mBAAmB,EACpC,eAAe,OAAO,MAAM,SAC1B,uBAAuB,MAAM,MAAM;EACjC,SAAS,QAAQ;EACjB,QAAQ,QAAQ;CACjB,EAAC,CACL,IACD,iBAAiB,CAAE,GACnB,QACA,GAAG,MACJ,GAAG;CACJ,MAAM,cAAc,IAAI,SAAS;EAC/B;EACA;EACA,cAAc,IAAI,0BAA0B,EAAE,UAAU,MAAO;EAC/D,WAAW;EACX,WAAW,EAAE,WAAW,gBAAiB;EACzC,GAAG;CACJ;AACD,QAAO,IAAI,gBAAgB;EACzB,QAAQ,CAAC,aAAa,YAAa;EACnC,iBAAiB,CAAC,UAAW;EAC7B,gBAAgB,YAAY;EAC5B;EACA,GAAG;CACJ;AACF"}
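The hunk above is the regenerated source map for the OpenAI-functions OpenAPI chain; its embedded source shows `convertOpenAPISchemaToJSONSchema`, `convertOpenAPISpecToOpenAIFunctions`, and `createOpenAPIChain`, the code touched by this release's "required from nested schema" fix. As a reading aid, here is a minimal usage sketch based on the `createOpenAPIChain` signature visible in that embedded source; the import paths, model name, spec URL, and token are assumptions for illustration, not taken from the diff.

```typescript
// Hedged sketch only -- import paths, ChatOpenAI model, and the spec URL are
// assumptions; the option shapes mirror OpenAPIChainOptions from the source above.
import { ChatOpenAI } from "@langchain/openai";
import { createOpenAPIChain } from "@langchain/classic/chains/openai_functions";

const chain = await createOpenAPIChain("https://example.com/openapi.json", {
  // `llm` is required; the chain throws "`llm` option is required" without it.
  llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
  // Optional headers/params are forwarded to every request the chain executes.
  headers: { Authorization: "Bearer <token>" },
});

// The first chain step formats an OpenAI function call from the spec's operations;
// the second executes the matching HTTP request and returns the response text
// under the "response" output key.
const result = await chain.invoke({ query: "List the available pets" });
console.log(result.response);
```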
@@ -1,6 +1,6 @@
1
1
  import { MapReduceDocumentsChain, MapReduceDocumentsChainInput, RefineDocumentsChain, StuffDocumentsChain } from "../combine_docs_chain.cjs";
2
2
  import { BasePromptTemplate } from "@langchain/core/prompts";
3
- import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
3
+ import * as _langchain_core_language_models_base1 from "@langchain/core/language_models/base";
4
4
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
5
5
 
6
6
  //#region src/chains/question_answering/load.d.ts
@@ -16,7 +16,7 @@ type QAChainParams = ({
16
16
  } & MapReduceQAChainParams) | ({
17
17
  type?: "refine";
18
18
  } & RefineQAChainParams);
19
- declare const loadQAChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
19
+ declare const loadQAChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
20
20
  /**
21
21
  * Represents the parameters for creating a StuffQAChain.
22
22
  */
@@ -1 +1 @@
1
- {"version":3,"file":"load.d.cts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","QAChainParams","StuffQAChainParams","MapReduceQAChainParams","RefineQAChainParams","loadQAChain","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","loadQAStuffChain","loadQAMapReduceChain","loadQARefineChain"],"sources":["../../../src/chains/question_answering/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Represents the parameters for creating a QAChain. It can be of three\n * types: \"stuff\", \"map_reduce\", or \"refine\".\n */\nexport type QAChainParams = ({\n type?: \"stuff\";\n} & StuffQAChainParams) | ({\n type?: \"map_reduce\";\n} & MapReduceQAChainParams) | ({\n type?: \"refine\";\n} & RefineQAChainParams);\nexport declare const loadQAChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\n/**\n * Represents the parameters for creating a StuffQAChain.\n */\nexport interface StuffQAChainParams {\n prompt?: BasePromptTemplate;\n verbose?: boolean;\n}\n/**\n * Loads a StuffQAChain based on the provided parameters. It takes an LLM\n * instance and StuffQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a StuffQAChain.\n * @returns A StuffQAChain instance.\n */\nexport declare function loadQAStuffChain(llm: BaseLanguageModelInterface, params?: StuffQAChainParams): StuffDocumentsChain;\n/**\n * Represents the parameters for creating a MapReduceQAChain.\n */\nexport interface MapReduceQAChainParams {\n returnIntermediateSteps?: MapReduceDocumentsChainInput[\"returnIntermediateSteps\"];\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a MapReduceQAChain based on the provided parameters. It takes an\n * LLM instance and MapReduceQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a MapReduceQAChain.\n * @returns A MapReduceQAChain instance.\n */\nexport declare function loadQAMapReduceChain(llm: BaseLanguageModelInterface, params?: MapReduceQAChainParams): MapReduceDocumentsChain;\n/**\n * Represents the parameters for creating a RefineQAChain.\n */\nexport interface RefineQAChainParams {\n questionPrompt?: BasePromptTemplate;\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a RefineQAChain based on the provided parameters. 
It takes an LLM\n * instance and RefineQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a RefineQAChain.\n * @returns A RefineQAChain instance.\n */\nexport declare function loadQARefineChain(llm: BaseLanguageModelInterface, params?: RefineQAChainParams): RefineDocumentsChain;\n"],"mappings":";;;;;;;;;;AAOA;AAAyB,KAAbM,aAAAA,GAAa,CAAA;EAAA,IAErBC,CAAAA,EAAAA,OAAAA;CAAkB,GAAlBA,kBAEAC,CAAAA,GAAAA,CAAAA;EAAsB,IAEtBC,CAAAA,EAAAA,YAAAA;AAAmB,CAAA,GAFnBD,sBAEmB,CAAA,GAAA,CAAA;EACFE,IAAAA,CAAAA,EAAAA,QAAgO;CAAA,GADjPD,mBACiP,CAAA;AAAAE,cAAhOD,WAAgOC,EAAAA,CAAAA,GAAAA,EAA7MX,0BAA+EY,CAAAA,GAAAA,EAA8HD,qCAAAA,CAA9HC,4BAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAAwCN,aAAxCM,EAAAA,GAA0DT,uBAA1DS,GAAoFR,oBAApFQ,GAA2GV,mBAA3GU;;;;AAAoFR,UAI1LG,kBAAAA,CAJ0LH;EAAoB,MAAGF,CAAAA,EAKrND,kBALqNC;EAAmB,OAAA,CAAA,EAAA,OAAA;AAIrP;AAWA;;;;;AAA2H;AAI3H;AAAuC,iBAJfW,gBAAAA,CAIe,GAAA,EAJOb,0BAIP,EAAA,MAAA,CAAA,EAJ4CO,kBAI5C,CAAA,EAJiEL,mBAIjE;;;;AAItBF,UAJAQ,sBAAAA,CAIAR;EAA0B,uBAAA,CAAA,EAHbK,4BAGa,CAAA,yBAAA,CAAA;EAUnBS,gBAAAA,CAAAA,EAZDb,kBAYqB;EAAA,aAAA,CAAA,EAXxBA,kBAWwB;EAAA,UAAMD,CAAAA,EAVjCA,0BAUiCA;EAA0B,OAAWQ,CAAAA,EAAAA,OAAAA;;AAAgD;AAIvI;;;;;AAG0C;AAUlBO,iBAjBAD,oBAAAA,CAiBiB,GAAA,EAjBSd,0BAiBT,EAAA,MAAA,CAAA,EAjB8CQ,sBAiB9C,CAAA,EAjBuEL,uBAiBvE;;;;AAAiEC,UAbzFK,mBAAAA,CAayFL;EAAoB,cAAA,CAAA,EAZzGH,kBAYyG;iBAX3GA;cACHD;;;;;;;;;;iBAUQe,iBAAAA,MAAuBf,qCAAqCS,sBAAsBL"}
1
+ {"version":3,"file":"load.d.cts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","QAChainParams","StuffQAChainParams","MapReduceQAChainParams","RefineQAChainParams","loadQAChain","_langchain_core_language_models_base1","BaseLanguageModelCallOptions","loadQAStuffChain","loadQAMapReduceChain","loadQARefineChain"],"sources":["../../../src/chains/question_answering/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Represents the parameters for creating a QAChain. It can be of three\n * types: \"stuff\", \"map_reduce\", or \"refine\".\n */\nexport type QAChainParams = ({\n type?: \"stuff\";\n} & StuffQAChainParams) | ({\n type?: \"map_reduce\";\n} & MapReduceQAChainParams) | ({\n type?: \"refine\";\n} & RefineQAChainParams);\nexport declare const loadQAChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\n/**\n * Represents the parameters for creating a StuffQAChain.\n */\nexport interface StuffQAChainParams {\n prompt?: BasePromptTemplate;\n verbose?: boolean;\n}\n/**\n * Loads a StuffQAChain based on the provided parameters. It takes an LLM\n * instance and StuffQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a StuffQAChain.\n * @returns A StuffQAChain instance.\n */\nexport declare function loadQAStuffChain(llm: BaseLanguageModelInterface, params?: StuffQAChainParams): StuffDocumentsChain;\n/**\n * Represents the parameters for creating a MapReduceQAChain.\n */\nexport interface MapReduceQAChainParams {\n returnIntermediateSteps?: MapReduceDocumentsChainInput[\"returnIntermediateSteps\"];\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a MapReduceQAChain based on the provided parameters. It takes an\n * LLM instance and MapReduceQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a MapReduceQAChain.\n * @returns A MapReduceQAChain instance.\n */\nexport declare function loadQAMapReduceChain(llm: BaseLanguageModelInterface, params?: MapReduceQAChainParams): MapReduceDocumentsChain;\n/**\n * Represents the parameters for creating a RefineQAChain.\n */\nexport interface RefineQAChainParams {\n questionPrompt?: BasePromptTemplate;\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a RefineQAChain based on the provided parameters. 
It takes an LLM\n * instance and RefineQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a RefineQAChain.\n * @returns A RefineQAChain instance.\n */\nexport declare function loadQARefineChain(llm: BaseLanguageModelInterface, params?: RefineQAChainParams): RefineDocumentsChain;\n"],"mappings":";;;;;;;;;;AAOA;AAAyB,KAAbM,aAAAA,GAAa,CAAA;EAAA,IAErBC,CAAAA,EAAAA,OAAAA;CAAkB,GAAlBA,kBAEAC,CAAAA,GAAAA,CAAAA;EAAsB,IAEtBC,CAAAA,EAAAA,YAAAA;AAAmB,CAAA,GAFnBD,sBAEmB,CAAA,GAAA,CAAA;EACFE,IAAAA,CAAAA,EAAAA,QAAgO;CAAA,GADjPD,mBACiP,CAAA;AAAAE,cAAhOD,WAAgOC,EAAAA,CAAAA,GAAAA,EAA7MX,0BAA+EY,CAAAA,GAAAA,EAA8HD,qCAAAA,CAA9HC,4BAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAAwCN,aAAxCM,EAAAA,GAA0DT,uBAA1DS,GAAoFR,oBAApFQ,GAA2GV,mBAA3GU;;;;AAAoFR,UAI1LG,kBAAAA,CAJ0LH;EAAoB,MAAGF,CAAAA,EAKrND,kBALqNC;EAAmB,OAAA,CAAA,EAAA,OAAA;AAIrP;AAWA;;;;;AAA2H;AAI3H;AAAuC,iBAJfW,gBAAAA,CAIe,GAAA,EAJOb,0BAIP,EAAA,MAAA,CAAA,EAJ4CO,kBAI5C,CAAA,EAJiEL,mBAIjE;;;;AAItBF,UAJAQ,sBAAAA,CAIAR;EAA0B,uBAAA,CAAA,EAHbK,4BAGa,CAAA,yBAAA,CAAA;EAUnBS,gBAAAA,CAAAA,EAZDb,kBAYqB;EAAA,aAAA,CAAA,EAXxBA,kBAWwB;EAAA,UAAMD,CAAAA,EAVjCA,0BAUiCA;EAA0B,OAAWQ,CAAAA,EAAAA,OAAAA;;AAAgD;AAIvI;;;;;AAG0C;AAUlBO,iBAjBAD,oBAAAA,CAiBiB,GAAA,EAjBSd,0BAiBT,EAAA,MAAA,CAAA,EAjB8CQ,sBAiB9C,CAAA,EAjBuEL,uBAiBvE;;;;AAAiEC,UAbzFK,mBAAAA,CAayFL;EAAoB,cAAA,CAAA,EAZzGH,kBAYyG;iBAX3GA;cACHD;;;;;;;;;;iBAUQe,iBAAAA,MAAuBf,qCAAqCS,sBAAsBL"}
@@ -1,6 +1,6 @@
1
1
  import { MapReduceDocumentsChain, MapReduceDocumentsChainInput, RefineDocumentsChain, StuffDocumentsChain } from "../combine_docs_chain.js";
2
2
  import { BasePromptTemplate } from "@langchain/core/prompts";
3
- import * as _langchain_core_language_models_base0 from "@langchain/core/language_models/base";
3
+ import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
4
4
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
5
5
 
6
6
  //#region src/chains/question_answering/load.d.ts
@@ -16,7 +16,7 @@ type QAChainParams = ({
16
16
  } & MapReduceQAChainParams) | ({
17
17
  type?: "refine";
18
18
  } & RefineQAChainParams);
19
- declare const loadQAChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base0.BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
19
+ declare const loadQAChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
20
20
  /**
21
21
  * Represents the parameters for creating a StuffQAChain.
22
22
  */
@@ -1 +1 @@
1
- {"version":3,"file":"load.d.ts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","QAChainParams","StuffQAChainParams","MapReduceQAChainParams","RefineQAChainParams","loadQAChain","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","loadQAStuffChain","loadQAMapReduceChain","loadQARefineChain"],"sources":["../../../src/chains/question_answering/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Represents the parameters for creating a QAChain. It can be of three\n * types: \"stuff\", \"map_reduce\", or \"refine\".\n */\nexport type QAChainParams = ({\n type?: \"stuff\";\n} & StuffQAChainParams) | ({\n type?: \"map_reduce\";\n} & MapReduceQAChainParams) | ({\n type?: \"refine\";\n} & RefineQAChainParams);\nexport declare const loadQAChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\n/**\n * Represents the parameters for creating a StuffQAChain.\n */\nexport interface StuffQAChainParams {\n prompt?: BasePromptTemplate;\n verbose?: boolean;\n}\n/**\n * Loads a StuffQAChain based on the provided parameters. It takes an LLM\n * instance and StuffQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a StuffQAChain.\n * @returns A StuffQAChain instance.\n */\nexport declare function loadQAStuffChain(llm: BaseLanguageModelInterface, params?: StuffQAChainParams): StuffDocumentsChain;\n/**\n * Represents the parameters for creating a MapReduceQAChain.\n */\nexport interface MapReduceQAChainParams {\n returnIntermediateSteps?: MapReduceDocumentsChainInput[\"returnIntermediateSteps\"];\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a MapReduceQAChain based on the provided parameters. It takes an\n * LLM instance and MapReduceQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a MapReduceQAChain.\n * @returns A MapReduceQAChain instance.\n */\nexport declare function loadQAMapReduceChain(llm: BaseLanguageModelInterface, params?: MapReduceQAChainParams): MapReduceDocumentsChain;\n/**\n * Represents the parameters for creating a RefineQAChain.\n */\nexport interface RefineQAChainParams {\n questionPrompt?: BasePromptTemplate;\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a RefineQAChain based on the provided parameters. 
It takes an LLM\n * instance and RefineQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a RefineQAChain.\n * @returns A RefineQAChain instance.\n */\nexport declare function loadQARefineChain(llm: BaseLanguageModelInterface, params?: RefineQAChainParams): RefineDocumentsChain;\n"],"mappings":";;;;;;;;;;AAOA;AAAyB,KAAbM,aAAAA,GAAa,CAAA;EAAA,IAErBC,CAAAA,EAAAA,OAAAA;CAAkB,GAAlBA,kBAEAC,CAAAA,GAAAA,CAAAA;EAAsB,IAEtBC,CAAAA,EAAAA,YAAAA;AAAmB,CAAA,GAFnBD,sBAEmB,CAAA,GAAA,CAAA;EACFE,IAAAA,CAAAA,EAAAA,QAAgO;CAAA,GADjPD,mBACiP,CAAA;AAAAE,cAAhOD,WAAgOC,EAAAA,CAAAA,GAAAA,EAA7MX,0BAA+EY,CAAAA,GAAAA,EAA8HD,qCAAAA,CAA9HC,4BAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAAwCN,aAAxCM,EAAAA,GAA0DT,uBAA1DS,GAAoFR,oBAApFQ,GAA2GV,mBAA3GU;;;;AAAoFR,UAI1LG,kBAAAA,CAJ0LH;EAAoB,MAAGF,CAAAA,EAKrND,kBALqNC;EAAmB,OAAA,CAAA,EAAA,OAAA;AAIrP;AAWA;;;;;AAA2H;AAI3H;AAAuC,iBAJfW,gBAAAA,CAIe,GAAA,EAJOb,0BAIP,EAAA,MAAA,CAAA,EAJ4CO,kBAI5C,CAAA,EAJiEL,mBAIjE;;;;AAItBF,UAJAQ,sBAAAA,CAIAR;EAA0B,uBAAA,CAAA,EAHbK,4BAGa,CAAA,yBAAA,CAAA;EAUnBS,gBAAAA,CAAAA,EAZDb,kBAYqB;EAAA,aAAA,CAAA,EAXxBA,kBAWwB;EAAA,UAAMD,CAAAA,EAVjCA,0BAUiCA;EAA0B,OAAWQ,CAAAA,EAAAA,OAAAA;;AAAgD;AAIvI;;;;;AAG0C;AAUlBO,iBAjBAD,oBAAAA,CAiBiB,GAAA,EAjBSd,0BAiBT,EAAA,MAAA,CAAA,EAjB8CQ,sBAiB9C,CAAA,EAjBuEL,uBAiBvE;;;;AAAiEC,UAbzFK,mBAAAA,CAayFL;EAAoB,cAAA,CAAA,EAZzGH,kBAYyG;iBAX3GA;cACHD;;;;;;;;;;iBAUQe,iBAAAA,MAAuBf,qCAAqCS,sBAAsBL"}
1
+ {"version":3,"file":"load.d.ts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","QAChainParams","StuffQAChainParams","MapReduceQAChainParams","RefineQAChainParams","loadQAChain","_langchain_core_language_models_base2","BaseLanguageModelCallOptions","loadQAStuffChain","loadQAMapReduceChain","loadQARefineChain"],"sources":["../../../src/chains/question_answering/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Represents the parameters for creating a QAChain. It can be of three\n * types: \"stuff\", \"map_reduce\", or \"refine\".\n */\nexport type QAChainParams = ({\n type?: \"stuff\";\n} & StuffQAChainParams) | ({\n type?: \"map_reduce\";\n} & MapReduceQAChainParams) | ({\n type?: \"refine\";\n} & RefineQAChainParams);\nexport declare const loadQAChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: QAChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\n/**\n * Represents the parameters for creating a StuffQAChain.\n */\nexport interface StuffQAChainParams {\n prompt?: BasePromptTemplate;\n verbose?: boolean;\n}\n/**\n * Loads a StuffQAChain based on the provided parameters. It takes an LLM\n * instance and StuffQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a StuffQAChain.\n * @returns A StuffQAChain instance.\n */\nexport declare function loadQAStuffChain(llm: BaseLanguageModelInterface, params?: StuffQAChainParams): StuffDocumentsChain;\n/**\n * Represents the parameters for creating a MapReduceQAChain.\n */\nexport interface MapReduceQAChainParams {\n returnIntermediateSteps?: MapReduceDocumentsChainInput[\"returnIntermediateSteps\"];\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a MapReduceQAChain based on the provided parameters. It takes an\n * LLM instance and MapReduceQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a MapReduceQAChain.\n * @returns A MapReduceQAChain instance.\n */\nexport declare function loadQAMapReduceChain(llm: BaseLanguageModelInterface, params?: MapReduceQAChainParams): MapReduceDocumentsChain;\n/**\n * Represents the parameters for creating a RefineQAChain.\n */\nexport interface RefineQAChainParams {\n questionPrompt?: BasePromptTemplate;\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n verbose?: boolean;\n}\n/**\n * Loads a RefineQAChain based on the provided parameters. 
It takes an LLM\n * instance and RefineQAChainParams as parameters.\n * @param llm An instance of BaseLanguageModel.\n * @param params Parameters for creating a RefineQAChain.\n * @returns A RefineQAChain instance.\n */\nexport declare function loadQARefineChain(llm: BaseLanguageModelInterface, params?: RefineQAChainParams): RefineDocumentsChain;\n"],"mappings":";;;;;;;;;;AAOA;AAAyB,KAAbM,aAAAA,GAAa,CAAA;EAAA,IAErBC,CAAAA,EAAAA,OAAAA;CAAkB,GAAlBA,kBAEAC,CAAAA,GAAAA,CAAAA;EAAsB,IAEtBC,CAAAA,EAAAA,YAAAA;AAAmB,CAAA,GAFnBD,sBAEmB,CAAA,GAAA,CAAA;EACFE,IAAAA,CAAAA,EAAAA,QAAgO;CAAA,GADjPD,mBACiP,CAAA;AAAAE,cAAhOD,WAAgOC,EAAAA,CAAAA,GAAAA,EAA7MX,0BAA+EY,CAAAA,GAAAA,EAA8HD,qCAAAA,CAA9HC,4BAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAAwCN,aAAxCM,EAAAA,GAA0DT,uBAA1DS,GAAoFR,oBAApFQ,GAA2GV,mBAA3GU;;;;AAAoFR,UAI1LG,kBAAAA,CAJ0LH;EAAoB,MAAGF,CAAAA,EAKrND,kBALqNC;EAAmB,OAAA,CAAA,EAAA,OAAA;AAIrP;AAWA;;;;;AAA2H;AAI3H;AAAuC,iBAJfW,gBAAAA,CAIe,GAAA,EAJOb,0BAIP,EAAA,MAAA,CAAA,EAJ4CO,kBAI5C,CAAA,EAJiEL,mBAIjE;;;;AAItBF,UAJAQ,sBAAAA,CAIAR;EAA0B,uBAAA,CAAA,EAHbK,4BAGa,CAAA,yBAAA,CAAA;EAUnBS,gBAAAA,CAAAA,EAZDb,kBAYqB;EAAA,aAAA,CAAA,EAXxBA,kBAWwB;EAAA,UAAMD,CAAAA,EAVjCA,0BAUiCA;EAA0B,OAAWQ,CAAAA,EAAAA,OAAAA;;AAAgD;AAIvI;;;;;AAG0C;AAUlBO,iBAjBAD,oBAAAA,CAiBiB,GAAA,EAjBSd,0BAiBT,EAAA,MAAA,CAAA,EAjB8CQ,sBAiB9C,CAAA,EAjBuEL,uBAiBvE;;;;AAAiEC,UAbzFK,mBAAAA,CAayFL;EAAoB,cAAA,CAAA,EAZzGH,kBAYyG;iBAX3GA;cACHD;;;;;;;;;;iBAUQe,iBAAAA,MAAuBf,qCAAqCS,sBAAsBL"}
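The question_answering hunks above only rename a generated import alias (`_langchain_core_language_models_base0` → `base1`/`base2`), but they also surface the `loadQAChain` declaration: `(llm, params?: QAChainParams)`, where `type` selects "stuff", "map_reduce", or "refine". A hedged sketch of calling that loader follows; the import paths, document contents, and input keys are assumptions, not part of the diff.

```typescript
// Hedged sketch of the loadQAChain declaration shown above. Import paths and
// document contents are assumptions; the parameter shape mirrors QAChainParams.
import { ChatOpenAI } from "@langchain/openai";
import { Document } from "@langchain/core/documents";
import { loadQAChain } from "@langchain/classic/chains";

const llm = new ChatOpenAI({ model: "gpt-4o-mini" });

// `type` picks the combine-documents strategy: "stuff", "map_reduce", or "refine".
const chain = loadQAChain(llm, { type: "map_reduce", returnIntermediateSteps: true });

const docs = [
  new Document({ pageContent: "The classic package bundles the legacy LangChain chains." }),
];
const res = await chain.invoke({
  input_documents: docs,
  question: "What does the classic package bundle?",
});
console.log(res); // the combined answer is returned under the chain's output key
```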
@@ -1,6 +1,6 @@
1
1
  import { MapReduceDocumentsChain, MapReduceDocumentsChainInput, RefineDocumentsChain, StuffDocumentsChain } from "../combine_docs_chain.cjs";
2
2
  import { BasePromptTemplate } from "@langchain/core/prompts";
3
- import * as _langchain_core_language_models_base1 from "@langchain/core/language_models/base";
3
+ import * as _langchain_core_language_models_base2 from "@langchain/core/language_models/base";
4
4
  import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
5
5
 
6
6
  //#region src/chains/summarization/load.d.ts
@@ -27,7 +27,7 @@ type SummarizationChainParams = BaseParams & ({
27
27
  refineLLM?: BaseLanguageModelInterface;
28
28
  questionPrompt?: BasePromptTemplate;
29
29
  });
30
- declare const loadSummarizationChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base1.BaseLanguageModelCallOptions>, params?: SummarizationChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
30
+ declare const loadSummarizationChain: (llm: BaseLanguageModelInterface<any, _langchain_core_language_models_base2.BaseLanguageModelCallOptions>, params?: SummarizationChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;
31
31
  //#endregion
32
32
  export { SummarizationChainParams, loadSummarizationChain };
33
33
  //# sourceMappingURL=load.d.cts.map
@@ -1 +1 @@
1
- {"version":3,"file":"load.d.cts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","BaseParams","SummarizationChainParams","Pick","loadSummarizationChain","_langchain_core_language_models_base1","BaseLanguageModelCallOptions"],"sources":["../../../src/chains/summarization/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Type for the base parameters that can be used to configure a\n * summarization chain.\n */\ntype BaseParams = {\n verbose?: boolean;\n};\n/** @interface */\nexport type SummarizationChainParams = BaseParams & ({\n type?: \"stuff\";\n prompt?: BasePromptTemplate;\n} | ({\n type?: \"map_reduce\";\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n} & Pick<MapReduceDocumentsChainInput, \"returnIntermediateSteps\">) | {\n type?: \"refine\";\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n questionPrompt?: BasePromptTemplate;\n});\nexport declare const loadSummarizationChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: SummarizationChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\nexport {};\n"],"mappings":";;;;;;;;;;AAE4I;AAS5I,KAJKM,UAAAA,GAIOC;EAAwB,OAAA,CAAA,EAAA,OAAA;CAAA;;AAKbN,KALXM,wBAAAA,GAA2BD,UAKhBL,GAAAA,CAAAA;EAAkB,IACrBA,CAAAA,EAAAA,OAAAA;EAAkB,MACrBD,CAAAA,EALJC,kBAKID;CAA0B,GAAA,CAAA;EACN,IAAjCQ,CAAAA,EAAAA,YAAAA;EAAI,gBAEWP,CAAAA,EALIA,kBAKJA;EAAkB,aACrBD,CAAAA,EALIC,kBAKJD;EAA0B,UACrBC,CAAAA,EALJD,0BAKIC;AAAkB,CAAA,GAJnCO,IAImC,CAJ9BH,4BAI8B,EAAA,yBAAA,CAAA,CAAA,GAAA;EAElBI,IAAAA,CAAAA,EAAAA,QAAAA;EAAsP,YAAA,CAAA,EAJxPR,kBAIwP;EAAA,SAAAS,CAAAA,EAH3PV,0BAG2PU;EAA7G,cAA3GV,CAAAA,EAF9BC,kBAE8BD;CAA0B,CAAA;AAA0HG,cAAlLM,sBAAkLN,EAAAA,CAAAA,GAAAA,EAApJH,0BAAoJG,CAAAA,GAAAA,EAAoEO,qCAAAA,CAAzIC,4BAAAA,CAAqER,EAAAA,MAAAA,CAAAA,EAA7BI,wBAA6BJ,EAAAA,GAAAA,uBAAAA,GAA0BC,oBAA1BD,GAAiDD,mBAAjDC"}
1
+ {"version":3,"file":"load.d.cts","names":["BaseLanguageModelInterface","BasePromptTemplate","StuffDocumentsChain","MapReduceDocumentsChain","RefineDocumentsChain","MapReduceDocumentsChainInput","BaseParams","SummarizationChainParams","Pick","loadSummarizationChain","_langchain_core_language_models_base2","BaseLanguageModelCallOptions"],"sources":["../../../src/chains/summarization/load.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { StuffDocumentsChain, MapReduceDocumentsChain, RefineDocumentsChain, MapReduceDocumentsChainInput } from \"../combine_docs_chain.js\";\n/**\n * Type for the base parameters that can be used to configure a\n * summarization chain.\n */\ntype BaseParams = {\n verbose?: boolean;\n};\n/** @interface */\nexport type SummarizationChainParams = BaseParams & ({\n type?: \"stuff\";\n prompt?: BasePromptTemplate;\n} | ({\n type?: \"map_reduce\";\n combineMapPrompt?: BasePromptTemplate;\n combinePrompt?: BasePromptTemplate;\n combineLLM?: BaseLanguageModelInterface;\n} & Pick<MapReduceDocumentsChainInput, \"returnIntermediateSteps\">) | {\n type?: \"refine\";\n refinePrompt?: BasePromptTemplate;\n refineLLM?: BaseLanguageModelInterface;\n questionPrompt?: BasePromptTemplate;\n});\nexport declare const loadSummarizationChain: (llm: BaseLanguageModelInterface<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, params?: SummarizationChainParams) => MapReduceDocumentsChain | RefineDocumentsChain | StuffDocumentsChain;\nexport {};\n"],"mappings":";;;;;;;;;;AAE4I;AAS5I,KAJKM,UAAAA,GAIOC;EAAwB,OAAA,CAAA,EAAA,OAAA;CAAA;;AAKbN,KALXM,wBAAAA,GAA2BD,UAKhBL,GAAAA,CAAAA;EAAkB,IACrBA,CAAAA,EAAAA,OAAAA;EAAkB,MACrBD,CAAAA,EALJC,kBAKID;CAA0B,GAAA,CAAA;EACN,IAAjCQ,CAAAA,EAAAA,YAAAA;EAAI,gBAEWP,CAAAA,EALIA,kBAKJA;EAAkB,aACrBD,CAAAA,EALIC,kBAKJD;EAA0B,UACrBC,CAAAA,EALJD,0BAKIC;AAAkB,CAAA,GAJnCO,IAImC,CAJ9BH,4BAI8B,EAAA,yBAAA,CAAA,CAAA,GAAA;EAElBI,IAAAA,CAAAA,EAAAA,QAAAA;EAAsP,YAAA,CAAA,EAJxPR,kBAIwP;EAAA,SAAAS,CAAAA,EAH3PV,0BAG2PU;EAA7G,cAA3GV,CAAAA,EAF9BC,kBAE8BD;CAA0B,CAAA;AAA0HG,cAAlLM,sBAAkLN,EAAAA,CAAAA,GAAAA,EAApJH,0BAAoJG,CAAAA,GAAAA,EAAoEO,qCAAAA,CAAzIC,4BAAAA,CAAqER,EAAAA,MAAAA,CAAAA,EAA7BI,wBAA6BJ,EAAAA,GAAAA,uBAAAA,GAA0BC,oBAA1BD,GAAiDD,mBAAjDC"}
@@ -1 +1 @@
1
- {"version":3,"file":"trajectory.d.cts","names":["StructuredToolInterface","BaseLLMOutputParser","AgentStep","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","BaseChatModel","AgentTrajectoryEvaluator","EvalOutputType","LLMEvalChainInput","LLMTrajectoryEvaluatorArgs","ExtractLLMCallOptions","TrajectoryOutputParser","Promise","TrajectoryEvalChain","____________langchain_core_dist_prompt_values_js1","BasePromptValueInterface","Omit","Partial"],"sources":["../../../src/evaluation/agents/trajectory.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { AgentTrajectoryEvaluator, EvalOutputType, LLMEvalChainInput, LLMTrajectoryEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\n/**\n * A parser for the output of the TrajectoryEvalChain.\n */\nexport declare class TrajectoryOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for evaluating ReAct style agents.\n *\n * This chain is used to evaluate ReAct style agents by reasoning about\n * the sequence of actions taken and their outcomes.\n */\nexport declare class TrajectoryEvalChain extends AgentTrajectoryEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n outputParser: TrajectoryOutputParser;\n static resolveTrajectoryPrompt(prompt?: BasePromptTemplate | undefined, agentTools?: StructuredToolInterface[]): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Get the description of the agent tools.\n *\n * @returns The description of the agent tools.\n */\n static toolsDescription(agentTools: StructuredToolInterface[]): string;\n /**\n * Create a new TrajectoryEvalChain.\n * @param llm\n * @param agentTools - The tools used by the agent.\n * @param chainOptions - The options for the chain.\n */\n static fromLLM(llm: BaseChatModel, agentTools?: StructuredToolInterface[], chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<TrajectoryEvalChain>;\n _prepareOutput(result: ChainValues): any;\n /**\n * Get the agent trajectory as a formatted string.\n *\n * @param steps - The agent trajectory.\n * @returns The formatted agent trajectory.\n */\n getAgentTrajectory(steps: AgentStep[]): string;\n formatReference(reference?: string): string;\n _evaluateAgentTrajectory(args: LLMTrajectoryEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): 
Promise<ChainValues>;\n}\n"],"mappings":";;;;;;;;;;;;;;;AAYqBe,cAAAA,sBAAAA,SAA+Bd,mBAAT,CAA6BU,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGzDN,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EM,cAA7EN,CAAAA;;;;;;AAH0C;AAWvE;AAAwC,cAAnBY,mBAAAA,SAA4BP,wBAAAA,CAAT;EAAA,OAMtBK,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAsB,aACIT,CAAAA,EAAAA,MAAAA;EAAkB,cAA2BN,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,EAAA,OAAA;EAAgH,iBAA3GM,EAAAA,OAAAA;EAAkB,YAM/FN,EAPtBe,sBAOsBf;EAAuB,OAOvCS,uBAAAA,CAAAA,MAAAA,CAAAA,EAboBH,kBAapBG,GAAAA,SAAAA,EAAAA,UAAAA,CAAAA,EAbiET,uBAajES,EAAAA,CAAAA,EAb6FH,kBAa7FG,CAAAA,GAAAA,EAbwF,wBAAA,EAaxFA,GAAAA,CAAAA;EAAa;;;;;EAA8H,OAA3BO,gBAAAA,CAAAA,UAAAA,EAPhGhB,uBAOgGgB,EAAAA,CAAAA,EAAAA,MAAAA;EAAO;;;;;;EAUQ,OAAWb,OAAAA,CAAAA,GAAAA,EAV1IM,aAU0IN,EAAAA,UAAAA,CAAAA,EAV9GH,uBAU8GG,EAAAA,EAAAA,YAAAA,CAAAA,EAVpEkB,OAUoElB,CAV5DiB,IAU4DjB,CAVvDS,iBAUuDT,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAV1Ba,OAU0Bb,CAVlBc,mBAUkBd,CAAAA;EAAW,cAAnBa,CAAAA,MAAAA,EAT/Hb,WAS+Ha,CAAAA,EAAAA,GAAAA;EAAO;AA9BxF;;;;;4BA4B3Cd;;iCAEKW,yCAAyCC,6CAA6CP,YAAYC,qBAAqBQ,QAAQb"}
1
+ {"version":3,"file":"trajectory.d.cts","names":["StructuredToolInterface","BaseLLMOutputParser","AgentStep","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","BaseChatModel","AgentTrajectoryEvaluator","EvalOutputType","LLMEvalChainInput","LLMTrajectoryEvaluatorArgs","ExtractLLMCallOptions","TrajectoryOutputParser","Promise","TrajectoryEvalChain","____________langchain_core_dist_prompt_values_js3","BasePromptValueInterface","Omit","Partial"],"sources":["../../../src/evaluation/agents/trajectory.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { AgentTrajectoryEvaluator, EvalOutputType, LLMEvalChainInput, LLMTrajectoryEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\n/**\n * A parser for the output of the TrajectoryEvalChain.\n */\nexport declare class TrajectoryOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for evaluating ReAct style agents.\n *\n * This chain is used to evaluate ReAct style agents by reasoning about\n * the sequence of actions taken and their outcomes.\n */\nexport declare class TrajectoryEvalChain extends AgentTrajectoryEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n outputParser: TrajectoryOutputParser;\n static resolveTrajectoryPrompt(prompt?: BasePromptTemplate | undefined, agentTools?: StructuredToolInterface[]): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Get the description of the agent tools.\n *\n * @returns The description of the agent tools.\n */\n static toolsDescription(agentTools: StructuredToolInterface[]): string;\n /**\n * Create a new TrajectoryEvalChain.\n * @param llm\n * @param agentTools - The tools used by the agent.\n * @param chainOptions - The options for the chain.\n */\n static fromLLM(llm: BaseChatModel, agentTools?: StructuredToolInterface[], chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<TrajectoryEvalChain>;\n _prepareOutput(result: ChainValues): any;\n /**\n * Get the agent trajectory as a formatted string.\n *\n * @param steps - The agent trajectory.\n * @returns The formatted agent trajectory.\n */\n getAgentTrajectory(steps: AgentStep[]): string;\n formatReference(reference?: string): string;\n _evaluateAgentTrajectory(args: LLMTrajectoryEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): 
Promise<ChainValues>;\n}\n"],"mappings":";;;;;;;;;;;;;;;AAYqBe,cAAAA,sBAAAA,SAA+Bd,mBAAT,CAA6BU,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGzDN,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EM,cAA7EN,CAAAA;;;;;;AAH0C;AAWvE;AAAwC,cAAnBY,mBAAAA,SAA4BP,wBAAAA,CAAT;EAAA,OAMtBK,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAsB,aACIT,CAAAA,EAAAA,MAAAA;EAAkB,cAA2BN,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,EAAA,OAAA;EAAgH,iBAA3GM,EAAAA,OAAAA;EAAkB,YAM/FN,EAPtBe,sBAOsBf;EAAuB,OAOvCS,uBAAAA,CAAAA,MAAAA,CAAAA,EAboBH,kBAapBG,GAAAA,SAAAA,EAAAA,UAAAA,CAAAA,EAbiET,uBAajES,EAAAA,CAAAA,EAb6FH,kBAa7FG,CAAAA,GAAAA,EAbwF,wBAAA,EAaxFA,GAAAA,CAAAA;EAAa;;;;;EAA8H,OAA3BO,gBAAAA,CAAAA,UAAAA,EAPhGhB,uBAOgGgB,EAAAA,CAAAA,EAAAA,MAAAA;EAAO;;;;;;EAUQ,OAAWb,OAAAA,CAAAA,GAAAA,EAV1IM,aAU0IN,EAAAA,UAAAA,CAAAA,EAV9GH,uBAU8GG,EAAAA,EAAAA,YAAAA,CAAAA,EAVpEkB,OAUoElB,CAV5DiB,IAU4DjB,CAVvDS,iBAUuDT,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAV1Ba,OAU0Bb,CAVlBc,mBAUkBd,CAAAA;EAAW,cAAnBa,CAAAA,MAAAA,EAT/Hb,WAS+Ha,CAAAA,EAAAA,GAAAA;EAAO;AA9BxF;;;;;4BA4B3Cd;;iCAEKW,yCAAyCC,6CAA6CP,YAAYC,qBAAqBQ,QAAQb"}
@@ -1 +1 @@
1
- {"version":3,"file":"trajectory.d.ts","names":["StructuredToolInterface","BaseLLMOutputParser","AgentStep","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","BaseChatModel","AgentTrajectoryEvaluator","EvalOutputType","LLMEvalChainInput","LLMTrajectoryEvaluatorArgs","ExtractLLMCallOptions","TrajectoryOutputParser","Promise","TrajectoryEvalChain","____________langchain_core_dist_prompt_values_js1","BasePromptValueInterface","Omit","Partial"],"sources":["../../../src/evaluation/agents/trajectory.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { AgentTrajectoryEvaluator, EvalOutputType, LLMEvalChainInput, LLMTrajectoryEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\n/**\n * A parser for the output of the TrajectoryEvalChain.\n */\nexport declare class TrajectoryOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for evaluating ReAct style agents.\n *\n * This chain is used to evaluate ReAct style agents by reasoning about\n * the sequence of actions taken and their outcomes.\n */\nexport declare class TrajectoryEvalChain extends AgentTrajectoryEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n outputParser: TrajectoryOutputParser;\n static resolveTrajectoryPrompt(prompt?: BasePromptTemplate | undefined, agentTools?: StructuredToolInterface[]): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Get the description of the agent tools.\n *\n * @returns The description of the agent tools.\n */\n static toolsDescription(agentTools: StructuredToolInterface[]): string;\n /**\n * Create a new TrajectoryEvalChain.\n * @param llm\n * @param agentTools - The tools used by the agent.\n * @param chainOptions - The options for the chain.\n */\n static fromLLM(llm: BaseChatModel, agentTools?: StructuredToolInterface[], chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<TrajectoryEvalChain>;\n _prepareOutput(result: ChainValues): any;\n /**\n * Get the agent trajectory as a formatted string.\n *\n * @param steps - The agent trajectory.\n * @returns The formatted agent trajectory.\n */\n getAgentTrajectory(steps: AgentStep[]): string;\n formatReference(reference?: string): string;\n _evaluateAgentTrajectory(args: LLMTrajectoryEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): 
Promise<ChainValues>;\n}\n"],"mappings":";;;;;;;;;;;;;;;AAYqBe,cAAAA,sBAAAA,SAA+Bd,mBAAT,CAA6BU,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGzDN,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EM,cAA7EN,CAAAA;;;;;;AAH0C;AAWvE;AAAwC,cAAnBY,mBAAAA,SAA4BP,wBAAAA,CAAT;EAAA,OAMtBK,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAsB,aACIT,CAAAA,EAAAA,MAAAA;EAAkB,cAA2BN,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,EAAA,OAAA;EAAgH,iBAA3GM,EAAAA,OAAAA;EAAkB,YAM/FN,EAPtBe,sBAOsBf;EAAuB,OAOvCS,uBAAAA,CAAAA,MAAAA,CAAAA,EAboBH,kBAapBG,GAAAA,SAAAA,EAAAA,UAAAA,CAAAA,EAbiET,uBAajES,EAAAA,CAAAA,EAb6FH,kBAa7FG,CAAAA,GAAAA,EAbwF,wBAAA,EAaxFA,GAAAA,CAAAA;EAAa;;;;;EAA8H,OAA3BO,gBAAAA,CAAAA,UAAAA,EAPhGhB,uBAOgGgB,EAAAA,CAAAA,EAAAA,MAAAA;EAAO;;;;;;EAUQ,OAAWb,OAAAA,CAAAA,GAAAA,EAV1IM,aAU0IN,EAAAA,UAAAA,CAAAA,EAV9GH,uBAU8GG,EAAAA,EAAAA,YAAAA,CAAAA,EAVpEkB,OAUoElB,CAV5DiB,IAU4DjB,CAVvDS,iBAUuDT,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAV1Ba,OAU0Bb,CAVlBc,mBAUkBd,CAAAA;EAAW,cAAnBa,CAAAA,MAAAA,EAT/Hb,WAS+Ha,CAAAA,EAAAA,GAAAA;EAAO;AA9BxF;;;;;4BA4B3Cd;;iCAEKW,yCAAyCC,6CAA6CP,YAAYC,qBAAqBQ,QAAQb"}
1
+ {"version":3,"file":"trajectory.d.ts","names":["StructuredToolInterface","BaseLLMOutputParser","AgentStep","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","BaseChatModel","AgentTrajectoryEvaluator","EvalOutputType","LLMEvalChainInput","LLMTrajectoryEvaluatorArgs","ExtractLLMCallOptions","TrajectoryOutputParser","Promise","TrajectoryEvalChain","____________langchain_core_dist_prompt_values_js3","BasePromptValueInterface","Omit","Partial"],"sources":["../../../src/evaluation/agents/trajectory.d.ts"],"sourcesContent":["import type { StructuredToolInterface } from \"@langchain/core/tools\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { AgentStep } from \"@langchain/core/agents\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { AgentTrajectoryEvaluator, EvalOutputType, LLMEvalChainInput, LLMTrajectoryEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\n/**\n * A parser for the output of the TrajectoryEvalChain.\n */\nexport declare class TrajectoryOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for evaluating ReAct style agents.\n *\n * This chain is used to evaluate ReAct style agents by reasoning about\n * the sequence of actions taken and their outcomes.\n */\nexport declare class TrajectoryEvalChain extends AgentTrajectoryEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n outputParser: TrajectoryOutputParser;\n static resolveTrajectoryPrompt(prompt?: BasePromptTemplate | undefined, agentTools?: StructuredToolInterface[]): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Get the description of the agent tools.\n *\n * @returns The description of the agent tools.\n */\n static toolsDescription(agentTools: StructuredToolInterface[]): string;\n /**\n * Create a new TrajectoryEvalChain.\n * @param llm\n * @param agentTools - The tools used by the agent.\n * @param chainOptions - The options for the chain.\n */\n static fromLLM(llm: BaseChatModel, agentTools?: StructuredToolInterface[], chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<TrajectoryEvalChain>;\n _prepareOutput(result: ChainValues): any;\n /**\n * Get the agent trajectory as a formatted string.\n *\n * @param steps - The agent trajectory.\n * @returns The formatted agent trajectory.\n */\n getAgentTrajectory(steps: AgentStep[]): string;\n formatReference(reference?: string): string;\n _evaluateAgentTrajectory(args: LLMTrajectoryEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): 
Promise<ChainValues>;\n}\n"],"mappings":";;;;;;;;;;;;;;;AAYqBe,cAAAA,sBAAAA,SAA+Bd,mBAAT,CAA6BU,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGzDN,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EM,cAA7EN,CAAAA;;;;;;AAH0C;AAWvE;AAAwC,cAAnBY,mBAAAA,SAA4BP,wBAAAA,CAAT;EAAA,OAMtBK,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAsB,aACIT,CAAAA,EAAAA,MAAAA;EAAkB,cAA2BN,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,EAAA,OAAA;EAAgH,iBAA3GM,EAAAA,OAAAA;EAAkB,YAM/FN,EAPtBe,sBAOsBf;EAAuB,OAOvCS,uBAAAA,CAAAA,MAAAA,CAAAA,EAboBH,kBAapBG,GAAAA,SAAAA,EAAAA,UAAAA,CAAAA,EAbiET,uBAajES,EAAAA,CAAAA,EAb6FH,kBAa7FG,CAAAA,GAAAA,EAbwF,wBAAA,EAaxFA,GAAAA,CAAAA;EAAa;;;;;EAA8H,OAA3BO,gBAAAA,CAAAA,UAAAA,EAPhGhB,uBAOgGgB,EAAAA,CAAAA,EAAAA,MAAAA;EAAO;;;;;;EAUQ,OAAWb,OAAAA,CAAAA,GAAAA,EAV1IM,aAU0IN,EAAAA,UAAAA,CAAAA,EAV9GH,uBAU8GG,EAAAA,EAAAA,YAAAA,CAAAA,EAVpEkB,OAUoElB,CAV5DiB,IAU4DjB,CAVvDS,iBAUuDT,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAV1Ba,OAU0Bb,CAVlBc,mBAUkBd,CAAAA;EAAW,cAAnBa,CAAAA,MAAAA,EAT/Hb,WAS+Ha,CAAAA,EAAAA,GAAAA;EAAO;AA9BxF;;;;;4BA4B3Cd;;iCAEKW,yCAAyCC,6CAA6CP,YAAYC,qBAAqBQ,QAAQb"}
@@ -1 +1 @@
1
- {"version":3,"file":"pairwise.d.cts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js2","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
1
+ {"version":3,"file":"pairwise.d.cts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js0","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
@@ -1 +1 @@
1
- {"version":3,"file":"pairwise.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js2","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
1
+ {"version":3,"file":"pairwise.d.ts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMPairwiseStringEvaluator","LLMPairwiseStringEvaluatorArgs","ExtractLLMCallOptions","CriteriaLike","PairwiseStringResultOutputParser","Promise","PairwiseStringEvalChain","Record","____________langchain_core_dist_prompt_values_js1","BasePromptValueInterface","Omit","Partial","LabeledPairwiseStringEvalChain"],"sources":["../../../src/evaluation/comparison/pairwise.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMPairwiseStringEvaluator, LLMPairwiseStringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { CriteriaLike } from \"../criteria/criteria.js\";\n/**\n * A parser for the output of the PairwiseStringEvalChain.\n */\nexport declare class PairwiseStringResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n static lc_name(): string;\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs.\n */\nexport declare class PairwiseStringEvalChain extends LLMPairwiseStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n outputParser: PairwiseStringResultOutputParser;\n static resolvePairwiseCriteria(criteria?: CriteriaLike): Record<string, string>;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the PairwiseStringEvalChain.\n * @param llm\n * @param criteria The criteria to use for evaluation.\n * @param chainOptions Options to pass to the chain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<PairwiseStringEvalChain>;\n _prepareOutput(result: ChainValues): any;\n _evaluateStringPairs(args: LLMPairwiseStringEvaluatorArgs, callOptions: ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * A chain for comparing two outputs, such as the outputs\n * of two models, prompts, or outputs of a single model on similar inputs,\n * with labeled preferences.\n */\nexport declare class LabeledPairwiseStringEvalChain extends PairwiseStringEvalChain {\n static lc_name(): string;\n requiresReference: boolean;\n static resolvePairwisePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, 
any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWqBc,cAAAA,gCAAAA,SAAyCb,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAGnEJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEW,OAArEX,CAA6EI,cAA7EJ,CAAAA;;;;;;AAHoD,cAS5DY,uBAAAA,SAAgCN,0BAAAA,CAT4B;EAS5DM,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAAA;EAAuB,aAAA,CAAA,EAAA,MAAA;EAAA,cAO1BF,CAAAA,EAAAA,MAAAA;EAAgC,aACJD,EAAAA,OAAAA;EAAY,iBAAGI,EAAAA,OAAAA;EAAM,oBACzBZ,EAAAA,MAAAA;EAAkB,YAAA,EAF1CS,gCAE0C;EAA8G,OAA3GT,uBAAAA,CAAAA,QAAAA,CAAAA,EADjBQ,YACiBR,CAAAA,EADFY,MACEZ,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAkB,OAOzDL,qBAAAA,CAAAA,MAAAA,CAAAA,EAPkBK,kBAOlBL,CAAAA,EAPuCK,kBAOvCL,CAAAA,GAAAA,EAPoC,wBAAA,EAOpCA,GAAAA,CAAAA;EAA0B;;;;;;EAA2F,OAClHE,OAAAA,CAAAA,GAAAA,EADHF,0BACGE,EAAAA,QAAAA,CAAAA,EADoCW,YACpCX,EAAAA,YAAAA,CAAAA,EADiEmB,OACjEnB,CADyEkB,IACzElB,CAD8EO,iBAC9EP,EAAAA,KAAAA,CAAAA,CAAAA,CAAAA,EAD2Ga,OAC3Gb,CADmHc,uBACnHd,CAAAA;EAAW,cACPS,CAAAA,MAAAA,EADJT,WACIS,CAAAA,EAAAA,GAAAA;EAA8B,oBAAeC,CAAAA,IAAAA,EAA7CD,8BAA6CC,EAAAA,WAAAA,EAAAA,qBAAAA,CAAAA,IAAAA,CAAAA,KAAAA,CAAAA,CAAAA,EAAAA,MAAAA,CAAAA,EAA6CN,SAA7CM,GAAyDL,kBAAzDK,CAAAA,EAA8EG,OAA9EH,CAAsFV,WAAtFU,CAAAA;;;;;;AAlBG;AAyB1DU,cAAAA,8BAAAA,SAAuCN,uBAAAA,CAAT;EAAA,OAAA,OAAA,CAAA,CAAA,EAAA,MAAA;EAAA,iBAGTX,EAAAA,OAAAA;EAAkB,OAAA,qBAAA,CAAA,MAAA,CAAA,EAAlBA,kBAAkB,CAAA,EAAGA,kBAAH,CAAA,GAAA,EAAA,wBAAA,EAAA,GAAA,CAAA"}
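The only substantive change in the two `pairwise.d.*ts.map` hunks above is the generated alias for the core prompt-values import (`____________langchain_core_dist_prompt_values_js2` becomes `_js0`/`_js1`); the embedded `pairwise.d.ts` source is unchanged. That source declares `PairwiseStringEvalChain.resolvePairwiseCriteria(criteria?)` and `PairwiseStringEvalChain.fromLLM(llm, criteria?, chainOptions?)`. A minimal usage sketch follows; the `@langchain/classic/evaluation` import path and the `ChatOpenAI` model choice are assumptions, not confirmed by this diff.

```typescript
// Sketch only: the import paths and the model choice are assumptions; the
// static method signatures come from the pairwise.d.ts declarations above.
import { ChatOpenAI } from "@langchain/openai";
import { PairwiseStringEvalChain } from "@langchain/classic/evaluation";

async function main() {
  // resolvePairwiseCriteria normalizes a CriteriaLike value (a criterion name,
  // a { name: description } map, or a ConstitutionalPrinciple) into a
  // Record<string, string> of criterion descriptions.
  console.log(PairwiseStringEvalChain.resolvePairwiseCriteria("conciseness"));

  // fromLLM accepts any BaseLanguageModelInterface plus optional criteria and
  // chain options, and resolves to a configured PairwiseStringEvalChain.
  const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
  const chain = await PairwiseStringEvalChain.fromLLM(llm, "conciseness");
  console.log(chain.evaluationName);
}

main().catch(console.error);
```

Because `fromLLM` is typed against `BaseLanguageModelInterface`, any chat model from the `@langchain/*` integration packages listed in `package.json` below should slot in the same way.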
@@ -1 +1 @@
1
- {"version":3,"file":"criteria.d.cts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMStringEvaluator","StringEvaluatorArgs","ExtractLLMCallOptions","ConstitutionalPrinciple","Criteria","CriteriaLike","CriteriaResultOutputParser","Promise","CriteriaEvalInput","CriteriaEvalChain","Record","____________langchain_core_dist_prompt_values_js0","BasePromptValueInterface","Omit","Partial","input","prediction","reference","LabeledCriteriaEvalChain"],"sources":["../../../src/evaluation/criteria/criteria.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMStringEvaluator, StringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { ConstitutionalPrinciple } from \"../../chains/constitutional_ai/constitutional_principle.js\";\n/**\n * A Criteria to evaluate.\n */\nexport type Criteria = \"conciseness\" | \"relevance\" | \"correctness\" | \"coherence\" | \"harmfulness\" | \"maliciousness\" | \"helpfulness\" | \"controversiality\" | \"misogyny\" | \"criminality\" | \"insensitivity\" | \"depth\" | \"creativity\" | \"detail\";\nexport type CriteriaLike = {\n [key: string]: string;\n} | Criteria | ConstitutionalPrinciple;\n/**\n * A parser for the output of the CriteriaEvalChain.\n */\nexport declare class CriteriaResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\nexport interface CriteriaEvalInput {\n input?: string;\n output: string;\n reference?: string;\n}\nexport declare class CriteriaEvalChain extends LLMStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n // The output parser to use for the evaluation chain.\n outputParser: BaseLLMOutputParser<EvalOutputType>;\n /**\n * Resolve the criteria to evaluate.\n * @param criteria The criteria to evaluate the runs against. 
It can be:\n * - a mapping of a criterion name to its description\n * - a single criterion name present in one of the default criteria\n * - a single `ConstitutionalPrinciple` instance\n *\n * @return A dictionary mapping criterion names to descriptions.\n */\n static resolveCriteria(criteria?: CriteriaLike): Record<string, string>;\n /**\n * Resolve the prompt to use for the evaluation.\n * @param prompt\n */\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the CriteriaEvalChain.\n * @param llm\n * @param criteria\n * @param chainOptions Options to pass to the constructor of the LLMChain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<CriteriaEvalChain>;\n getEvalInput({ input, prediction, reference }: StringEvaluatorArgs): CriteriaEvalInput;\n /**\n * Prepare the output of the evaluation.\n * @param result\n */\n _prepareOutput(result: ChainValues): any;\n _evaluateStrings(args: StringEvaluatorArgs & ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * Criteria evaluation chain that requires references.\n */\nexport declare class LabeledCriteriaEvalChain extends CriteriaEvalChain {\n static lc_name(): string;\n // Whether the evaluation requires a reference text.\n requiresReference: boolean;\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWYc,KAAAA,QAAAA,GAAQ,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,eAAA,GAAA,aAAA,GAAA,kBAAA,GAAA,UAAA,GAAA,aAAA,GAAA,eAAA,GAAA,OAAA,GAAA,YAAA,GAAA,QAAA;AACRC,KAAAA,YAAAA,GAAY;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;CAAA,GAEpBD,QAAAA,GAAWD,uBAAXC;;AAAkC;AAItC;AAA+C,cAA1BE,0BAAAA,SAAmCf,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAE7DJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEa,OAArEb,CAA6EI,cAA7EJ,CAAAA;;AAA6CE,UAEzDY,iBAAAA,CAFyDZ;EAAS,KAAuBE,CAAAA,EAAAA,MAAAA;EAAc,MAAtBS,EAAAA,MAAAA;EAAO,SAFjDhB,CAAAA,EAAAA,MAAAA;AAAmB;AAI1DiB,cAKIC,iBAAAA,SAA0BT,kBAAAA,CALb;EAKbS,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAiB;EAAA,aAAA,CAAA,EAAA,MAAA;EAAA,cAQAX,CAAAA,EAAAA,MAAAA;EAAc,aAAlCP,EAAAA,OAAAA;EAAmB,iBAUCc,EAAAA,OAAAA;EAAY,oBAAGK,EAAAA,MAAAA;EAAM;EAKP,YAAA,EAflCnB,mBAekC,CAfdO,cAec,CAAA;EAA8G;;;;;;;;;EAQ1I,OAAEkB,eAAAA,CAAAA,QAAAA,CAAAA,EAbYX,YAaZW,CAAAA,EAb2BN,MAa3BM,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAU;;;;EAKE,OACXf,aAAAA,CAAAA,MAAAA,CAAAA,EAdON,kBAcPM,CAAAA,EAd4BN,kBAc5BM,CAAAA,GAAAA,EAdyB,wBAAA,EAczBA,GAAAA,CAAAA;EAAmB;;;;;;EArCmB,OAAA,OAAA,CAAA,GAAA,EA8BzCX,0BA9ByC,EAAA,QAAA,CAAA,EA8BFe,YA9BE,EAAA,YAAA,CAAA,EA8B2BS,OA9B3B,CA8BmCD,IA9BnC,CA8BwCd,iBA9BxC,EAAA,KAAA,CAAA,CAAA,CAAA,EA8BqEQ,OA9BrE,CA8B6EE,iBA9B7E,CAAA;EA0C5CS,YAAAA,CAAAA;IAAAA,KAAAA;IAAAA,UAAwB;IAAA;EAAA,CAAA,EAXMjB,mBAWN,CAAA,EAX4BO,iBAW5B;EAAA;;;;EAI4B,cAJnBC,CAAAA,MAAAA,EAN3BjB,WAM2BiB,CAAAA,EAAAA,GAAAA;EAAiB,gBAAA,CAAA,IAAA,EAL5CR,mBAK4C,GALtBC,qBAKsB,CAAA,IAAA,CAAA,KAAA,CAAA,CAAA,EAAA,MAAA,CAAA,EALuBN,SAKvB,GALmCC,kBAKnC,CAAA,EALwDU,OAKxD,CALgEf,WAKhE,CAAA;;;;;cAAlD0B,wBAAAA,SAAiCT,iBAAAA;;;;gCAIpBd,qBAAqBA,wBAAH,wBAAA"}
1
+ {"version":3,"file":"criteria.d.cts","names":["BaseLanguageModelInterface","BaseLLMOutputParser","ChainValues","ChatGeneration","Generation","BasePromptTemplate","Callbacks","BaseCallbackConfig","EvalOutputType","LLMEvalChainInput","LLMStringEvaluator","StringEvaluatorArgs","ExtractLLMCallOptions","ConstitutionalPrinciple","Criteria","CriteriaLike","CriteriaResultOutputParser","Promise","CriteriaEvalInput","CriteriaEvalChain","Record","____________langchain_core_dist_prompt_values_js1","BasePromptValueInterface","Omit","Partial","input","prediction","reference","LabeledCriteriaEvalChain"],"sources":["../../../src/evaluation/criteria/criteria.d.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { ChatGeneration, Generation } from \"@langchain/core/outputs\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { Callbacks, BaseCallbackConfig } from \"@langchain/core/callbacks/manager\";\nimport { EvalOutputType, LLMEvalChainInput, LLMStringEvaluator, StringEvaluatorArgs, type ExtractLLMCallOptions } from \"../base.js\";\nimport { ConstitutionalPrinciple } from \"../../chains/constitutional_ai/constitutional_principle.js\";\n/**\n * A Criteria to evaluate.\n */\nexport type Criteria = \"conciseness\" | \"relevance\" | \"correctness\" | \"coherence\" | \"harmfulness\" | \"maliciousness\" | \"helpfulness\" | \"controversiality\" | \"misogyny\" | \"criminality\" | \"insensitivity\" | \"depth\" | \"creativity\" | \"detail\";\nexport type CriteriaLike = {\n [key: string]: string;\n} | Criteria | ConstitutionalPrinciple;\n/**\n * A parser for the output of the CriteriaEvalChain.\n */\nexport declare class CriteriaResultOutputParser extends BaseLLMOutputParser<EvalOutputType> {\n lc_namespace: string[];\n parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;\n}\nexport interface CriteriaEvalInput {\n input?: string;\n output: string;\n reference?: string;\n}\nexport declare class CriteriaEvalChain extends LLMStringEvaluator {\n static lc_name(): string;\n criterionName?: string;\n evaluationName?: string;\n requiresInput: boolean;\n requiresReference: boolean;\n skipReferenceWarning: string;\n // The output parser to use for the evaluation chain.\n outputParser: BaseLLMOutputParser<EvalOutputType>;\n /**\n * Resolve the criteria to evaluate.\n * @param criteria The criteria to evaluate the runs against. 
It can be:\n * - a mapping of a criterion name to its description\n * - a single criterion name present in one of the default criteria\n * - a single `ConstitutionalPrinciple` instance\n *\n * @return A dictionary mapping criterion names to descriptions.\n */\n static resolveCriteria(criteria?: CriteriaLike): Record<string, string>;\n /**\n * Resolve the prompt to use for the evaluation.\n * @param prompt\n */\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n /**\n * Create a new instance of the CriteriaEvalChain.\n * @param llm\n * @param criteria\n * @param chainOptions Options to pass to the constructor of the LLMChain.\n */\n static fromLLM(llm: BaseLanguageModelInterface, criteria?: CriteriaLike, chainOptions?: Partial<Omit<LLMEvalChainInput, \"llm\">>): Promise<CriteriaEvalChain>;\n getEvalInput({ input, prediction, reference }: StringEvaluatorArgs): CriteriaEvalInput;\n /**\n * Prepare the output of the evaluation.\n * @param result\n */\n _prepareOutput(result: ChainValues): any;\n _evaluateStrings(args: StringEvaluatorArgs & ExtractLLMCallOptions<this[\"llm\"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;\n}\n/**\n * Criteria evaluation chain that requires references.\n */\nexport declare class LabeledCriteriaEvalChain extends CriteriaEvalChain {\n static lc_name(): string;\n // Whether the evaluation requires a reference text.\n requiresReference: boolean;\n static resolvePrompt(prompt?: BasePromptTemplate): BasePromptTemplate<any, import(\"../../../../langchain-core/dist/prompt_values.js\").BasePromptValueInterface, any>;\n}\n"],"mappings":";;;;;;;;;;;;;;AAWYc,KAAAA,QAAAA,GAAQ,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,WAAA,GAAA,aAAA,GAAA,eAAA,GAAA,aAAA,GAAA,kBAAA,GAAA,UAAA,GAAA,aAAA,GAAA,eAAA,GAAA,OAAA,GAAA,YAAA,GAAA,QAAA;AACRC,KAAAA,YAAAA,GAAY;EAAA,CAAA,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;CAAA,GAEpBD,QAAAA,GAAWD,uBAAXC;;AAAkC;AAItC;AAA+C,cAA1BE,0BAAAA,SAAmCf,mBAAT,CAA6BO,cAA7B,CAAA,CAAA;EAAA,YAA6BA,EAAAA,MAAAA,EAAAA;EAAc,WAE7DJ,CAAAA,WAAAA,EAAAA,UAAAA,EAAAA,GAAeD,cAAfC,EAAAA,EAAAA,UAAAA,EAA6CE,SAA7CF,GAAAA,SAAAA,CAAAA,EAAqEa,OAArEb,CAA6EI,cAA7EJ,CAAAA;;AAA6CE,UAEzDY,iBAAAA,CAFyDZ;EAAS,KAAuBE,CAAAA,EAAAA,MAAAA;EAAc,MAAtBS,EAAAA,MAAAA;EAAO,SAFjDhB,CAAAA,EAAAA,MAAAA;AAAmB;AAI1DiB,cAKIC,iBAAAA,SAA0BT,kBAAAA,CALb;EAKbS,OAAAA,OAAAA,CAAAA,CAAAA,EAAAA,MAAiB;EAAA,aAAA,CAAA,EAAA,MAAA;EAAA,cAQAX,CAAAA,EAAAA,MAAAA;EAAc,aAAlCP,EAAAA,OAAAA;EAAmB,iBAUCc,EAAAA,OAAAA;EAAY,oBAAGK,EAAAA,MAAAA;EAAM;EAKP,YAAA,EAflCnB,mBAekC,CAfdO,cAec,CAAA;EAA8G;;;;;;;;;EAQ1I,OAAEkB,eAAAA,CAAAA,QAAAA,CAAAA,EAbYX,YAaZW,CAAAA,EAb2BN,MAa3BM,CAAAA,MAAAA,EAAAA,MAAAA,CAAAA;EAAU;;;;EAKE,OACXf,aAAAA,CAAAA,MAAAA,CAAAA,EAdON,kBAcPM,CAAAA,EAd4BN,kBAc5BM,CAAAA,GAAAA,EAdyB,wBAAA,EAczBA,GAAAA,CAAAA;EAAmB;;;;;;EArCmB,OAAA,OAAA,CAAA,GAAA,EA8BzCX,0BA9ByC,EAAA,QAAA,CAAA,EA8BFe,YA9BE,EAAA,YAAA,CAAA,EA8B2BS,OA9B3B,CA8BmCD,IA9BnC,CA8BwCd,iBA9BxC,EAAA,KAAA,CAAA,CAAA,CAAA,EA8BqEQ,OA9BrE,CA8B6EE,iBA9B7E,CAAA;EA0C5CS,YAAAA,CAAAA;IAAAA,KAAAA;IAAAA,UAAwB;IAAA;EAAA,CAAA,EAXMjB,mBAWN,CAAA,EAX4BO,iBAW5B;EAAA;;;;EAI4B,cAJnBC,CAAAA,MAAAA,EAN3BjB,WAM2BiB,CAAAA,EAAAA,GAAAA;EAAiB,gBAAA,CAAA,IAAA,EAL5CR,mBAK4C,GALtBC,qBAKsB,CAAA,IAAA,CAAA,KAAA,CAAA,CAAA,EAAA,MAAA,CAAA,EALuBN,SAKvB,GALmCC,kBAKnC,CAAA,EALwDU,OAKxD,CALgEf,WAKhE,CAAA;;;;;cAAlD0B,wBAAAA,SAAiCT,iBAAAA;;;;gCAIpBd,qBAAqBA,wBAAH,wBAAA"}
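As with the pairwise maps, the `criteria.d.cts.map` hunk only renames the prompt-values alias; the embedded `criteria.d.ts` source, including the `Criteria` union and the `CriteriaEvalChain` statics, is identical on both sides. A small sketch against those declarations follows; the import path is again an assumption, and the `StringEvaluatorArgs` field names (`prediction`, `input`, `reference`) come from the classic `langchain` evaluation module rather than from this diff.

```typescript
// Sketch only: the import path is an assumption; the static and instance
// signatures come from the criteria.d.ts declarations shown in the diff.
import { ChatOpenAI } from "@langchain/openai";
import { CriteriaEvalChain } from "@langchain/classic/evaluation";

async function main() {
  // resolveCriteria accepts a built-in criterion name from the Criteria union
  // ("conciseness", "relevance", ...) or a custom { name: description } map,
  // and returns a Record<string, string> of descriptions.
  const custom = { politeness: "Is the submission polite and respectful?" };
  console.log(CriteriaEvalChain.resolveCriteria("relevance"));
  console.log(CriteriaEvalChain.resolveCriteria(custom));

  const llm = new ChatOpenAI({ model: "gpt-4o-mini", temperature: 0 });
  const chain = await CriteriaEvalChain.fromLLM(llm, custom);

  // getEvalInput maps StringEvaluatorArgs onto the { input?, output, reference? }
  // shape consumed by the evaluation prompt.
  console.log(
    chain.getEvalInput({ prediction: "The answer is 42.", input: "What is 6 * 7?" })
  );
}

main().catch(console.error);
```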
@@ -1,5 +1,5 @@
1
1
  import { ObjectTool } from "./schema.cjs";
2
- import * as _langchain_core_messages2 from "@langchain/core/messages";
2
+ import * as _langchain_core_messages0 from "@langchain/core/messages";
3
3
  import { BaseMessage } from "@langchain/core/messages";
4
4
  import { PartialValues } from "@langchain/core/utils/types";
5
5
  import { BaseChatPromptTemplate, SerializedBasePromptTemplate } from "@langchain/core/prompts";
@@ -54,7 +54,7 @@ declare class AutoGPTPrompt extends BaseChatPromptTemplate implements AutoGPTPro
54
54
  memory: VectorStoreRetrieverInterface;
55
55
  messages: BaseMessage[];
56
56
  user_input: string;
57
- }): Promise<BaseMessage<_langchain_core_messages2.MessageStructure, _langchain_core_messages2.MessageType>[]>;
57
+ }): Promise<BaseMessage<_langchain_core_messages0.MessageStructure, _langchain_core_messages0.MessageType>[]>;
58
58
  /**
59
59
  * This method is not implemented in the AutoGPTPrompt class and will
60
60
  * throw an error if called.
@@ -1 +1 @@
1
- {"version":3,"file":"prompt.d.cts","names":["VectorStoreRetrieverInterface","BaseChatPromptTemplate","SerializedBasePromptTemplate","BaseMessage","PartialValues","ObjectTool","AutoGPTPromptInput","Promise","AutoGPTPrompt","goals","memory","previousMessages","user_input","_langchain_core_messages2","MessageStructure","MessageType"],"sources":["../../../src/experimental/autogpt/prompt.d.ts"],"sourcesContent":["import type { VectorStoreRetrieverInterface } from \"@langchain/core/vectorstores\";\nimport { BaseChatPromptTemplate, SerializedBasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { PartialValues } from \"@langchain/core/utils/types\";\nimport { ObjectTool } from \"./schema.js\";\n/**\n * Interface for the input parameters of the AutoGPTPrompt class.\n */\nexport interface AutoGPTPromptInput {\n aiName: string;\n aiRole: string;\n tools: ObjectTool[];\n tokenCounter: (text: string) => Promise<number>;\n sendTokenLimit?: number;\n}\n/**\n * Class used to generate prompts for the AutoGPT model. It takes into\n * account the AI's name, role, tools, token counter, and send token\n * limit. The class also handles the formatting of messages and the\n * construction of the full prompt.\n */\nexport declare class AutoGPTPrompt extends BaseChatPromptTemplate implements AutoGPTPromptInput {\n aiName: string;\n aiRole: string;\n tools: ObjectTool[];\n tokenCounter: (text: string) => Promise<number>;\n sendTokenLimit: number;\n constructor(fields: AutoGPTPromptInput);\n _getPromptType(): \"autogpt\";\n /**\n * Constructs the full prompt based on the provided goals.\n * @param goals An array of goals.\n * @returns The full prompt as a string.\n */\n constructFullPrompt(goals: string[]): string;\n /**\n * Formats the messages based on the provided parameters.\n * @param goals An array of goals.\n * @param memory A VectorStoreRetriever instance.\n * @param messages An array of previous messages.\n * @param user_input The user's input.\n * @returns An array of formatted messages.\n */\n formatMessages({ goals, memory, messages: previousMessages, user_input }: {\n goals: string[];\n memory: VectorStoreRetrieverInterface;\n messages: BaseMessage[];\n user_input: string;\n }): Promise<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * This method is not implemented in the AutoGPTPrompt class and will\n * throw an error if called.\n * @param _values Partial values.\n * @returns Throws an error.\n */\n partial(_values: PartialValues): Promise<BaseChatPromptTemplate>;\n serialize(): SerializedBasePromptTemplate;\n}\n"],"mappings":";;;;;;;;;;;AAQiBM,UAAAA,kBAAAA,CAAkB;EAAA,MAAA,EAAA,MAAA;EAAA,MAGxBD,EAAAA,MAAAA;EAAU,KACeE,EADzBF,UACyBE,EAAAA;EAAO,YAAA,EAAA,CAAA,IAAA,EAAA,MAAA,EAAA,GAAPA,OAAO,CAAA,MAAA,CAAA;EAStBC,cAAAA,CAAAA,EAAa,MAAA;;;;;;;;AAsB8BI,cAtB3CJ,aAAAA,SAAsBP,sBAAAA,YAAkCK,kBAsBbM,CAAAA;EAAU,MAE1DZ,EAAAA,MAAAA;EAA6B,MAC3BG,EAAAA,MAAAA;EAAW,KAAAU,EAtBlBR,UAsBkBQ,EAAAA;EAEkD,YAAAA,EAAAA,CAAAA,IAAAA,EAAAA,MAAAA,EAAqCE,GAvBhFR,OAuBgFQ,CAAAA,MAAAA,CAAAA;EAAW,cAA/GZ,EAAAA,MAAAA;EAAW,WAAnBI,CAAAA,MAAAA,EArBgBD,kBAqBhBC;EAAO,cAOMH,CAAAA,CAAAA,EAAAA,SAAAA;EAAa;;;;;EAlC6D,mBAAA,CAAA,KAAA,EAAA,MAAA,EAAA,CAAA,EAAA,MAAA;;;;;;;;;;;;cAsBjDO;;;;YAE9BX;cACEG;;MAEVI,QAAQJ,YAFaU,yBAAAA,CAEkCC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;;mBAO/FX,gBAAgBG,QAAQN;eAC5BC"}
1
+ {"version":3,"file":"prompt.d.cts","names":["VectorStoreRetrieverInterface","BaseChatPromptTemplate","SerializedBasePromptTemplate","BaseMessage","PartialValues","ObjectTool","AutoGPTPromptInput","Promise","AutoGPTPrompt","goals","memory","previousMessages","user_input","_langchain_core_messages0","MessageStructure","MessageType"],"sources":["../../../src/experimental/autogpt/prompt.d.ts"],"sourcesContent":["import type { VectorStoreRetrieverInterface } from \"@langchain/core/vectorstores\";\nimport { BaseChatPromptTemplate, SerializedBasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { PartialValues } from \"@langchain/core/utils/types\";\nimport { ObjectTool } from \"./schema.js\";\n/**\n * Interface for the input parameters of the AutoGPTPrompt class.\n */\nexport interface AutoGPTPromptInput {\n aiName: string;\n aiRole: string;\n tools: ObjectTool[];\n tokenCounter: (text: string) => Promise<number>;\n sendTokenLimit?: number;\n}\n/**\n * Class used to generate prompts for the AutoGPT model. It takes into\n * account the AI's name, role, tools, token counter, and send token\n * limit. The class also handles the formatting of messages and the\n * construction of the full prompt.\n */\nexport declare class AutoGPTPrompt extends BaseChatPromptTemplate implements AutoGPTPromptInput {\n aiName: string;\n aiRole: string;\n tools: ObjectTool[];\n tokenCounter: (text: string) => Promise<number>;\n sendTokenLimit: number;\n constructor(fields: AutoGPTPromptInput);\n _getPromptType(): \"autogpt\";\n /**\n * Constructs the full prompt based on the provided goals.\n * @param goals An array of goals.\n * @returns The full prompt as a string.\n */\n constructFullPrompt(goals: string[]): string;\n /**\n * Formats the messages based on the provided parameters.\n * @param goals An array of goals.\n * @param memory A VectorStoreRetriever instance.\n * @param messages An array of previous messages.\n * @param user_input The user's input.\n * @returns An array of formatted messages.\n */\n formatMessages({ goals, memory, messages: previousMessages, user_input }: {\n goals: string[];\n memory: VectorStoreRetrieverInterface;\n messages: BaseMessage[];\n user_input: string;\n }): Promise<BaseMessage<import(\"@langchain/core/messages\").MessageStructure, import(\"@langchain/core/messages\").MessageType>[]>;\n /**\n * This method is not implemented in the AutoGPTPrompt class and will\n * throw an error if called.\n * @param _values Partial values.\n * @returns Throws an error.\n */\n partial(_values: PartialValues): Promise<BaseChatPromptTemplate>;\n serialize(): SerializedBasePromptTemplate;\n}\n"],"mappings":";;;;;;;;;;;AAQiBM,UAAAA,kBAAAA,CAAkB;EAAA,MAAA,EAAA,MAAA;EAAA,MAGxBD,EAAAA,MAAAA;EAAU,KACeE,EADzBF,UACyBE,EAAAA;EAAO,YAAA,EAAA,CAAA,IAAA,EAAA,MAAA,EAAA,GAAPA,OAAO,CAAA,MAAA,CAAA;EAStBC,cAAAA,CAAAA,EAAa,MAAA;;;;;;;;AAsB8BI,cAtB3CJ,aAAAA,SAAsBP,sBAAAA,YAAkCK,kBAsBbM,CAAAA;EAAU,MAE1DZ,EAAAA,MAAAA;EAA6B,MAC3BG,EAAAA,MAAAA;EAAW,KAAAU,EAtBlBR,UAsBkBQ,EAAAA;EAEkD,YAAAA,EAAAA,CAAAA,IAAAA,EAAAA,MAAAA,EAAqCE,GAvBhFR,OAuBgFQ,CAAAA,MAAAA,CAAAA;EAAW,cAA/GZ,EAAAA,MAAAA;EAAW,WAAnBI,CAAAA,MAAAA,EArBgBD,kBAqBhBC;EAAO,cAOMH,CAAAA,CAAAA,EAAAA,SAAAA;EAAa;;;;;EAlC6D,mBAAA,CAAA,KAAA,EAAA,MAAA,EAAA,CAAA,EAAA,MAAA;;;;;;;;;;;;cAsBjDO;;;;YAE9BX;cACEG;;MAEVI,QAAQJ,YAFaU,yBAAAA,CAEkCC,gBAAAA,EAAgBD,yBAAAA,CAAqCE,WAAAA;;;;;;;mBAO/FX,gBAAgBG,QAAQN;eAC5BC"}
@@ -18,11 +18,11 @@ const require_agents_index = require('../agents/index.cjs');
18
18
  const require_agents_load = require('../agents/load.cjs');
19
19
  const require_cache_file_system = require('../cache/file_system.cjs');
20
20
  const require_callbacks_index = require('../callbacks/index.cjs');
21
- const require_chat_models_universal = require('../chat_models/universal.cjs');
22
21
  const require_chains_history_aware_retriever = require('../chains/history_aware_retriever.cjs');
23
22
  const require_chains_index = require('../chains/index.cjs');
24
23
  const require_chains_load = require('../chains/load.cjs');
25
24
  const require_chains_retrieval = require('../chains/retrieval.cjs');
25
+ const require_chat_models_universal = require('../chat_models/universal.cjs');
26
26
  const require_document_loaders_base = require('../document_loaders/base.cjs');
27
27
  const require_chains_openai_functions_index = require('../chains/openai_functions/index.cjs');
28
28
  const require_document_transformers_openai_functions = require('../document_transformers/openai_functions.cjs');
@@ -18,11 +18,11 @@ import { agents_exports } from "../agents/index.js";
18
18
  import { load_exports } from "../agents/load.js";
19
19
  import { file_system_exports } from "../cache/file_system.js";
20
20
  import { callbacks_exports } from "../callbacks/index.js";
21
- import { universal_exports } from "../chat_models/universal.js";
22
21
  import { history_aware_retriever_exports } from "../chains/history_aware_retriever.js";
23
22
  import { chains_exports } from "../chains/index.js";
24
23
  import { load_exports as load_exports$1 } from "../chains/load.js";
25
24
  import { retrieval_exports } from "../chains/retrieval.js";
25
+ import { universal_exports } from "../chat_models/universal.js";
26
26
  import { base_exports } from "../document_loaders/base.js";
27
27
  import { openai_functions_exports as openai_functions_exports$1 } from "../chains/openai_functions/index.js";
28
28
  import { openai_functions_exports as openai_functions_exports$2 } from "../document_transformers/openai_functions.js";
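Both `import_map` hunks simply move the `chat_models/universal` binding so the generated require/import lists stay alphabetical; no entry is added or removed. In classic `langchain`, that subpath is where `initChatModel` lives, so — assuming the export carries over unchanged to `@langchain/classic`, which this diff does not itself show — usage would look like the sketch below.

```typescript
// Sketch only: the subpath export and the initChatModel call are assumptions
// carried over from classic langchain; this diff merely re-orders the generated
// import-map entry for chat_models/universal.
import { initChatModel } from "@langchain/classic/chat_models/universal";

async function main() {
  // initChatModel resolves a provider-specific chat model (here via
  // @langchain/openai, a dependency of this package) behind a uniform interface.
  const model = await initChatModel("gpt-4o-mini", {
    modelProvider: "openai",
    temperature: 0,
  });
  const res = await model.invoke("Reply with a single word: hello");
  console.log(res.content);
}

main().catch(console.error);
```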
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@langchain/classic",
3
- "version": "1.0.3",
3
+ "version": "1.0.4",
4
4
  "description": "Old abstractions form LangChain.js",
5
5
  "author": "LangChain",
6
6
  "license": "MIT",
@@ -96,36 +96,36 @@
96
96
  "voy-search": "0.6.2",
97
97
  "weaviate-client": "^3.8.0",
98
98
  "zod-to-json-schema": "^3.24.6",
99
+ "@langchain/anthropic": "1.1.0",
100
+ "@langchain/aws": "1.0.2",
101
+ "@langchain/azure-cosmosdb": "1.0.0",
99
102
  "@langchain/azure-dynamic-sessions": "1.0.0",
100
- "@langchain/anthropic": "1.0.1",
101
- "@langchain/eslint": "0.1.0",
102
- "@langchain/aws": "1.0.1",
103
- "@langchain/core": "1.0.5",
104
103
  "@langchain/baidu-qianfan": "1.0.0",
104
+ "@langchain/cloudflare": "1.0.0",
105
+ "@langchain/cohere": "1.0.0",
106
+ "@langchain/core": "1.0.5",
105
107
  "@langchain/deepseek": "1.0.1",
108
+ "@langchain/eslint": "0.1.0",
106
109
  "@langchain/exa": "1.0.0",
110
+ "@langchain/google-cloud-sql-pg": "1.0.0",
107
111
  "@langchain/google-common": "1.0.1",
108
112
  "@langchain/google-genai": "1.0.1",
109
- "@langchain/google-cloud-sql-pg": "1.0.0",
110
- "@langchain/cloudflare": "1.0.0",
111
- "@langchain/cohere": "1.0.0",
112
- "@langchain/google-vertexai-web": "1.0.1",
113
113
  "@langchain/google-vertexai": "1.0.1",
114
- "@langchain/mistralai": "1.0.0",
114
+ "@langchain/google-vertexai-web": "1.0.1",
115
115
  "@langchain/groq": "1.0.1",
116
+ "@langchain/mistralai": "1.0.0",
117
+ "@langchain/mongodb": "1.0.0",
116
118
  "@langchain/nomic": "1.0.0",
117
119
  "@langchain/ollama": "1.0.1",
120
+ "@langchain/pinecone": "1.0.0",
121
+ "@langchain/qdrant": "1.0.0",
122
+ "@langchain/redis": "1.0.0",
123
+ "@langchain/tavily": "1.0.0",
118
124
  "@langchain/textsplitters": "1.0.0",
119
125
  "@langchain/weaviate": "1.0.0",
120
- "@langchain/tavily": "1.0.0",
121
- "@langchain/redis": "1.0.0",
122
- "@langchain/pinecone": "1.0.0",
123
- "@langchain/azure-cosmosdb": "1.0.0",
124
- "@langchain/mongodb": "1.0.0",
126
+ "@langchain/xai": "1.0.1",
125
127
  "@langchain/yandex": "1.0.0",
126
- "langchain": "1.0.4",
127
- "@langchain/qdrant": "1.0.0",
128
- "@langchain/xai": "1.0.1"
128
+ "langchain": "1.0.5"
129
129
  },
130
130
  "peerDependencies": {
131
131
  "@langchain/core": "^1.0.0",
@@ -146,14 +146,14 @@
146
146
  },
147
147
  "dependencies": {
148
148
  "handlebars": "^4.7.8",
149
- "js-yaml": "^4.1.0",
149
+ "js-yaml": "^4.1.1",
150
150
  "jsonpointer": "^5.0.1",
151
151
  "openapi-types": "^12.1.3",
152
152
  "p-retry": "4",
153
153
  "uuid": "^10.0.0",
154
154
  "yaml": "^2.2.1",
155
155
  "zod": "^3.25.76 || ^4",
156
- "@langchain/openai": "1.1.1",
156
+ "@langchain/openai": "1.1.2",
157
157
  "@langchain/textsplitters": "1.0.0"
158
158
  },
159
159
  "optionalDependencies": {