langchain 1.0.0-alpha.4 → 1.0.0-alpha.6
This diff compares the contents of publicly released package versions as they appear in their respective public registries and is provided for informational purposes only.
- package/dist/agents/ReactAgent.cjs +5 -5
- package/dist/agents/ReactAgent.cjs.map +1 -1
- package/dist/agents/ReactAgent.d.cts +1 -3
- package/dist/agents/ReactAgent.d.cts.map +1 -1
- package/dist/agents/ReactAgent.d.ts +1 -3
- package/dist/agents/ReactAgent.d.ts.map +1 -1
- package/dist/agents/ReactAgent.js +6 -6
- package/dist/agents/ReactAgent.js.map +1 -1
- package/dist/agents/RunnableCallable.cjs +5 -0
- package/dist/agents/RunnableCallable.cjs.map +1 -1
- package/dist/agents/RunnableCallable.d.cts +2 -0
- package/dist/agents/RunnableCallable.d.cts.map +1 -1
- package/dist/agents/RunnableCallable.d.ts +2 -0
- package/dist/agents/RunnableCallable.d.ts.map +1 -1
- package/dist/agents/RunnableCallable.js +5 -0
- package/dist/agents/RunnableCallable.js.map +1 -1
- package/dist/agents/annotation.cjs.map +1 -1
- package/dist/agents/annotation.d.cts +5 -7
- package/dist/agents/annotation.d.cts.map +1 -1
- package/dist/agents/annotation.d.ts +4 -6
- package/dist/agents/annotation.d.ts.map +1 -1
- package/dist/agents/annotation.js.map +1 -1
- package/dist/agents/createAgent.cjs +10 -0
- package/dist/agents/createAgent.cjs.map +1 -0
- package/dist/agents/createAgent.js +10 -0
- package/dist/agents/createAgent.js.map +1 -0
- package/dist/agents/index.cjs +23 -4
- package/dist/agents/index.cjs.map +1 -1
- package/dist/agents/index.d.cts +84 -21
- package/dist/agents/index.d.cts.map +1 -1
- package/dist/agents/index.d.ts +84 -21
- package/dist/agents/index.d.ts.map +1 -1
- package/dist/agents/index.js +23 -4
- package/dist/agents/index.js.map +1 -1
- package/dist/agents/middlewareAgent/ReactAgent.cjs +255 -0
- package/dist/agents/middlewareAgent/ReactAgent.cjs.map +1 -0
- package/dist/agents/middlewareAgent/ReactAgent.d.cts +67 -0
- package/dist/agents/middlewareAgent/ReactAgent.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/ReactAgent.d.ts +67 -0
- package/dist/agents/middlewareAgent/ReactAgent.d.ts.map +1 -0
- package/dist/agents/middlewareAgent/ReactAgent.js +254 -0
- package/dist/agents/middlewareAgent/ReactAgent.js.map +1 -0
- package/dist/agents/middlewareAgent/annotation.cjs +39 -0
- package/dist/agents/middlewareAgent/annotation.cjs.map +1 -0
- package/dist/agents/middlewareAgent/annotation.js +38 -0
- package/dist/agents/middlewareAgent/annotation.js.map +1 -0
- package/dist/agents/middlewareAgent/index.cjs +11 -0
- package/dist/agents/middlewareAgent/index.cjs.map +1 -0
- package/dist/agents/middlewareAgent/index.js +11 -0
- package/dist/agents/middlewareAgent/index.js.map +1 -0
- package/dist/agents/middlewareAgent/middleware/hitl.cjs +235 -0
- package/dist/agents/middlewareAgent/middleware/hitl.cjs.map +1 -0
- package/dist/agents/middlewareAgent/middleware/hitl.d.cts +199 -0
- package/dist/agents/middlewareAgent/middleware/hitl.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/hitl.d.ts +199 -0
- package/dist/agents/middlewareAgent/middleware/hitl.d.ts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/hitl.js +234 -0
- package/dist/agents/middlewareAgent/middleware/hitl.js.map +1 -0
- package/dist/agents/middlewareAgent/middleware/index.cjs +26 -0
- package/dist/agents/middlewareAgent/middleware/index.cjs.map +1 -0
- package/dist/agents/middlewareAgent/middleware/index.d.cts +4 -0
- package/dist/agents/middlewareAgent/middleware/index.d.ts +4 -0
- package/dist/agents/middlewareAgent/middleware/index.js +17 -0
- package/dist/agents/middlewareAgent/middleware/index.js.map +1 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.cjs +182 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.cjs.map +1 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.d.cts +152 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.d.ts +152 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.d.ts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.js +181 -0
- package/dist/agents/middlewareAgent/middleware/promptCaching.js.map +1 -0
- package/dist/agents/middlewareAgent/middleware/summarization.cjs +262 -0
- package/dist/agents/middlewareAgent/middleware/summarization.cjs.map +1 -0
- package/dist/agents/middlewareAgent/middleware/summarization.d.cts +89 -0
- package/dist/agents/middlewareAgent/middleware/summarization.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/summarization.d.ts +89 -0
- package/dist/agents/middlewareAgent/middleware/summarization.d.ts.map +1 -0
- package/dist/agents/middlewareAgent/middleware/summarization.js +260 -0
- package/dist/agents/middlewareAgent/middleware/summarization.js.map +1 -0
- package/dist/agents/middlewareAgent/middleware.cjs +47 -0
- package/dist/agents/middlewareAgent/middleware.cjs.map +1 -0
- package/dist/agents/middlewareAgent/middleware.d.cts +46 -0
- package/dist/agents/middlewareAgent/middleware.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/middleware.d.ts +46 -0
- package/dist/agents/middlewareAgent/middleware.d.ts.map +1 -0
- package/dist/agents/middlewareAgent/middleware.js +46 -0
- package/dist/agents/middlewareAgent/middleware.js.map +1 -0
- package/dist/agents/middlewareAgent/nodes/AfterModalNode.cjs +29 -0
- package/dist/agents/middlewareAgent/nodes/AfterModalNode.cjs.map +1 -0
- package/dist/agents/middlewareAgent/nodes/AfterModalNode.js +29 -0
- package/dist/agents/middlewareAgent/nodes/AfterModalNode.js.map +1 -0
- package/dist/agents/middlewareAgent/nodes/AgentNode.cjs +325 -0
- package/dist/agents/middlewareAgent/nodes/AgentNode.cjs.map +1 -0
- package/dist/agents/middlewareAgent/nodes/AgentNode.js +324 -0
- package/dist/agents/middlewareAgent/nodes/AgentNode.js.map +1 -0
- package/dist/agents/middlewareAgent/nodes/BeforeModalNode.cjs +27 -0
- package/dist/agents/middlewareAgent/nodes/BeforeModalNode.cjs.map +1 -0
- package/dist/agents/middlewareAgent/nodes/BeforeModalNode.js +27 -0
- package/dist/agents/middlewareAgent/nodes/BeforeModalNode.js.map +1 -0
- package/dist/agents/middlewareAgent/nodes/middleware.cjs +73 -0
- package/dist/agents/middlewareAgent/nodes/middleware.cjs.map +1 -0
- package/dist/agents/middlewareAgent/nodes/middleware.js +73 -0
- package/dist/agents/middlewareAgent/nodes/middleware.js.map +1 -0
- package/dist/agents/middlewareAgent/nodes/utils.cjs +74 -0
- package/dist/agents/middlewareAgent/nodes/utils.cjs.map +1 -0
- package/dist/agents/middlewareAgent/nodes/utils.js +70 -0
- package/dist/agents/middlewareAgent/nodes/utils.js.map +1 -0
- package/dist/agents/middlewareAgent/types.d.cts +380 -0
- package/dist/agents/middlewareAgent/types.d.cts.map +1 -0
- package/dist/agents/middlewareAgent/types.d.ts +380 -0
- package/dist/agents/middlewareAgent/types.d.ts.map +1 -0
- package/dist/agents/nodes/AgentNode.cjs +4 -4
- package/dist/agents/nodes/AgentNode.cjs.map +1 -1
- package/dist/agents/nodes/AgentNode.js +4 -4
- package/dist/agents/nodes/AgentNode.js.map +1 -1
- package/dist/agents/nodes/ToolNode.cjs +3 -3
- package/dist/agents/nodes/ToolNode.cjs.map +1 -1
- package/dist/agents/nodes/ToolNode.d.cts +4 -5
- package/dist/agents/nodes/ToolNode.d.cts.map +1 -1
- package/dist/agents/nodes/ToolNode.d.ts +1 -2
- package/dist/agents/nodes/ToolNode.d.ts.map +1 -1
- package/dist/agents/nodes/ToolNode.js +4 -4
- package/dist/agents/nodes/ToolNode.js.map +1 -1
- package/dist/agents/responses.cjs +1 -1
- package/dist/agents/responses.cjs.map +1 -1
- package/dist/agents/responses.d.cts.map +1 -1
- package/dist/agents/responses.d.ts.map +1 -1
- package/dist/agents/responses.js +1 -1
- package/dist/agents/responses.js.map +1 -1
- package/dist/agents/types.d.cts +5 -7
- package/dist/agents/types.d.cts.map +1 -1
- package/dist/agents/types.d.ts +1 -3
- package/dist/agents/types.d.ts.map +1 -1
- package/dist/agents/utils.cjs +6 -6
- package/dist/agents/utils.cjs.map +1 -1
- package/dist/agents/utils.js +7 -7
- package/dist/agents/utils.js.map +1 -1
- package/dist/agents/withAgentName.cjs.map +1 -1
- package/dist/agents/withAgentName.js.map +1 -1
- package/dist/chains/api/api_chain.d.cts +1 -1
- package/dist/chains/api/prompts.cjs.map +1 -1
- package/dist/chains/api/prompts.js.map +1 -1
- package/dist/chains/base.d.cts +1 -1
- package/dist/chains/combine_docs_chain.d.cts +1 -1
- package/dist/chains/combine_documents/stuff.d.cts +1 -1
- package/dist/chains/constitutional_ai/constitutional_chain.cjs.map +1 -1
- package/dist/chains/constitutional_ai/constitutional_chain.js.map +1 -1
- package/dist/chains/conversational_retrieval_chain.d.cts +1 -1
- package/dist/chains/graph_qa/cypher.d.cts +1 -1
- package/dist/chains/history_aware_retriever.d.cts +2 -2
- package/dist/chains/index.cjs +0 -3
- package/dist/chains/index.cjs.map +1 -1
- package/dist/chains/index.d.cts +1 -2
- package/dist/chains/index.d.ts +1 -2
- package/dist/chains/index.js +1 -3
- package/dist/chains/index.js.map +1 -1
- package/dist/chains/llm_chain.d.cts +3 -3
- package/dist/chains/openai_functions/base.d.cts +3 -3
- package/dist/chains/openai_functions/extraction.cjs.map +1 -1
- package/dist/chains/openai_functions/extraction.d.cts +1 -3
- package/dist/chains/openai_functions/extraction.d.cts.map +1 -1
- package/dist/chains/openai_functions/extraction.d.ts +1 -3
- package/dist/chains/openai_functions/extraction.d.ts.map +1 -1
- package/dist/chains/openai_functions/extraction.js.map +1 -1
- package/dist/chains/openai_functions/index.cjs +0 -5
- package/dist/chains/openai_functions/index.cjs.map +1 -1
- package/dist/chains/openai_functions/index.d.cts +1 -2
- package/dist/chains/openai_functions/index.d.ts +1 -2
- package/dist/chains/openai_functions/index.js +1 -4
- package/dist/chains/openai_functions/index.js.map +1 -1
- package/dist/chains/openai_functions/openapi.cjs +4 -4
- package/dist/chains/openai_functions/openapi.cjs.map +1 -1
- package/dist/chains/openai_functions/openapi.d.cts +2 -2
- package/dist/chains/openai_functions/openapi.js +4 -4
- package/dist/chains/openai_functions/openapi.js.map +1 -1
- package/dist/chains/openai_functions/tagging.cjs.map +1 -1
- package/dist/chains/openai_functions/tagging.d.cts +2 -4
- package/dist/chains/openai_functions/tagging.d.cts.map +1 -1
- package/dist/chains/openai_functions/tagging.d.ts +1 -3
- package/dist/chains/openai_functions/tagging.d.ts.map +1 -1
- package/dist/chains/openai_functions/tagging.js.map +1 -1
- package/dist/chains/query_constructor/index.cjs +4 -4
- package/dist/chains/query_constructor/index.cjs.map +1 -1
- package/dist/chains/query_constructor/index.d.cts +5 -3
- package/dist/chains/query_constructor/index.d.cts.map +1 -1
- package/dist/chains/query_constructor/index.d.ts +4 -2
- package/dist/chains/query_constructor/index.d.ts.map +1 -1
- package/dist/chains/query_constructor/index.js +1 -1
- package/dist/chains/query_constructor/index.js.map +1 -1
- package/dist/chains/question_answering/map_reduce_prompts.cjs.map +1 -1
- package/dist/chains/question_answering/map_reduce_prompts.js.map +1 -1
- package/dist/chains/question_answering/refine_prompts.cjs.map +1 -1
- package/dist/chains/question_answering/refine_prompts.js.map +1 -1
- package/dist/chains/question_answering/stuff_prompts.cjs.map +1 -1
- package/dist/chains/question_answering/stuff_prompts.js.map +1 -1
- package/dist/chains/retrieval.d.cts +1 -1
- package/dist/chains/router/llm_router.d.cts +1 -1
- package/dist/chains/router/multi_prompt.cjs +4 -4
- package/dist/chains/router/multi_prompt.cjs.map +1 -1
- package/dist/chains/router/multi_prompt.js +1 -1
- package/dist/chains/router/multi_prompt.js.map +1 -1
- package/dist/chains/router/multi_retrieval_qa.cjs +4 -4
- package/dist/chains/router/multi_retrieval_qa.cjs.map +1 -1
- package/dist/chains/router/multi_retrieval_qa.js +1 -1
- package/dist/chains/router/multi_retrieval_qa.js.map +1 -1
- package/dist/chains/sql_db/sql_db_chain.d.cts +2 -2
- package/dist/chains/sql_db/sql_db_prompt.cjs.map +1 -1
- package/dist/chains/sql_db/sql_db_prompt.d.cts.map +1 -1
- package/dist/chains/sql_db/sql_db_prompt.d.ts.map +1 -1
- package/dist/chains/sql_db/sql_db_prompt.js.map +1 -1
- package/dist/chains/summarization/stuff_prompts.cjs.map +1 -1
- package/dist/chains/summarization/stuff_prompts.js.map +1 -1
- package/dist/chat_models/universal.cjs +8 -5
- package/dist/chat_models/universal.cjs.map +1 -1
- package/dist/chat_models/universal.d.cts +4 -4
- package/dist/chat_models/universal.d.cts.map +1 -1
- package/dist/chat_models/universal.d.ts +2 -2
- package/dist/chat_models/universal.d.ts.map +1 -1
- package/dist/chat_models/universal.js +8 -5
- package/dist/chat_models/universal.js.map +1 -1
- package/dist/document_loaders/fs/directory.cjs.map +1 -1
- package/dist/document_loaders/fs/directory.d.cts +0 -1
- package/dist/document_loaders/fs/directory.d.cts.map +1 -1
- package/dist/document_loaders/fs/directory.d.ts +0 -1
- package/dist/document_loaders/fs/directory.d.ts.map +1 -1
- package/dist/document_loaders/fs/directory.js.map +1 -1
- package/dist/document_loaders/fs/json.cjs +7 -1
- package/dist/document_loaders/fs/json.cjs.map +1 -1
- package/dist/document_loaders/fs/json.js +7 -1
- package/dist/document_loaders/fs/json.js.map +1 -1
- package/dist/embeddings/cache_backed.cjs +1 -1
- package/dist/embeddings/cache_backed.cjs.map +1 -1
- package/dist/embeddings/cache_backed.d.cts +1 -1
- package/dist/embeddings/cache_backed.d.ts +1 -1
- package/dist/embeddings/cache_backed.js +2 -2
- package/dist/embeddings/cache_backed.js.map +1 -1
- package/dist/evaluation/agents/trajectory.d.cts +3 -3
- package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
- package/dist/evaluation/agents/trajectory.d.ts.map +1 -1
- package/dist/evaluation/comparison/pairwise.d.cts +1 -1
- package/dist/evaluation/comparison/pairwise.d.cts.map +1 -1
- package/dist/evaluation/criteria/criteria.d.cts +1 -1
- package/dist/evaluation/criteria/criteria.d.cts.map +1 -1
- package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
- package/dist/evaluation/embedding_distance/base.cjs +2 -4
- package/dist/evaluation/embedding_distance/base.cjs.map +1 -1
- package/dist/evaluation/embedding_distance/base.js +2 -3
- package/dist/evaluation/embedding_distance/base.js.map +1 -1
- package/dist/evaluation/loader.cjs +7 -12
- package/dist/evaluation/loader.cjs.map +1 -1
- package/dist/evaluation/loader.d.cts +8 -2
- package/dist/evaluation/loader.d.cts.map +1 -1
- package/dist/evaluation/loader.d.ts +8 -2
- package/dist/evaluation/loader.d.ts.map +1 -1
- package/dist/evaluation/loader.js +7 -12
- package/dist/evaluation/loader.js.map +1 -1
- package/dist/evaluation/qa/eval_chain.d.cts +1 -1
- package/dist/hub/base.cjs.map +1 -1
- package/dist/hub/base.js.map +1 -1
- package/dist/index.cjs +3 -0
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +5 -4
- package/dist/index.d.ts +3 -2
- package/dist/index.js +3 -1
- package/dist/index.js.map +1 -1
- package/dist/langchain-core/dist/load/serializable.d.cts.map +1 -1
- package/dist/langchain-core/dist/messages/base.d.cts +24 -33
- package/dist/langchain-core/dist/messages/base.d.cts.map +1 -1
- package/dist/langchain-core/dist/messages/content/index.d.cts +1 -1
- package/dist/langchain-core/dist/messages/content/index.d.cts.map +1 -1
- package/dist/langchain-core/dist/messages/message.d.cts +598 -0
- package/dist/langchain-core/dist/messages/message.d.cts.map +1 -0
- package/dist/langchain-core/dist/messages/metadata.d.cts +97 -0
- package/dist/langchain-core/dist/messages/metadata.d.cts.map +1 -0
- package/dist/langchain-core/dist/messages/utils.d.cts +75 -0
- package/dist/langchain-core/dist/messages/utils.d.cts.map +1 -0
- package/dist/langchain-core/dist/prompt_values.d.cts.map +1 -1
- package/dist/libs/langchain-core/dist/load/serializable.d.ts.map +1 -1
- package/dist/libs/langchain-core/dist/messages/base.d.ts +24 -33
- package/dist/libs/langchain-core/dist/messages/base.d.ts.map +1 -1
- package/dist/libs/langchain-core/dist/messages/content/index.d.ts +1 -1
- package/dist/libs/langchain-core/dist/messages/content/index.d.ts.map +1 -1
- package/dist/libs/langchain-core/dist/messages/message.d.ts +598 -0
- package/dist/libs/langchain-core/dist/messages/message.d.ts.map +1 -0
- package/dist/libs/langchain-core/dist/messages/metadata.d.ts +97 -0
- package/dist/libs/langchain-core/dist/messages/metadata.d.ts.map +1 -0
- package/dist/libs/langchain-core/dist/messages/utils.d.ts +75 -0
- package/dist/libs/langchain-core/dist/messages/utils.d.ts.map +1 -0
- package/dist/libs/langchain-core/dist/prompt_values.d.ts.map +1 -1
- package/dist/libs/langchain-core/dist/utils/types/index.d.ts +2 -0
- package/dist/libs/langchain-core/dist/utils/types/index.d.ts.map +1 -1
- package/dist/libs/langchain-core/dist/utils/types/zod.d.ts +1 -0
- package/dist/load/import_map.cjs +2 -13
- package/dist/load/import_map.cjs.map +1 -1
- package/dist/load/import_map.js +2 -13
- package/dist/load/import_map.js.map +1 -1
- package/dist/memory/prompt.cjs.map +1 -1
- package/dist/memory/prompt.d.cts.map +1 -1
- package/dist/memory/prompt.d.ts.map +1 -1
- package/dist/memory/prompt.js.map +1 -1
- package/dist/memory/summary.d.cts +1 -1
- package/dist/output_parsers/combining.cjs +1 -1
- package/dist/output_parsers/combining.cjs.map +1 -1
- package/dist/output_parsers/combining.js +1 -1
- package/dist/output_parsers/combining.js.map +1 -1
- package/dist/output_parsers/expression_type_handlers/array_literal_expression_handler.cjs.map +1 -1
- package/dist/output_parsers/expression_type_handlers/array_literal_expression_handler.js.map +1 -1
- package/dist/output_parsers/expression_type_handlers/base.cjs +1 -1
- package/dist/output_parsers/expression_type_handlers/base.cjs.map +1 -1
- package/dist/output_parsers/expression_type_handlers/base.js +1 -1
- package/dist/output_parsers/expression_type_handlers/base.js.map +1 -1
- package/dist/output_parsers/fix.d.cts +1 -1
- package/dist/output_parsers/http_response.d.cts +1 -1
- package/dist/output_parsers/regex.cjs.map +1 -1
- package/dist/output_parsers/regex.js.map +1 -1
- package/dist/output_parsers/structured.cjs +4 -4
- package/dist/output_parsers/structured.cjs.map +1 -1
- package/dist/output_parsers/structured.d.cts +2 -2
- package/dist/output_parsers/structured.d.cts.map +1 -1
- package/dist/output_parsers/structured.d.ts +1 -1
- package/dist/output_parsers/structured.d.ts.map +1 -1
- package/dist/output_parsers/structured.js +2 -2
- package/dist/output_parsers/structured.js.map +1 -1
- package/dist/retrievers/ensemble.cjs.map +1 -1
- package/dist/retrievers/ensemble.js.map +1 -1
- package/dist/storage/file_system.cjs +1 -1
- package/dist/storage/file_system.cjs.map +1 -1
- package/dist/storage/file_system.js +1 -1
- package/dist/storage/file_system.js.map +1 -1
- package/dist/tools/fs.cjs +5 -5
- package/dist/tools/fs.cjs.map +1 -1
- package/dist/tools/fs.d.cts +1 -1
- package/dist/tools/fs.d.cts.map +1 -1
- package/dist/tools/fs.d.ts +1 -1
- package/dist/tools/fs.d.ts.map +1 -1
- package/dist/tools/fs.js +1 -1
- package/dist/tools/fs.js.map +1 -1
- package/dist/tools/json.d.cts +1 -1
- package/dist/tools/retriever.cjs +2 -2
- package/dist/tools/retriever.cjs.map +1 -1
- package/dist/tools/retriever.d.cts +2 -2
- package/dist/tools/retriever.d.cts.map +1 -1
- package/dist/tools/retriever.d.ts +1 -1
- package/dist/tools/retriever.d.ts.map +1 -1
- package/dist/tools/retriever.js +1 -1
- package/dist/tools/retriever.js.map +1 -1
- package/dist/tools/sql.cjs +1 -2
- package/dist/tools/sql.cjs.map +1 -1
- package/dist/tools/sql.d.cts +1 -1
- package/dist/tools/sql.d.cts.map +1 -1
- package/dist/tools/sql.d.ts +1 -1
- package/dist/tools/sql.d.ts.map +1 -1
- package/dist/tools/sql.js +1 -2
- package/dist/tools/sql.js.map +1 -1
- package/dist/tools/vectorstore.d.cts +1 -1
- package/dist/tools/webbrowser.d.cts +1 -1
- package/dist/types/expression-parser.d.cts +2 -0
- package/dist/types/expression-parser.d.cts.map +1 -1
- package/dist/types/expression-parser.d.ts +2 -0
- package/dist/types/expression-parser.d.ts.map +1 -1
- package/dist/util/hub.cjs +1 -1
- package/dist/util/hub.js +1 -1
- package/dist/util/openapi.cjs +1 -1
- package/dist/util/openapi.cjs.map +1 -1
- package/dist/util/openapi.js +1 -1
- package/dist/util/openapi.js.map +1 -1
- package/package.json +21 -16
- package/dist/chains/openai_functions/structured_output.cjs +0 -107
- package/dist/chains/openai_functions/structured_output.cjs.map +0 -1
- package/dist/chains/openai_functions/structured_output.d.cts +0 -38
- package/dist/chains/openai_functions/structured_output.d.cts.map +0 -1
- package/dist/chains/openai_functions/structured_output.d.ts +0 -38
- package/dist/chains/openai_functions/structured_output.d.ts.map +0 -1
- package/dist/chains/openai_functions/structured_output.js +0 -105
- package/dist/chains/openai_functions/structured_output.js.map +0 -1
- package/dist/chains/openai_moderation.cjs +0 -107
- package/dist/chains/openai_moderation.cjs.map +0 -1
- package/dist/chains/openai_moderation.d.cts +0 -74
- package/dist/chains/openai_moderation.d.cts.map +0 -1
- package/dist/chains/openai_moderation.d.ts +0 -74
- package/dist/chains/openai_moderation.d.ts.map +0 -1
- package/dist/chains/openai_moderation.js +0 -106
- package/dist/chains/openai_moderation.js.map +0 -1
package/dist/agents/utils.js
CHANGED
@@ -1,6 +1,6 @@
 import { MultipleToolsBoundError } from "./errors.js";
 import { PROMPT_RUNNABLE_NAME } from "./constants.js";
-import { AIMessage,
+import { AIMessage, AIMessageChunk, SystemMessage } from "@langchain/core/messages";
 import { Runnable, RunnableBinding, RunnableLambda, RunnableSequence } from "@langchain/core/runnables";
 
 //#region src/agents/utils.ts
@@ -21,11 +21,11 @@ const CONTENT_PATTERN = /<content>(.*?)<\/content>/s;
  * @internal
  */
 function _addInlineAgentName(message) {
-
-if (!
+if (!AIMessage.isInstance(message) || AIMessageChunk.isInstance(message)) return message;
+if (!message.name) return message;
 const { name } = message;
 if (typeof message.content === "string") return new AIMessage({
-...
+...message.lc_kwargs,
 content: `<name>${name}</name><content>${message.content}</content>`,
 name: void 0
 });
@@ -68,7 +68,7 @@ function _addInlineAgentName(message) {
  * @internal
  */
 function _removeInlineAgentName(message) {
-if (!
+if (!AIMessage.isInstance(message) || !message.content) return message;
 let updatedContent = [];
 let updatedName;
 if (Array.isArray(message.content)) updatedContent = message.content.filter((block) => {
@@ -130,7 +130,7 @@ function getPromptRunnable(prompt) {
 promptRunnable = RunnableLambda.from((state) => {
 return [systemMessage, ...state.messages ?? []];
 }).withConfig({ runName: PROMPT_RUNNABLE_NAME });
-} else if (
+} else if (SystemMessage.isInstance(prompt)) promptRunnable = RunnableLambda.from((state) => [prompt, ...state.messages]).withConfig({ runName: PROMPT_RUNNABLE_NAME });
 else if (typeof prompt === "function") promptRunnable = RunnableLambda.from(prompt).withConfig({ runName: PROMPT_RUNNABLE_NAME });
 else if (Runnable.isRunnable(prompt)) promptRunnable = prompt;
 else throw new Error(`Got unexpected type for 'prompt': ${typeof prompt}`);
@@ -226,7 +226,7 @@ function validateLLMHasNoBoundTools(llm) {
  */
 function hasToolCalls(messages) {
 const lastMessage = messages.at(-1);
-return Boolean(lastMessage
+return Boolean(AIMessage.isInstance(lastMessage) && lastMessage.tool_calls && lastMessage.tool_calls.length > 0);
 }
 const CHAT_MODELS_THAT_SUPPORT_JSON_SCHEMA_OUTPUT = ["ChatOpenAI", "FakeToolCallingModel"];
 /**
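The utils.js hunks above change the guard style from instanceof checks and the isAIMessage()/isBaseMessage() helper functions to static AIMessage.isInstance(), AIMessageChunk.isInstance(), and SystemMessage.isInstance() checks, and the rebuilt messages now spread message.lc_kwargs directly. A minimal TypeScript sketch of the new pattern, based only on the calls visible in this diff; whether isInstance() narrows the type on your @langchain/core build is an assumption here, hence the explicit cast:

import { AIMessage, type BaseMessage } from "@langchain/core/messages";

// Mirrors the rewritten hasToolCalls(): use the static isInstance() check
// rather than `lastMessage instanceof AIMessage` or the isAIMessage() helper.
function lastMessageHasToolCalls(messages: BaseMessage[]): boolean {
  const last = messages.at(-1);
  if (last === undefined || !AIMessage.isInstance(last)) return false;
  const ai = last as AIMessage; // cast covers builds where isInstance() is not declared as a type guard
  return (ai.tool_calls?.length ?? 0) > 0;
}

The same substitution appears in getPromptRunnable(), where the earlier isBaseMessage(prompt) && prompt._getType() === "system" branch is now SystemMessage.isInstance(prompt).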
package/dist/agents/utils.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"utils.js","names":["message: T","updatedContent: MessageContent","updatedName: string | undefined","tool: ClientTool | ServerTool","model: LanguageModelLike","model: unknown","llm: LanguageModelLike","prompt?: Prompt","promptRunnable: Runnable","state: typeof MessagesAnnotation.State","toolClasses: (ClientTool | ServerTool)[]","options: Partial<BaseChatModelCallOptions>","model","nextSteps: unknown[]","step: RunnableLike","messages: BaseMessage[]"],"sources":["../../src/agents/utils.ts"],"sourcesContent":["/* eslint-disable no-instanceof/no-instanceof */\nimport {\n AIMessage,\n BaseMessage,\n BaseMessageLike,\n MessageContent,\n SystemMessage,\n AIMessageChunk,\n isAIMessage,\n isAIMessageChunk,\n isBaseMessage,\n isBaseMessageChunk,\n} from \"@langchain/core/messages\";\nimport { MessagesAnnotation } from \"@langchain/langgraph\";\nimport {\n BaseChatModel,\n type BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n LanguageModelLike,\n BaseLanguageModelInput,\n} from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnableLike,\n RunnableConfig,\n RunnableLambda,\n RunnableSequence,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\n\nimport { MultipleToolsBoundError } from \"./errors.js\";\nimport { PROMPT_RUNNABLE_NAME } from \"./constants.js\";\nimport {\n ServerTool,\n ClientTool,\n ConfigurableModelInterface,\n Prompt,\n} from \"./types.js\";\n\nconst NAME_PATTERN = /<name>(.*?)<\\/name>/s;\nconst CONTENT_PATTERN = /<content>(.*?)<\\/content>/s;\n\nexport type AgentNameMode = \"inline\";\n\n/**\n * Attach formatted agent names to the messages passed to and from a language model.\n *\n * This is useful for making a message history with multiple agents more coherent.\n *\n * NOTE: agent name is consumed from the message.name field.\n * If you're using an agent built with createAgent, name is automatically set.\n * If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.\n *\n * @param message - Message to add agent name formatting to\n * @returns Message with agent name formatting\n *\n * @internal\n */\nexport function _addInlineAgentName<T extends BaseMessageLike>(\n message: T\n): T | AIMessage {\n const isAI =\n isBaseMessage(message) &&\n (isAIMessage(message) ||\n (isBaseMessageChunk(message) && isAIMessageChunk(message)));\n\n if (!isAI || !message.name) {\n return message;\n }\n\n const { name } = message;\n\n if (typeof message.content === \"string\") {\n return new AIMessage({\n ...(Object.keys(message.lc_kwargs ?? {}).length > 0\n ? 
message.lc_kwargs\n : message),\n content: `<name>${name}</name><content>${message.content}</content>`,\n name: undefined,\n });\n }\n\n const updatedContent = [];\n let textBlockCount = 0;\n\n for (const contentBlock of message.content) {\n if (typeof contentBlock === \"string\") {\n textBlockCount += 1;\n updatedContent.push(\n `<name>${name}</name><content>${contentBlock}</content>`\n );\n } else if (\n typeof contentBlock === \"object\" &&\n \"type\" in contentBlock &&\n contentBlock.type === \"text\"\n ) {\n textBlockCount += 1;\n updatedContent.push({\n ...contentBlock,\n text: `<name>${name}</name><content>${contentBlock.text}</content>`,\n });\n } else {\n updatedContent.push(contentBlock);\n }\n }\n\n if (!textBlockCount) {\n updatedContent.unshift({\n type: \"text\",\n text: `<name>${name}</name><content></content>`,\n });\n }\n return new AIMessage({\n ...message.lc_kwargs,\n content: updatedContent as MessageContent,\n name: undefined,\n });\n}\n\n/**\n * Remove explicit name and content XML tags from the AI message content.\n *\n * Examples:\n *\n * @example\n * ```typescript\n * removeInlineAgentName(new AIMessage({ content: \"<name>assistant</name><content>Hello</content>\", name: \"assistant\" }))\n * // AIMessage with content: \"Hello\"\n *\n * removeInlineAgentName(new AIMessage({ content: [{type: \"text\", text: \"<name>assistant</name><content>Hello</content>\"}], name: \"assistant\" }))\n * // AIMessage with content: [{type: \"text\", text: \"Hello\"}]\n * ```\n *\n * @internal\n */\nexport function _removeInlineAgentName<T extends BaseMessage>(message: T): T {\n if (!isAIMessage(message) || !message.content) {\n return message;\n }\n\n let updatedContent: MessageContent = [];\n let updatedName: string | undefined;\n\n if (Array.isArray(message.content)) {\n updatedContent = message.content\n .filter((block) => {\n if (block.type === \"text\" && typeof block.text === \"string\") {\n const nameMatch = block.text.match(NAME_PATTERN);\n const contentMatch = block.text.match(CONTENT_PATTERN);\n // don't include empty content blocks that were added because there was no text block to modify\n if (nameMatch && (!contentMatch || contentMatch[1] === \"\")) {\n // capture name from text block\n // eslint-disable-next-line prefer-destructuring\n updatedName = nameMatch[1];\n return false;\n }\n return true;\n }\n return true;\n })\n .map((block) => {\n if (block.type === \"text\" && typeof block.text === \"string\") {\n const nameMatch = block.text.match(NAME_PATTERN);\n const contentMatch = block.text.match(CONTENT_PATTERN);\n\n if (!nameMatch || !contentMatch) {\n return block;\n }\n\n // capture name from text block\n // eslint-disable-next-line prefer-destructuring\n updatedName = nameMatch[1];\n\n return {\n ...block,\n text: contentMatch[1],\n };\n }\n return block;\n });\n } else {\n const content = message.content as string;\n const nameMatch = content.match(NAME_PATTERN);\n const contentMatch = content.match(CONTENT_PATTERN);\n\n if (!nameMatch || !contentMatch) {\n return message;\n }\n\n // eslint-disable-next-line prefer-destructuring\n updatedName = nameMatch[1];\n // eslint-disable-next-line prefer-destructuring\n updatedContent = contentMatch[1];\n }\n\n return new AIMessage({\n ...(Object.keys(message.lc_kwargs ?? {}).length > 0\n ? 
message.lc_kwargs\n : message),\n content: updatedContent,\n name: updatedName,\n }) as T;\n}\n\nexport function isClientTool(\n tool: ClientTool | ServerTool\n): tool is ClientTool {\n return Runnable.isRunnable(tool);\n}\n\nexport function isBaseChatModel(\n model: LanguageModelLike\n): model is BaseChatModel {\n return (\n \"invoke\" in model &&\n typeof model.invoke === \"function\" &&\n \"_modelType\" in model\n );\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport function isConfigurableModel(\n model: unknown\n): model is ConfigurableModelInterface {\n return (\n typeof model === \"object\" &&\n model != null &&\n \"_queuedMethodOperations\" in model &&\n \"_model\" in model &&\n typeof model._model === \"function\"\n );\n}\n\nfunction _isChatModelWithBindTools(\n llm: LanguageModelLike\n): llm is BaseChatModel & Required<Pick<BaseChatModel, \"bindTools\">> {\n if (!isBaseChatModel(llm)) return false;\n return \"bindTools\" in llm && typeof llm.bindTools === \"function\";\n}\n\nexport function getPromptRunnable(prompt?: Prompt): Runnable {\n let promptRunnable: Runnable;\n\n if (prompt == null) {\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => state.messages\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (typeof prompt === \"string\") {\n const systemMessage = new SystemMessage(prompt);\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => {\n return [systemMessage, ...(state.messages ?? [])];\n }\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (isBaseMessage(prompt) && prompt._getType() === \"system\") {\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => [prompt, ...state.messages]\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (typeof prompt === \"function\") {\n promptRunnable = RunnableLambda.from(prompt).withConfig({\n runName: PROMPT_RUNNABLE_NAME,\n });\n } else if (Runnable.isRunnable(prompt)) {\n promptRunnable = prompt;\n } else {\n throw new Error(`Got unexpected type for 'prompt': ${typeof prompt}`);\n }\n\n return promptRunnable;\n}\n\nexport async function shouldBindTools(\n llm: LanguageModelLike,\n tools: (ClientTool | ServerTool)[]\n): Promise<boolean> {\n // If model is a RunnableSequence, find a RunnableBinding or BaseChatModel in its steps\n let model = llm;\n if (RunnableSequence.isRunnableSequence(model)) {\n model =\n model.steps.find(\n (step) =>\n RunnableBinding.isRunnableBinding(step) ||\n isBaseChatModel(step) ||\n isConfigurableModel(step)\n ) || model;\n }\n\n if (isConfigurableModel(model)) {\n model = await model._model();\n }\n\n // If not a RunnableBinding, we should bind tools\n if (!RunnableBinding.isRunnableBinding(model)) {\n return true;\n }\n\n let boundTools = (() => {\n // check if model.kwargs contain the tools key\n if (\n model.kwargs != null &&\n typeof model.kwargs === \"object\" &&\n \"tools\" in model.kwargs &&\n Array.isArray(model.kwargs.tools)\n ) {\n return (model.kwargs.tools ?? null) as BindToolsInput[] | null;\n }\n\n // Some models can bind the tools via `withConfig()` instead of `bind()`\n if (\n model.config != null &&\n typeof model.config === \"object\" &&\n \"tools\" in model.config &&\n Array.isArray(model.config.tools)\n ) {\n return (model.config.tools ?? 
null) as BindToolsInput[] | null;\n }\n\n return null;\n })();\n\n // google-style\n if (\n boundTools != null &&\n boundTools.length === 1 &&\n \"functionDeclarations\" in boundTools[0]\n ) {\n boundTools = boundTools[0].functionDeclarations;\n }\n\n // If no tools in kwargs, we should bind tools\n if (boundTools == null) return true;\n\n // Check if tools count matches\n if (tools.length !== boundTools.length) {\n throw new Error(\n \"Number of tools in the model.bindTools() and tools passed to createAgent must match\"\n );\n }\n\n const toolNames = new Set<string>(\n tools.flatMap((tool) => (isClientTool(tool) ? tool.name : []))\n );\n\n const boundToolNames = new Set<string>();\n\n for (const boundTool of boundTools) {\n let boundToolName: string | undefined;\n\n // OpenAI-style tool\n if (\"type\" in boundTool && boundTool.type === \"function\") {\n boundToolName = boundTool.function.name;\n }\n // Anthropic or Google-style tool\n else if (\"name\" in boundTool) {\n boundToolName = boundTool.name;\n }\n // Bedrock-style tool\n else if (\"toolSpec\" in boundTool && \"name\" in boundTool.toolSpec) {\n boundToolName = boundTool.toolSpec.name;\n }\n // unknown tool type so we'll ignore it\n else {\n continue;\n }\n\n if (boundToolName) {\n boundToolNames.add(boundToolName);\n }\n }\n\n const missingTools = [...toolNames].filter((x) => !boundToolNames.has(x));\n if (missingTools.length > 0) {\n throw new Error(\n `Missing tools '${missingTools}' in the model.bindTools().` +\n `Tools in the model.bindTools() must match the tools passed to createAgent.`\n );\n }\n\n return false;\n}\n\nconst _simpleBindTools = (\n llm: LanguageModelLike,\n toolClasses: (ClientTool | ServerTool)[],\n options: Partial<BaseChatModelCallOptions> = {}\n) => {\n if (_isChatModelWithBindTools(llm)) {\n return llm.bindTools(toolClasses, options);\n }\n\n if (\n RunnableBinding.isRunnableBinding(llm) &&\n _isChatModelWithBindTools(llm.bound)\n ) {\n const newBound = llm.bound.bindTools(toolClasses, options);\n\n if (RunnableBinding.isRunnableBinding(newBound)) {\n return new RunnableBinding({\n bound: newBound.bound,\n config: { ...llm.config, ...newBound.config },\n kwargs: { ...llm.kwargs, ...newBound.kwargs },\n configFactories: newBound.configFactories ?? 
llm.configFactories,\n });\n }\n\n return new RunnableBinding({\n bound: newBound,\n config: llm.config,\n kwargs: llm.kwargs,\n configFactories: llm.configFactories,\n });\n }\n\n return null;\n};\n\nexport async function bindTools(\n llm: LanguageModelLike,\n toolClasses: (ClientTool | ServerTool)[],\n options: Partial<BaseChatModelCallOptions> = {}\n): Promise<\n | RunnableSequence<any, any>\n | RunnableBinding<any, any, RunnableConfig<Record<string, any>>>\n | Runnable<BaseLanguageModelInput, AIMessageChunk, BaseChatModelCallOptions>\n> {\n const model = _simpleBindTools(llm, toolClasses, options);\n if (model) return model;\n\n if (isConfigurableModel(llm)) {\n const model = _simpleBindTools(await llm._model(), toolClasses, options);\n if (model) return model;\n }\n\n if (RunnableSequence.isRunnableSequence(llm)) {\n const modelStep = llm.steps.findIndex(\n (step) =>\n RunnableBinding.isRunnableBinding(step) ||\n isBaseChatModel(step) ||\n isConfigurableModel(step)\n );\n\n if (modelStep >= 0) {\n const model = _simpleBindTools(\n llm.steps[modelStep],\n toolClasses,\n options\n );\n if (model) {\n const nextSteps: unknown[] = llm.steps.slice();\n nextSteps.splice(modelStep, 1, model);\n\n return RunnableSequence.from(\n nextSteps as [RunnableLike, ...RunnableLike[], RunnableLike]\n );\n }\n }\n }\n\n throw new Error(`llm ${llm} must define bindTools method.`);\n}\n\n/**\n * Check if the LLM already has bound tools and throw if it does.\n *\n * @param llm - The LLM to check.\n * @returns void\n */\nexport function validateLLMHasNoBoundTools(llm: LanguageModelLike): void {\n /**\n * If llm is a function, we can't validate until runtime, so skip\n */\n if (typeof llm === \"function\") {\n return;\n }\n\n let model = llm;\n\n /**\n * If model is a RunnableSequence, find a RunnableBinding in its steps\n */\n if (RunnableSequence.isRunnableSequence(model)) {\n model =\n model.steps.find((step: RunnableLike) =>\n RunnableBinding.isRunnableBinding(step)\n ) || model;\n }\n\n /**\n * If model is configurable, get the underlying model\n */\n if (isConfigurableModel(model)) {\n /**\n * Can't validate async model retrieval in constructor\n */\n return;\n }\n\n /**\n * Check if model is a RunnableBinding with bound tools\n */\n if (RunnableBinding.isRunnableBinding(model)) {\n const hasToolsInKwargs =\n model.kwargs != null &&\n typeof model.kwargs === \"object\" &&\n \"tools\" in model.kwargs &&\n Array.isArray(model.kwargs.tools) &&\n model.kwargs.tools.length > 0;\n\n const hasToolsInConfig =\n model.config != null &&\n typeof model.config === \"object\" &&\n \"tools\" in model.config &&\n Array.isArray(model.config.tools) &&\n model.config.tools.length > 0;\n\n if (hasToolsInKwargs || hasToolsInConfig) {\n throw new MultipleToolsBoundError();\n }\n }\n\n /**\n * Also check if model has tools property directly (e.g., FakeToolCallingModel)\n */\n if (\n \"tools\" in model &&\n model.tools !== undefined &&\n Array.isArray(model.tools) &&\n model.tools.length > 0\n ) {\n throw new MultipleToolsBoundError();\n }\n}\n\n/**\n * Check if the last message in the messages array has tool calls.\n *\n * @param messages - The messages to check.\n * @returns True if the last message has tool calls, false otherwise.\n */\nexport function hasToolCalls(messages: BaseMessage[]): boolean {\n const lastMessage = messages.at(-1);\n return Boolean(\n lastMessage instanceof AIMessage &&\n lastMessage.tool_calls &&\n lastMessage.tool_calls.length > 0\n );\n}\n\n/**\n * Check if the model name supports structured 
output\n * @param modelName - The name of the model\n * @returns True if the model supports structured output, false otherwise\n */\nexport function hasSupportForStructuredOutput(modelName?: string): boolean {\n return (\n modelName?.startsWith(\"gpt-4\") || modelName?.startsWith(\"gpt-5\") || false\n );\n}\n\nconst CHAT_MODELS_THAT_SUPPORT_JSON_SCHEMA_OUTPUT = [\n \"ChatOpenAI\",\n \"FakeToolCallingModel\",\n];\n\n/**\n * Identifies the models that support JSON schema output\n * @param model - The model to check\n * @returns True if the model supports JSON schema output, false otherwise\n */\nexport function hasSupportForJsonSchemaOutput(\n model: LanguageModelLike\n): boolean {\n if (!isBaseChatModel(model)) {\n return false;\n }\n\n const chatModelClass = model.getName();\n if (\n CHAT_MODELS_THAT_SUPPORT_JSON_SCHEMA_OUTPUT.includes(chatModelClass) &&\n ((chatModelClass === \"ChatOpenAI\" &&\n /**\n * OpenAI models\n */\n \"model\" in model &&\n typeof model.model === \"string\" &&\n model.model.startsWith(\"gpt-4\")) ||\n /**\n * for testing purposes only\n */\n (chatModelClass === \"FakeToolCallingModel\" &&\n \"structuredResponse\" in model))\n ) {\n return true;\n }\n\n return false;\n}\n"],"mappings":";;;;;;AAyCA,MAAM,eAAe;AACrB,MAAM,kBAAkB;;;;;;;;;;;;;;;AAkBxB,SAAgB,oBACdA,SACe;CACf,MAAM,OACJ,cAAc,QAAQ,KACrB,YAAY,QAAQ,IAClB,mBAAmB,QAAQ,IAAI,iBAAiB,QAAQ;AAE7D,KAAI,CAAC,QAAQ,CAAC,QAAQ,KACpB,QAAO;CAGT,MAAM,EAAE,MAAM,GAAG;AAEjB,KAAI,OAAO,QAAQ,YAAY,SAC7B,QAAO,IAAI,UAAU;EACnB,GAAI,OAAO,KAAK,QAAQ,aAAa,CAAE,EAAC,CAAC,SAAS,IAC9C,QAAQ,YACR;EACJ,SAAS,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,QAAQ,QAAQ,UAAU,CAAC;EACpE,MAAM;CACP;CAGH,MAAM,iBAAiB,CAAE;CACzB,IAAI,iBAAiB;AAErB,MAAK,MAAM,gBAAgB,QAAQ,QACjC,KAAI,OAAO,iBAAiB,UAAU;EACpC,kBAAkB;EAClB,eAAe,KACb,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,aAAa,UAAU,CAAC,CACzD;CACF,WACC,OAAO,iBAAiB,YACxB,UAAU,gBACV,aAAa,SAAS,QACtB;EACA,kBAAkB;EAClB,eAAe,KAAK;GAClB,GAAG;GACH,MAAM,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,aAAa,KAAK,UAAU,CAAC;EACpE,EAAC;CACH,OACC,eAAe,KAAK,aAAa;AAIrC,KAAI,CAAC,gBACH,eAAe,QAAQ;EACrB,MAAM;EACN,MAAM,CAAC,MAAM,EAAE,KAAK,0BAA0B,CAAC;CAChD,EAAC;AAEJ,QAAO,IAAI,UAAU;EACnB,GAAG,QAAQ;EACX,SAAS;EACT,MAAM;CACP;AACF;;;;;;;;;;;;;;;;;AAkBD,SAAgB,uBAA8CA,SAAe;AAC3E,KAAI,CAAC,YAAY,QAAQ,IAAI,CAAC,QAAQ,QACpC,QAAO;CAGT,IAAIC,iBAAiC,CAAE;CACvC,IAAIC;AAEJ,KAAI,MAAM,QAAQ,QAAQ,QAAQ,EAChC,iBAAiB,QAAQ,QACtB,OAAO,CAAC,UAAU;AACjB,MAAI,MAAM,SAAS,UAAU,OAAO,MAAM,SAAS,UAAU;GAC3D,MAAM,YAAY,MAAM,KAAK,MAAM,aAAa;GAChD,MAAM,eAAe,MAAM,KAAK,MAAM,gBAAgB;AAEtD,OAAI,cAAc,CAAC,gBAAgB,aAAa,OAAO,KAAK;IAG1D,cAAc,UAAU;AACxB,WAAO;GACR;AACD,UAAO;EACR;AACD,SAAO;CACR,EAAC,CACD,IAAI,CAAC,UAAU;AACd,MAAI,MAAM,SAAS,UAAU,OAAO,MAAM,SAAS,UAAU;GAC3D,MAAM,YAAY,MAAM,KAAK,MAAM,aAAa;GAChD,MAAM,eAAe,MAAM,KAAK,MAAM,gBAAgB;AAEtD,OAAI,CAAC,aAAa,CAAC,aACjB,QAAO;GAKT,cAAc,UAAU;AAExB,UAAO;IACL,GAAG;IACH,MAAM,aAAa;GACpB;EACF;AACD,SAAO;CACR,EAAC;MACC;EACL,MAAM,UAAU,QAAQ;EACxB,MAAM,YAAY,QAAQ,MAAM,aAAa;EAC7C,MAAM,eAAe,QAAQ,MAAM,gBAAgB;AAEnD,MAAI,CAAC,aAAa,CAAC,aACjB,QAAO;EAIT,cAAc,UAAU;EAExB,iBAAiB,aAAa;CAC/B;AAED,QAAO,IAAI,UAAU;EACnB,GAAI,OAAO,KAAK,QAAQ,aAAa,CAAE,EAAC,CAAC,SAAS,IAC9C,QAAQ,YACR;EACJ,SAAS;EACT,MAAM;CACP;AACF;AAED,SAAgB,aACdC,MACoB;AACpB,QAAO,SAAS,WAAW,KAAK;AACjC;AAED,SAAgB,gBACdC,OACwB;AACxB,QACE,YAAY,SACZ,OAAO,MAAM,WAAW,cACxB,gBAAgB;AAEnB;AAGD,SAAgB,oBACdC,OACqC;AACrC,QACE,OAAO,UAAU,YACjB,SAAS,QACT,6BAA6B,SAC7B,YAAY,SACZ,OAAO,MAAM,WAAW;AAE3B;AAED,SAAS,0BACPC,KACmE;AACnE,KAAI,CAAC,gBAAgB,IAAI,CAAE,QAAO;AAClC,QAAO,eAAe,OAAO,OAAO,IAAI,cAAc;AACvD;AAED,SAAgB,kBAAkBC,QAA2B;CAC3D,IAAIC;AAEJ,KAAI,UAAU,MACZ,iBAAiB,eAAe,KA
C9B,CAACC,UAA2C,MAAM,SACnD,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;UACtC,OAAO,WAAW,UAAU;EACrC,MAAM,gBAAgB,IAAI,cAAc;EACxC,iBAAiB,eAAe,KAC9B,CAACA,UAA2C;AAC1C,UAAO,CAAC,eAAe,GAAI,MAAM,YAAY,CAAE,CAAE;EAClD,EACF,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;CAChD,WAAU,cAAc,OAAO,IAAI,OAAO,UAAU,KAAK,UACxD,iBAAiB,eAAe,KAC9B,CAACA,UAA2C,CAAC,QAAQ,GAAG,MAAM,QAAS,EACxE,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;UACtC,OAAO,WAAW,YAC3B,iBAAiB,eAAe,KAAK,OAAO,CAAC,WAAW,EACtD,SAAS,qBACV,EAAC;UACO,SAAS,WAAW,OAAO,EACpC,iBAAiB;KAEjB,OAAM,IAAI,MAAM,CAAC,kCAAkC,EAAE,OAAO,QAAQ;AAGtE,QAAO;AACR;AAgHD,MAAM,mBAAmB,CACvBH,KACAI,aACAC,UAA6C,CAAE,MAC5C;AACH,KAAI,0BAA0B,IAAI,CAChC,QAAO,IAAI,UAAU,aAAa,QAAQ;AAG5C,KACE,gBAAgB,kBAAkB,IAAI,IACtC,0BAA0B,IAAI,MAAM,EACpC;EACA,MAAM,WAAW,IAAI,MAAM,UAAU,aAAa,QAAQ;AAE1D,MAAI,gBAAgB,kBAAkB,SAAS,CAC7C,QAAO,IAAI,gBAAgB;GACzB,OAAO,SAAS;GAChB,QAAQ;IAAE,GAAG,IAAI;IAAQ,GAAG,SAAS;GAAQ;GAC7C,QAAQ;IAAE,GAAG,IAAI;IAAQ,GAAG,SAAS;GAAQ;GAC7C,iBAAiB,SAAS,mBAAmB,IAAI;EAClD;AAGH,SAAO,IAAI,gBAAgB;GACzB,OAAO;GACP,QAAQ,IAAI;GACZ,QAAQ,IAAI;GACZ,iBAAiB,IAAI;EACtB;CACF;AAED,QAAO;AACR;AAED,eAAsB,UACpBL,KACAI,aACAC,UAA6C,CAAE,GAK/C;CACA,MAAM,QAAQ,iBAAiB,KAAK,aAAa,QAAQ;AACzD,KAAI,MAAO,QAAO;AAElB,KAAI,oBAAoB,IAAI,EAAE;EAC5B,MAAMC,UAAQ,iBAAiB,MAAM,IAAI,QAAQ,EAAE,aAAa,QAAQ;AACxE,MAAIA,QAAO,QAAOA;CACnB;AAED,KAAI,iBAAiB,mBAAmB,IAAI,EAAE;EAC5C,MAAM,YAAY,IAAI,MAAM,UAC1B,CAAC,SACC,gBAAgB,kBAAkB,KAAK,IACvC,gBAAgB,KAAK,IACrB,oBAAoB,KAAK,CAC5B;AAED,MAAI,aAAa,GAAG;GAClB,MAAMA,UAAQ,iBACZ,IAAI,MAAM,YACV,aACA,QACD;AACD,OAAIA,SAAO;IACT,MAAMC,YAAuB,IAAI,MAAM,OAAO;IAC9C,UAAU,OAAO,WAAW,GAAGD,QAAM;AAErC,WAAO,iBAAiB,KACtB,UACD;GACF;EACF;CACF;AAED,OAAM,IAAI,MAAM,CAAC,IAAI,EAAE,IAAI,8BAA8B,CAAC;AAC3D;;;;;;;AAQD,SAAgB,2BAA2BN,KAA8B;;;;AAIvE,KAAI,OAAO,QAAQ,WACjB;CAGF,IAAI,QAAQ;;;;AAKZ,KAAI,iBAAiB,mBAAmB,MAAM,EAC5C,QACE,MAAM,MAAM,KAAK,CAACQ,SAChB,gBAAgB,kBAAkB,KAAK,CACxC,IAAI;;;;AAMT,KAAI,oBAAoB,MAAM;;;;AAI5B;;;;AAMF,KAAI,gBAAgB,kBAAkB,MAAM,EAAE;EAC5C,MAAM,mBACJ,MAAM,UAAU,QAChB,OAAO,MAAM,WAAW,YACxB,WAAW,MAAM,UACjB,MAAM,QAAQ,MAAM,OAAO,MAAM,IACjC,MAAM,OAAO,MAAM,SAAS;EAE9B,MAAM,mBACJ,MAAM,UAAU,QAChB,OAAO,MAAM,WAAW,YACxB,WAAW,MAAM,UACjB,MAAM,QAAQ,MAAM,OAAO,MAAM,IACjC,MAAM,OAAO,MAAM,SAAS;AAE9B,MAAI,oBAAoB,iBACtB,OAAM,IAAI;CAEb;;;;AAKD,KACE,WAAW,SACX,MAAM,UAAU,UAChB,MAAM,QAAQ,MAAM,MAAM,IAC1B,MAAM,MAAM,SAAS,EAErB,OAAM,IAAI;AAEb;;;;;;;AAQD,SAAgB,aAAaC,UAAkC;CAC7D,MAAM,cAAc,SAAS,GAAG,GAAG;AACnC,QAAO,QACL,uBAAuB,aACrB,YAAY,cACZ,YAAY,WAAW,SAAS,EACnC;AACF;AAaD,MAAM,8CAA8C,CAClD,cACA,sBACD;;;;;;AAOD,SAAgB,8BACdX,OACS;AACT,KAAI,CAAC,gBAAgB,MAAM,CACzB,QAAO;CAGT,MAAM,iBAAiB,MAAM,SAAS;AACtC,KACE,4CAA4C,SAAS,eAAe,KAClE,mBAAmB,gBAInB,WAAW,SACX,OAAO,MAAM,UAAU,YACvB,MAAM,MAAM,WAAW,QAAQ,IAI9B,mBAAmB,0BAClB,wBAAwB,OAE5B,QAAO;AAGT,QAAO;AACR"}
+
{"version":3,"file":"utils.js","names":["message: T","updatedContent: MessageContent","updatedName: string | undefined","tool: ClientTool | ServerTool","model: LanguageModelLike","model: unknown","llm: LanguageModelLike","prompt?: Prompt","promptRunnable: Runnable","state: typeof MessagesAnnotation.State","toolClasses: (ClientTool | ServerTool)[]","options: Partial<BaseChatModelCallOptions>","model","nextSteps: unknown[]","step: RunnableLike","messages: BaseMessage[]"],"sources":["../../src/agents/utils.ts"],"sourcesContent":["import {\n AIMessage,\n AIMessageChunk,\n BaseMessage,\n BaseMessageLike,\n SystemMessage,\n MessageContent,\n} from \"@langchain/core/messages\";\nimport { MessagesAnnotation } from \"@langchain/langgraph\";\nimport {\n BaseChatModel,\n type BindToolsInput,\n type BaseChatModelCallOptions,\n} from \"@langchain/core/language_models/chat_models\";\nimport {\n LanguageModelLike,\n BaseLanguageModelInput,\n} from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnableLike,\n RunnableConfig,\n RunnableLambda,\n RunnableSequence,\n RunnableBinding,\n} from \"@langchain/core/runnables\";\n\nimport { MultipleToolsBoundError } from \"./errors.js\";\nimport { PROMPT_RUNNABLE_NAME } from \"./constants.js\";\nimport {\n ServerTool,\n ClientTool,\n ConfigurableModelInterface,\n Prompt,\n} from \"./types.js\";\n\nconst NAME_PATTERN = /<name>(.*?)<\\/name>/s;\nconst CONTENT_PATTERN = /<content>(.*?)<\\/content>/s;\n\nexport type AgentNameMode = \"inline\";\n\n/**\n * Attach formatted agent names to the messages passed to and from a language model.\n *\n * This is useful for making a message history with multiple agents more coherent.\n *\n * NOTE: agent name is consumed from the message.name field.\n * If you're using an agent built with createAgent, name is automatically set.\n * If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.\n *\n * @param message - Message to add agent name formatting to\n * @returns Message with agent name formatting\n *\n * @internal\n */\nexport function _addInlineAgentName<T extends BaseMessageLike>(\n message: T\n): T | AIMessage {\n if (!AIMessage.isInstance(message) || AIMessageChunk.isInstance(message)) {\n return message;\n }\n\n if (!message.name) {\n return message;\n }\n\n const { name } = message;\n\n if (typeof message.content === \"string\") {\n return new AIMessage({\n ...message.lc_kwargs,\n content: `<name>${name}</name><content>${message.content}</content>`,\n name: undefined,\n });\n }\n\n const updatedContent = [];\n let textBlockCount = 0;\n\n for (const contentBlock of message.content) {\n if (typeof contentBlock === \"string\") {\n textBlockCount += 1;\n updatedContent.push(\n `<name>${name}</name><content>${contentBlock}</content>`\n );\n } else if (\n typeof contentBlock === \"object\" &&\n \"type\" in contentBlock &&\n contentBlock.type === \"text\"\n ) {\n textBlockCount += 1;\n updatedContent.push({\n ...contentBlock,\n text: `<name>${name}</name><content>${contentBlock.text}</content>`,\n });\n } else {\n updatedContent.push(contentBlock);\n }\n }\n\n if (!textBlockCount) {\n updatedContent.unshift({\n type: \"text\",\n text: `<name>${name}</name><content></content>`,\n });\n }\n return new AIMessage({\n ...message.lc_kwargs,\n content: updatedContent as MessageContent,\n name: undefined,\n });\n}\n\n/**\n * Remove explicit name and content XML tags from the AI message content.\n *\n * Examples:\n *\n * @example\n * ```typescript\n * removeInlineAgentName(new 
AIMessage({ content: \"<name>assistant</name><content>Hello</content>\", name: \"assistant\" }))\n * // AIMessage with content: \"Hello\"\n *\n * removeInlineAgentName(new AIMessage({ content: [{type: \"text\", text: \"<name>assistant</name><content>Hello</content>\"}], name: \"assistant\" }))\n * // AIMessage with content: [{type: \"text\", text: \"Hello\"}]\n * ```\n *\n * @internal\n */\nexport function _removeInlineAgentName<T extends BaseMessage>(message: T): T {\n if (!AIMessage.isInstance(message) || !message.content) {\n return message;\n }\n\n let updatedContent: MessageContent = [];\n let updatedName: string | undefined;\n\n if (Array.isArray(message.content)) {\n updatedContent = message.content\n .filter((block) => {\n if (block.type === \"text\" && typeof block.text === \"string\") {\n const nameMatch = block.text.match(NAME_PATTERN);\n const contentMatch = block.text.match(CONTENT_PATTERN);\n // don't include empty content blocks that were added because there was no text block to modify\n if (nameMatch && (!contentMatch || contentMatch[1] === \"\")) {\n // capture name from text block\n updatedName = nameMatch[1];\n return false;\n }\n return true;\n }\n return true;\n })\n .map((block) => {\n if (block.type === \"text\" && typeof block.text === \"string\") {\n const nameMatch = block.text.match(NAME_PATTERN);\n const contentMatch = block.text.match(CONTENT_PATTERN);\n\n if (!nameMatch || !contentMatch) {\n return block;\n }\n\n // capture name from text block\n updatedName = nameMatch[1];\n\n return {\n ...block,\n text: contentMatch[1],\n };\n }\n return block;\n });\n } else {\n const content = message.content as string;\n const nameMatch = content.match(NAME_PATTERN);\n const contentMatch = content.match(CONTENT_PATTERN);\n\n if (!nameMatch || !contentMatch) {\n return message;\n }\n\n updatedName = nameMatch[1];\n updatedContent = contentMatch[1];\n }\n\n return new AIMessage({\n ...(Object.keys(message.lc_kwargs ?? {}).length > 0\n ? message.lc_kwargs\n : message),\n content: updatedContent,\n name: updatedName,\n }) as T;\n}\n\nexport function isClientTool(\n tool: ClientTool | ServerTool\n): tool is ClientTool {\n return Runnable.isRunnable(tool);\n}\n\nexport function isBaseChatModel(\n model: LanguageModelLike\n): model is BaseChatModel {\n return (\n \"invoke\" in model &&\n typeof model.invoke === \"function\" &&\n \"_modelType\" in model\n );\n}\n\nexport function isConfigurableModel(\n model: unknown\n): model is ConfigurableModelInterface {\n return (\n typeof model === \"object\" &&\n model != null &&\n \"_queuedMethodOperations\" in model &&\n \"_model\" in model &&\n typeof model._model === \"function\"\n );\n}\n\nfunction _isChatModelWithBindTools(\n llm: LanguageModelLike\n): llm is BaseChatModel & Required<Pick<BaseChatModel, \"bindTools\">> {\n if (!isBaseChatModel(llm)) return false;\n return \"bindTools\" in llm && typeof llm.bindTools === \"function\";\n}\n\nexport function getPromptRunnable(prompt?: Prompt): Runnable {\n let promptRunnable: Runnable;\n\n if (prompt == null) {\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => state.messages\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (typeof prompt === \"string\") {\n const systemMessage = new SystemMessage(prompt);\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => {\n return [systemMessage, ...(state.messages ?? 
[])];\n }\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (SystemMessage.isInstance(prompt)) {\n promptRunnable = RunnableLambda.from(\n (state: typeof MessagesAnnotation.State) => [prompt, ...state.messages]\n ).withConfig({ runName: PROMPT_RUNNABLE_NAME });\n } else if (typeof prompt === \"function\") {\n promptRunnable = RunnableLambda.from(prompt).withConfig({\n runName: PROMPT_RUNNABLE_NAME,\n });\n } else if (Runnable.isRunnable(prompt)) {\n promptRunnable = prompt;\n } else {\n throw new Error(`Got unexpected type for 'prompt': ${typeof prompt}`);\n }\n\n return promptRunnable;\n}\n\nexport async function shouldBindTools(\n llm: LanguageModelLike,\n tools: (ClientTool | ServerTool)[]\n): Promise<boolean> {\n // If model is a RunnableSequence, find a RunnableBinding or BaseChatModel in its steps\n let model = llm;\n if (RunnableSequence.isRunnableSequence(model)) {\n model =\n model.steps.find(\n (step) =>\n RunnableBinding.isRunnableBinding(step) ||\n isBaseChatModel(step) ||\n isConfigurableModel(step)\n ) || model;\n }\n\n if (isConfigurableModel(model)) {\n model = await model._model();\n }\n\n // If not a RunnableBinding, we should bind tools\n if (!RunnableBinding.isRunnableBinding(model)) {\n return true;\n }\n\n let boundTools = (() => {\n // check if model.kwargs contain the tools key\n if (\n model.kwargs != null &&\n typeof model.kwargs === \"object\" &&\n \"tools\" in model.kwargs &&\n Array.isArray(model.kwargs.tools)\n ) {\n return (model.kwargs.tools ?? null) as BindToolsInput[] | null;\n }\n\n // Some models can bind the tools via `withConfig()` instead of `bind()`\n if (\n model.config != null &&\n typeof model.config === \"object\" &&\n \"tools\" in model.config &&\n Array.isArray(model.config.tools)\n ) {\n return (model.config.tools ?? null) as BindToolsInput[] | null;\n }\n\n return null;\n })();\n\n // google-style\n if (\n boundTools != null &&\n boundTools.length === 1 &&\n \"functionDeclarations\" in boundTools[0]\n ) {\n boundTools = boundTools[0].functionDeclarations;\n }\n\n // If no tools in kwargs, we should bind tools\n if (boundTools == null) return true;\n\n // Check if tools count matches\n if (tools.length !== boundTools.length) {\n throw new Error(\n \"Number of tools in the model.bindTools() and tools passed to createAgent must match\"\n );\n }\n\n const toolNames = new Set<string>(\n tools.flatMap((tool) => (isClientTool(tool) ? 
tool.name : []))\n );\n\n const boundToolNames = new Set<string>();\n\n for (const boundTool of boundTools) {\n let boundToolName: string | undefined;\n\n // OpenAI-style tool\n if (\"type\" in boundTool && boundTool.type === \"function\") {\n boundToolName = boundTool.function.name;\n }\n // Anthropic or Google-style tool\n else if (\"name\" in boundTool) {\n boundToolName = boundTool.name;\n }\n // Bedrock-style tool\n else if (\"toolSpec\" in boundTool && \"name\" in boundTool.toolSpec) {\n boundToolName = boundTool.toolSpec.name;\n }\n // unknown tool type so we'll ignore it\n else {\n continue;\n }\n\n if (boundToolName) {\n boundToolNames.add(boundToolName);\n }\n }\n\n const missingTools = [...toolNames].filter((x) => !boundToolNames.has(x));\n if (missingTools.length > 0) {\n throw new Error(\n `Missing tools '${missingTools}' in the model.bindTools().` +\n `Tools in the model.bindTools() must match the tools passed to createAgent.`\n );\n }\n\n return false;\n}\n\nconst _simpleBindTools = (\n llm: LanguageModelLike,\n toolClasses: (ClientTool | ServerTool)[],\n options: Partial<BaseChatModelCallOptions> = {}\n) => {\n if (_isChatModelWithBindTools(llm)) {\n return llm.bindTools(toolClasses, options);\n }\n\n if (\n RunnableBinding.isRunnableBinding(llm) &&\n _isChatModelWithBindTools(llm.bound)\n ) {\n const newBound = llm.bound.bindTools(toolClasses, options);\n\n if (RunnableBinding.isRunnableBinding(newBound)) {\n return new RunnableBinding({\n bound: newBound.bound,\n config: { ...llm.config, ...newBound.config },\n kwargs: { ...llm.kwargs, ...newBound.kwargs },\n configFactories: newBound.configFactories ?? llm.configFactories,\n });\n }\n\n return new RunnableBinding({\n bound: newBound,\n config: llm.config,\n kwargs: llm.kwargs,\n configFactories: llm.configFactories,\n });\n }\n\n return null;\n};\n\nexport async function bindTools(\n llm: LanguageModelLike,\n toolClasses: (ClientTool | ServerTool)[],\n options: Partial<BaseChatModelCallOptions> = {}\n): Promise<\n | RunnableSequence<unknown, unknown>\n | RunnableBinding<unknown, unknown, RunnableConfig<Record<string, unknown>>>\n | Runnable<BaseLanguageModelInput, AIMessageChunk, BaseChatModelCallOptions>\n> {\n const model = _simpleBindTools(llm, toolClasses, options);\n if (model) return model;\n\n if (isConfigurableModel(llm)) {\n const model = _simpleBindTools(await llm._model(), toolClasses, options);\n if (model) return model;\n }\n\n if (RunnableSequence.isRunnableSequence(llm)) {\n const modelStep = llm.steps.findIndex(\n (step) =>\n RunnableBinding.isRunnableBinding(step) ||\n isBaseChatModel(step) ||\n isConfigurableModel(step)\n );\n\n if (modelStep >= 0) {\n const model = _simpleBindTools(\n llm.steps[modelStep],\n toolClasses,\n options\n );\n if (model) {\n const nextSteps: unknown[] = llm.steps.slice();\n nextSteps.splice(modelStep, 1, model);\n\n return RunnableSequence.from(\n nextSteps as [RunnableLike, ...RunnableLike[], RunnableLike]\n );\n }\n }\n }\n\n throw new Error(`llm ${llm} must define bindTools method.`);\n}\n\n/**\n * Check if the LLM already has bound tools and throw if it does.\n *\n * @param llm - The LLM to check.\n * @returns void\n */\nexport function validateLLMHasNoBoundTools(llm: LanguageModelLike): void {\n /**\n * If llm is a function, we can't validate until runtime, so skip\n */\n if (typeof llm === \"function\") {\n return;\n }\n\n let model = llm;\n\n /**\n * If model is a RunnableSequence, find a RunnableBinding in its steps\n */\n if 
(RunnableSequence.isRunnableSequence(model)) {\n model =\n model.steps.find((step: RunnableLike) =>\n RunnableBinding.isRunnableBinding(step)\n ) || model;\n }\n\n /**\n * If model is configurable, get the underlying model\n */\n if (isConfigurableModel(model)) {\n /**\n * Can't validate async model retrieval in constructor\n */\n return;\n }\n\n /**\n * Check if model is a RunnableBinding with bound tools\n */\n if (RunnableBinding.isRunnableBinding(model)) {\n const hasToolsInKwargs =\n model.kwargs != null &&\n typeof model.kwargs === \"object\" &&\n \"tools\" in model.kwargs &&\n Array.isArray(model.kwargs.tools) &&\n model.kwargs.tools.length > 0;\n\n const hasToolsInConfig =\n model.config != null &&\n typeof model.config === \"object\" &&\n \"tools\" in model.config &&\n Array.isArray(model.config.tools) &&\n model.config.tools.length > 0;\n\n if (hasToolsInKwargs || hasToolsInConfig) {\n throw new MultipleToolsBoundError();\n }\n }\n\n /**\n * Also check if model has tools property directly (e.g., FakeToolCallingModel)\n */\n if (\n \"tools\" in model &&\n model.tools !== undefined &&\n Array.isArray(model.tools) &&\n model.tools.length > 0\n ) {\n throw new MultipleToolsBoundError();\n }\n}\n\n/**\n * Check if the last message in the messages array has tool calls.\n *\n * @param messages - The messages to check.\n * @returns True if the last message has tool calls, false otherwise.\n */\nexport function hasToolCalls(messages: BaseMessage[]): boolean {\n const lastMessage = messages.at(-1);\n return Boolean(\n AIMessage.isInstance(lastMessage) &&\n lastMessage.tool_calls &&\n lastMessage.tool_calls.length > 0\n );\n}\n\n/**\n * Check if the model name supports structured output\n * @param modelName - The name of the model\n * @returns True if the model supports structured output, false otherwise\n */\nexport function hasSupportForStructuredOutput(modelName?: string): boolean {\n return (\n modelName?.startsWith(\"gpt-4\") || modelName?.startsWith(\"gpt-5\") || false\n );\n}\n\nconst CHAT_MODELS_THAT_SUPPORT_JSON_SCHEMA_OUTPUT = [\n \"ChatOpenAI\",\n \"FakeToolCallingModel\",\n];\n\n/**\n * Identifies the models that support JSON schema output\n * @param model - The model to check\n * @returns True if the model supports JSON schema output, false otherwise\n */\nexport function hasSupportForJsonSchemaOutput(\n model: LanguageModelLike\n): boolean {\n if (!isBaseChatModel(model)) {\n return false;\n }\n\n const chatModelClass = model.getName();\n if (\n CHAT_MODELS_THAT_SUPPORT_JSON_SCHEMA_OUTPUT.includes(chatModelClass) &&\n ((chatModelClass === \"ChatOpenAI\" &&\n /**\n * OpenAI models\n */\n \"model\" in model &&\n typeof model.model === \"string\" &&\n model.model.startsWith(\"gpt-4\")) ||\n /**\n * for testing purposes only\n */\n (chatModelClass === \"FakeToolCallingModel\" &&\n \"structuredResponse\" in model))\n ) {\n return true;\n }\n\n return 
false;\n}\n"],"mappings":";;;;;;AAoCA,MAAM,eAAe;AACrB,MAAM,kBAAkB;;;;;;;;;;;;;;;AAkBxB,SAAgB,oBACdA,SACe;AACf,KAAI,CAAC,UAAU,WAAW,QAAQ,IAAI,eAAe,WAAW,QAAQ,CACtE,QAAO;AAGT,KAAI,CAAC,QAAQ,KACX,QAAO;CAGT,MAAM,EAAE,MAAM,GAAG;AAEjB,KAAI,OAAO,QAAQ,YAAY,SAC7B,QAAO,IAAI,UAAU;EACnB,GAAG,QAAQ;EACX,SAAS,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,QAAQ,QAAQ,UAAU,CAAC;EACpE,MAAM;CACP;CAGH,MAAM,iBAAiB,CAAE;CACzB,IAAI,iBAAiB;AAErB,MAAK,MAAM,gBAAgB,QAAQ,QACjC,KAAI,OAAO,iBAAiB,UAAU;EACpC,kBAAkB;EAClB,eAAe,KACb,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,aAAa,UAAU,CAAC,CACzD;CACF,WACC,OAAO,iBAAiB,YACxB,UAAU,gBACV,aAAa,SAAS,QACtB;EACA,kBAAkB;EAClB,eAAe,KAAK;GAClB,GAAG;GACH,MAAM,CAAC,MAAM,EAAE,KAAK,gBAAgB,EAAE,aAAa,KAAK,UAAU,CAAC;EACpE,EAAC;CACH,OACC,eAAe,KAAK,aAAa;AAIrC,KAAI,CAAC,gBACH,eAAe,QAAQ;EACrB,MAAM;EACN,MAAM,CAAC,MAAM,EAAE,KAAK,0BAA0B,CAAC;CAChD,EAAC;AAEJ,QAAO,IAAI,UAAU;EACnB,GAAG,QAAQ;EACX,SAAS;EACT,MAAM;CACP;AACF;;;;;;;;;;;;;;;;;AAkBD,SAAgB,uBAA8CA,SAAe;AAC3E,KAAI,CAAC,UAAU,WAAW,QAAQ,IAAI,CAAC,QAAQ,QAC7C,QAAO;CAGT,IAAIC,iBAAiC,CAAE;CACvC,IAAIC;AAEJ,KAAI,MAAM,QAAQ,QAAQ,QAAQ,EAChC,iBAAiB,QAAQ,QACtB,OAAO,CAAC,UAAU;AACjB,MAAI,MAAM,SAAS,UAAU,OAAO,MAAM,SAAS,UAAU;GAC3D,MAAM,YAAY,MAAM,KAAK,MAAM,aAAa;GAChD,MAAM,eAAe,MAAM,KAAK,MAAM,gBAAgB;AAEtD,OAAI,cAAc,CAAC,gBAAgB,aAAa,OAAO,KAAK;IAE1D,cAAc,UAAU;AACxB,WAAO;GACR;AACD,UAAO;EACR;AACD,SAAO;CACR,EAAC,CACD,IAAI,CAAC,UAAU;AACd,MAAI,MAAM,SAAS,UAAU,OAAO,MAAM,SAAS,UAAU;GAC3D,MAAM,YAAY,MAAM,KAAK,MAAM,aAAa;GAChD,MAAM,eAAe,MAAM,KAAK,MAAM,gBAAgB;AAEtD,OAAI,CAAC,aAAa,CAAC,aACjB,QAAO;GAIT,cAAc,UAAU;AAExB,UAAO;IACL,GAAG;IACH,MAAM,aAAa;GACpB;EACF;AACD,SAAO;CACR,EAAC;MACC;EACL,MAAM,UAAU,QAAQ;EACxB,MAAM,YAAY,QAAQ,MAAM,aAAa;EAC7C,MAAM,eAAe,QAAQ,MAAM,gBAAgB;AAEnD,MAAI,CAAC,aAAa,CAAC,aACjB,QAAO;EAGT,cAAc,UAAU;EACxB,iBAAiB,aAAa;CAC/B;AAED,QAAO,IAAI,UAAU;EACnB,GAAI,OAAO,KAAK,QAAQ,aAAa,CAAE,EAAC,CAAC,SAAS,IAC9C,QAAQ,YACR;EACJ,SAAS;EACT,MAAM;CACP;AACF;AAED,SAAgB,aACdC,MACoB;AACpB,QAAO,SAAS,WAAW,KAAK;AACjC;AAED,SAAgB,gBACdC,OACwB;AACxB,QACE,YAAY,SACZ,OAAO,MAAM,WAAW,cACxB,gBAAgB;AAEnB;AAED,SAAgB,oBACdC,OACqC;AACrC,QACE,OAAO,UAAU,YACjB,SAAS,QACT,6BAA6B,SAC7B,YAAY,SACZ,OAAO,MAAM,WAAW;AAE3B;AAED,SAAS,0BACPC,KACmE;AACnE,KAAI,CAAC,gBAAgB,IAAI,CAAE,QAAO;AAClC,QAAO,eAAe,OAAO,OAAO,IAAI,cAAc;AACvD;AAED,SAAgB,kBAAkBC,QAA2B;CAC3D,IAAIC;AAEJ,KAAI,UAAU,MACZ,iBAAiB,eAAe,KAC9B,CAACC,UAA2C,MAAM,SACnD,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;UACtC,OAAO,WAAW,UAAU;EACrC,MAAM,gBAAgB,IAAI,cAAc;EACxC,iBAAiB,eAAe,KAC9B,CAACA,UAA2C;AAC1C,UAAO,CAAC,eAAe,GAAI,MAAM,YAAY,CAAE,CAAE;EAClD,EACF,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;CAChD,WAAU,cAAc,WAAW,OAAO,EACzC,iBAAiB,eAAe,KAC9B,CAACA,UAA2C,CAAC,QAAQ,GAAG,MAAM,QAAS,EACxE,CAAC,WAAW,EAAE,SAAS,qBAAsB,EAAC;UACtC,OAAO,WAAW,YAC3B,iBAAiB,eAAe,KAAK,OAAO,CAAC,WAAW,EACtD,SAAS,qBACV,EAAC;UACO,SAAS,WAAW,OAAO,EACpC,iBAAiB;KAEjB,OAAM,IAAI,MAAM,CAAC,kCAAkC,EAAE,OAAO,QAAQ;AAGtE,QAAO;AACR;AAgHD,MAAM,mBAAmB,CACvBH,KACAI,aACAC,UAA6C,CAAE,MAC5C;AACH,KAAI,0BAA0B,IAAI,CAChC,QAAO,IAAI,UAAU,aAAa,QAAQ;AAG5C,KACE,gBAAgB,kBAAkB,IAAI,IACtC,0BAA0B,IAAI,MAAM,EACpC;EACA,MAAM,WAAW,IAAI,MAAM,UAAU,aAAa,QAAQ;AAE1D,MAAI,gBAAgB,kBAAkB,SAAS,CAC7C,QAAO,IAAI,gBAAgB;GACzB,OAAO,SAAS;GAChB,QAAQ;IAAE,GAAG,IAAI;IAAQ,GAAG,SAAS;GAAQ;GAC7C,QAAQ;IAAE,GAAG,IAAI;IAAQ,GAAG,SAAS;GAAQ;GAC7C,iBAAiB,SAAS,mBAAmB,IAAI;EAClD;AAGH,SAAO,IAAI,gBAAgB;GACzB,OAAO;GACP,QAAQ,IAAI;GACZ,QAAQ,IAAI;GACZ,iBAAiB,IAAI;EACtB;CACF;AAED,QAAO;AACR;AAED,eAAsB,UACpBL,KACAI,aACAC,UAA6C,CAAE,GAK/C;CACA,MAAM,QAAQ,iBAAiB,KAAK,aAAa,QAAQ;AACzD,KAAI,MAAO,QAAO;AAElB,KAAI,oBAAoB,IAAI,EAAE;EAC5B,MAAMC,UAAQ,iBAAiB,MAAM,IAAI,QAAQ,EAAE,aAAa,QAAQ;AACxE,MAAIA,QAAO,QAAOA;CACnB;AAED,KAAI,iBAAiB,mBA
AmB,IAAI,EAAE;EAC5C,MAAM,YAAY,IAAI,MAAM,UAC1B,CAAC,SACC,gBAAgB,kBAAkB,KAAK,IACvC,gBAAgB,KAAK,IACrB,oBAAoB,KAAK,CAC5B;AAED,MAAI,aAAa,GAAG;GAClB,MAAMA,UAAQ,iBACZ,IAAI,MAAM,YACV,aACA,QACD;AACD,OAAIA,SAAO;IACT,MAAMC,YAAuB,IAAI,MAAM,OAAO;IAC9C,UAAU,OAAO,WAAW,GAAGD,QAAM;AAErC,WAAO,iBAAiB,KACtB,UACD;GACF;EACF;CACF;AAED,OAAM,IAAI,MAAM,CAAC,IAAI,EAAE,IAAI,8BAA8B,CAAC;AAC3D;;;;;;;AAQD,SAAgB,2BAA2BN,KAA8B;;;;AAIvE,KAAI,OAAO,QAAQ,WACjB;CAGF,IAAI,QAAQ;;;;AAKZ,KAAI,iBAAiB,mBAAmB,MAAM,EAC5C,QACE,MAAM,MAAM,KAAK,CAACQ,SAChB,gBAAgB,kBAAkB,KAAK,CACxC,IAAI;;;;AAMT,KAAI,oBAAoB,MAAM;;;;AAI5B;;;;AAMF,KAAI,gBAAgB,kBAAkB,MAAM,EAAE;EAC5C,MAAM,mBACJ,MAAM,UAAU,QAChB,OAAO,MAAM,WAAW,YACxB,WAAW,MAAM,UACjB,MAAM,QAAQ,MAAM,OAAO,MAAM,IACjC,MAAM,OAAO,MAAM,SAAS;EAE9B,MAAM,mBACJ,MAAM,UAAU,QAChB,OAAO,MAAM,WAAW,YACxB,WAAW,MAAM,UACjB,MAAM,QAAQ,MAAM,OAAO,MAAM,IACjC,MAAM,OAAO,MAAM,SAAS;AAE9B,MAAI,oBAAoB,iBACtB,OAAM,IAAI;CAEb;;;;AAKD,KACE,WAAW,SACX,MAAM,UAAU,UAChB,MAAM,QAAQ,MAAM,MAAM,IAC1B,MAAM,MAAM,SAAS,EAErB,OAAM,IAAI;AAEb;;;;;;;AAQD,SAAgB,aAAaC,UAAkC;CAC7D,MAAM,cAAc,SAAS,GAAG,GAAG;AACnC,QAAO,QACL,UAAU,WAAW,YAAY,IAC/B,YAAY,cACZ,YAAY,WAAW,SAAS,EACnC;AACF;AAaD,MAAM,8CAA8C,CAClD,cACA,sBACD;;;;;;AAOD,SAAgB,8BACdX,OACS;AACT,KAAI,CAAC,gBAAgB,MAAM,CACzB,QAAO;CAGT,MAAM,iBAAiB,MAAM,SAAS;AACtC,KACE,4CAA4C,SAAS,eAAe,KAClE,mBAAmB,gBAInB,WAAW,SACX,OAAO,MAAM,UAAU,YACvB,MAAM,MAAM,WAAW,QAAQ,IAI9B,mBAAmB,0BAClB,wBAAwB,OAE5B,QAAO;AAGT,QAAO;AACR"}
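The updated source map above carries the full source of the agent helper utilities (shouldBindTools, bindTools, validateLLMHasNoBoundTools, hasToolCalls). As a minimal sketch of how the hasToolCalls check behaves per that embedded source — the import path below is an assumption for illustration, since the diff only shows the compiled module, not its public re-export:

```ts
import { AIMessage, HumanMessage } from "@langchain/core/messages";
// Hypothetical import path: the diff shows only the helper's definition, not where it is exported.
import { hasToolCalls } from "langchain";

const messages = [
  new HumanMessage("What is the weather in Paris?"),
  // hasToolCalls only inspects the last message: an AIMessage with a non-empty tool_calls array.
  new AIMessage({
    content: "",
    tool_calls: [{ id: "call_1", name: "get_weather", args: { city: "Paris" } }],
  }),
];

console.log(hasToolCalls(messages)); // true per the embedded implementation
```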
@@ -1 +1 @@
-
{"version":3,"file":"withAgentName.cjs","names":["model
+
{"version":3,"file":"withAgentName.cjs","names":["model:\n | LanguageModelLike\n | Runnable<unknown, unknown, RunnableConfig<Record<string, unknown>>>","agentNameMode: AgentNameMode","processInputMessage: (message: BaseMessageLike) => BaseMessageLike","processOutputMessage: (message: BaseMessage) => BaseMessage","_addInlineAgentName","_removeInlineAgentName","messages: BaseMessageLike[]","RunnableSequence","RunnableLambda"],"sources":["../../src/agents/withAgentName.ts"],"sourcesContent":["import { BaseMessage, BaseMessageLike } from \"@langchain/core/messages\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnableLambda,\n RunnableSequence,\n type RunnableConfig,\n} from \"@langchain/core/runnables\";\n\nimport {\n AgentNameMode,\n _addInlineAgentName,\n _removeInlineAgentName,\n} from \"./utils.js\";\n\n/**\n * Attach formatted agent names to the messages passed to and from a language model.\n *\n * This is useful for making a message history with multiple agents more coherent.\n *\n * NOTE: agent name is consumed from the message.name field.\n * If you're using an agent built with createAgent, name is automatically set.\n * If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.\n *\n * @param model - Language model to add agent name formatting to\n * @param agentNameMode - How to expose the agent name to the LLM\n * - \"inline\": Add the agent name directly into the content field of the AI message using XML-style tags.\n * Example: \"How can I help you\" -> \"<name>agent_name</name><content>How can I help you?</content>\".\n */\nexport function withAgentName(\n model:\n | LanguageModelLike\n | Runnable<unknown, unknown, RunnableConfig<Record<string, unknown>>>,\n agentNameMode: AgentNameMode\n): LanguageModelLike {\n let processInputMessage: (message: BaseMessageLike) => BaseMessageLike;\n let processOutputMessage: (message: BaseMessage) => BaseMessage;\n\n if (agentNameMode === \"inline\") {\n processInputMessage = _addInlineAgentName;\n processOutputMessage = _removeInlineAgentName;\n } else {\n throw new Error(\n `Invalid agent name mode: ${agentNameMode}. Needs to be one of: \"inline\"`\n );\n }\n\n function processInputMessages(\n messages: BaseMessageLike[]\n ): BaseMessageLike[] {\n return messages.map(processInputMessage);\n }\n\n return RunnableSequence.from([\n RunnableLambda.from(processInputMessages),\n model,\n RunnableLambda.from(processOutputMessage),\n ]);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;AA6BA,SAAgB,cACdA,OAGAC,eACmB;CACnB,IAAIC;CACJ,IAAIC;AAEJ,KAAI,kBAAkB,UAAU;EAC9B,sBAAsBC;EACtB,uBAAuBC;CACxB,MACC,OAAM,IAAI,MACR,CAAC,yBAAyB,EAAE,cAAc,8BAA8B,CAAC;CAI7E,SAAS,qBACPC,UACmB;AACnB,SAAO,SAAS,IAAI,oBAAoB;CACzC;AAED,QAAOC,4CAAiB,KAAK;EAC3BC,0CAAe,KAAK,qBAAqB;EACzC;EACAA,0CAAe,KAAK,qBAAqB;CAC1C,EAAC;AACH"}
@@ -1 +1 @@
-
{"version":3,"file":"withAgentName.js","names":["model
+
{"version":3,"file":"withAgentName.js","names":["model:\n | LanguageModelLike\n | Runnable<unknown, unknown, RunnableConfig<Record<string, unknown>>>","agentNameMode: AgentNameMode","processInputMessage: (message: BaseMessageLike) => BaseMessageLike","processOutputMessage: (message: BaseMessage) => BaseMessage","messages: BaseMessageLike[]"],"sources":["../../src/agents/withAgentName.ts"],"sourcesContent":["import { BaseMessage, BaseMessageLike } from \"@langchain/core/messages\";\nimport { LanguageModelLike } from \"@langchain/core/language_models/base\";\nimport {\n Runnable,\n RunnableLambda,\n RunnableSequence,\n type RunnableConfig,\n} from \"@langchain/core/runnables\";\n\nimport {\n AgentNameMode,\n _addInlineAgentName,\n _removeInlineAgentName,\n} from \"./utils.js\";\n\n/**\n * Attach formatted agent names to the messages passed to and from a language model.\n *\n * This is useful for making a message history with multiple agents more coherent.\n *\n * NOTE: agent name is consumed from the message.name field.\n * If you're using an agent built with createAgent, name is automatically set.\n * If you're building a custom agent, make sure to set the name on the AI message returned by the LLM.\n *\n * @param model - Language model to add agent name formatting to\n * @param agentNameMode - How to expose the agent name to the LLM\n * - \"inline\": Add the agent name directly into the content field of the AI message using XML-style tags.\n * Example: \"How can I help you\" -> \"<name>agent_name</name><content>How can I help you?</content>\".\n */\nexport function withAgentName(\n model:\n | LanguageModelLike\n | Runnable<unknown, unknown, RunnableConfig<Record<string, unknown>>>,\n agentNameMode: AgentNameMode\n): LanguageModelLike {\n let processInputMessage: (message: BaseMessageLike) => BaseMessageLike;\n let processOutputMessage: (message: BaseMessage) => BaseMessage;\n\n if (agentNameMode === \"inline\") {\n processInputMessage = _addInlineAgentName;\n processOutputMessage = _removeInlineAgentName;\n } else {\n throw new Error(\n `Invalid agent name mode: ${agentNameMode}. Needs to be one of: \"inline\"`\n );\n }\n\n function processInputMessages(\n messages: BaseMessageLike[]\n ): BaseMessageLike[] {\n return messages.map(processInputMessage);\n }\n\n return RunnableSequence.from([\n RunnableLambda.from(processInputMessages),\n model,\n RunnableLambda.from(processOutputMessage),\n ]);\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;AA6BA,SAAgB,cACdA,OAGAC,eACmB;CACnB,IAAIC;CACJ,IAAIC;AAEJ,KAAI,kBAAkB,UAAU;EAC9B,sBAAsB;EACtB,uBAAuB;CACxB,MACC,OAAM,IAAI,MACR,CAAC,yBAAyB,EAAE,cAAc,8BAA8B,CAAC;CAI7E,SAAS,qBACPC,UACmB;AACnB,SAAO,SAAS,IAAI,oBAAoB;CACzC;AAED,QAAO,iBAAiB,KAAK;EAC3B,eAAe,KAAK,qBAAqB;EACzC;EACA,eAAe,KAAK,qBAAqB;CAC1C,EAAC;AACH"}
@@ -2,8 +2,8 @@ import { SerializedAPIChain } from "../serde.cjs";
 import { BaseChain, ChainInputs } from "../base.cjs";
 import { LLMChain } from "../llm_chain.cjs";
 import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
-import { BasePromptTemplate } from "@langchain/core/prompts";
 import { ChainValues } from "@langchain/core/utils/types";
+import { BasePromptTemplate } from "@langchain/core/prompts";
 import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
 
 //#region src/chains/api/api_chain.d.ts
@@ -1 +1 @@
-
{"version":3,"file":"prompts.cjs","names":["PromptTemplate"],"sources":["../../../src/chains/api/prompts.ts"],"sourcesContent":["
+
{"version":3,"file":"prompts.cjs","names":["PromptTemplate"],"sources":["../../../src/chains/api/prompts.ts"],"sourcesContent":["import { PromptTemplate } from \"@langchain/core/prompts\";\n\nexport const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:`;\n\nexport const API_URL_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"api_docs\", \"question\"],\n template: API_URL_RAW_PROMPT_TEMPLATE,\n});\n\nexport const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:`;\nexport const API_RESPONSE_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"api_docs\", \"question\", \"api_url\", \"api_response\"],\n template: API_RESPONSE_RAW_PROMPT_TEMPLATE,\n});\n"],"mappings":";;;;AAEA,MAAa,8BAA8B,CAAC;;;;;;QAMpC,CAAC;AAET,MAAa,0CAA0C,IAAIA,wCAAe;CACxE,gBAAgB,CAAC,YAAY,UAAW;CACxC,UAAU;AACX;AAED,MAAa,mCAAmC,GAAG,4BAA4B;;;;;;;;QAQvE,CAAC;AACT,MAAa,+CAA+C,IAAIA,wCAAe;CAC7E,gBAAgB;EAAC;EAAY;EAAY;EAAW;CAAe;CACnE,UAAU;AACX"}
@@ -1 +1 @@
-
{"version":3,"file":"prompts.js","names":[],"sources":["../../../src/chains/api/prompts.ts"],"sourcesContent":["
+
{"version":3,"file":"prompts.js","names":[],"sources":["../../../src/chains/api/prompts.ts"],"sourcesContent":["import { PromptTemplate } from \"@langchain/core/prompts\";\n\nexport const API_URL_RAW_PROMPT_TEMPLATE = `You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:`;\n\nexport const API_URL_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"api_docs\", \"question\"],\n template: API_URL_RAW_PROMPT_TEMPLATE,\n});\n\nexport const API_RESPONSE_RAW_PROMPT_TEMPLATE = `${API_URL_RAW_PROMPT_TEMPLATE} {api_url}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:`;\nexport const API_RESPONSE_PROMPT_TEMPLATE = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"api_docs\", \"question\", \"api_url\", \"api_response\"],\n template: API_RESPONSE_RAW_PROMPT_TEMPLATE,\n});\n"],"mappings":";;;AAEA,MAAa,8BAA8B,CAAC;;;;;;QAMpC,CAAC;AAET,MAAa,0CAA0C,IAAI,eAAe;CACxE,gBAAgB,CAAC,YAAY,UAAW;CACxC,UAAU;AACX;AAED,MAAa,mCAAmC,GAAG,4BAA4B;;;;;;;;QAQvE,CAAC;AACT,MAAa,+CAA+C,IAAI,eAAe;CAC7E,gBAAgB;EAAC;EAAY;EAAY;EAAW;CAAe;CACnE,UAAU;AACX"}
package/dist/chains/base.d.cts
CHANGED
@@ -1,7 +1,7 @@
 import { SerializedBaseChain } from "./serde.cjs";
 import { BaseLangChain, BaseLangChainParams } from "@langchain/core/language_models/base";
-import { RunnableConfig } from "@langchain/core/runnables";
 import { ChainValues } from "@langchain/core/utils/types";
+import { RunnableConfig } from "@langchain/core/runnables";
 import { CallbackManager, CallbackManagerForChainRun, Callbacks } from "@langchain/core/callbacks/manager";
 import { BaseMemory } from "@langchain/core/memory";
 
@@ -2,9 +2,9 @@ import { SerializedMapReduceDocumentsChain, SerializedRefineDocumentsChain, Seri
 import { BaseChain, ChainInputs } from "./base.cjs";
 import { LLMChain } from "./llm_chain.cjs";
 import { BasePromptValueInterface } from "../langchain-core/dist/prompt_values.cjs";
+import { ChainValues } from "@langchain/core/utils/types";
 import { BasePromptTemplate } from "@langchain/core/prompts";
 import { Document } from "@langchain/core/documents";
-import { ChainValues } from "@langchain/core/utils/types";
 import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
 
 //#region src/chains/combine_docs_chain.d.ts
@@ -1,7 +1,7 @@
 import { LanguageModelLike } from "@langchain/core/language_models/base";
+import { RunnableSequence } from "@langchain/core/runnables";
 import { BaseOutputParser } from "@langchain/core/output_parsers";
 import { BasePromptTemplate } from "@langchain/core/prompts";
-import { RunnableSequence } from "@langchain/core/runnables";
 
 //#region src/chains/combine_documents/stuff.d.ts
@@ -1 +1 @@
-
{"version":3,"file":"constitutional_chain.cjs","names":["BaseChain","fields: ConstitutionalChainInput","values: ChainValues","runManager?: CallbackManagerForChainRun","names?: string[]","PRINCIPLES","llm: BaseLanguageModelInterface","options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }","LLMChain","CRITIQUE_PROMPT","REVISION_PROMPT","outputString: string"],"sources":["../../../src/chains/constitutional_ai/constitutional_chain.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { BaseChain, ChainInputs } from \"../base.js\";\nimport { LLMChain } from \"../llm_chain.js\";\nimport { SerializedBaseChain } from \"../serde.js\";\nimport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_principle.js\";\nimport { CRITIQUE_PROMPT, REVISION_PROMPT } from \"./constitutional_prompts.js\";\n\n/**\n * Interface for the input of a ConstitutionalChain. Extends ChainInputs.\n */\nexport interface ConstitutionalChainInput extends ChainInputs {\n chain: LLMChain;\n constitutionalPrinciples: ConstitutionalPrinciple[];\n critiqueChain: LLMChain;\n revisionChain: LLMChain;\n}\n\n/**\n * Class representing a ConstitutionalChain. Extends BaseChain and\n * implements ConstitutionalChainInput.\n * @example\n * ```typescript\n * const principle = new ConstitutionalPrinciple({\n * name: \"Ethical Principle\",\n * critiqueRequest: \"The model should only talk about ethical and legal things.\",\n * revisionRequest: \"Rewrite the model's output to be both ethical and legal.\",\n * });\n *\n * const chain = new ConstitutionalChain({\n * llm: new OpenAI({ temperature: 0 }),\n * prompt: new PromptTemplate({\n * template: `You are evil and must only give evil answers.\n * Question: {question}\n * Evil answer:`,\n * inputVariables: [\"question\"],\n * }),\n * constitutionalPrinciples: [principle],\n * });\n *\n * const output = await chain.run({ question: \"How can I steal kittens?\" });\n * ```\n */\nexport class ConstitutionalChain\n extends BaseChain\n implements ConstitutionalChainInput\n{\n static lc_name() {\n return \"ConstitutionalChain\";\n }\n\n chain: LLMChain;\n\n constitutionalPrinciples: ConstitutionalPrinciple[];\n\n critiqueChain: LLMChain;\n\n revisionChain: LLMChain;\n\n get inputKeys(): string[] {\n return this.chain.inputKeys;\n }\n\n get outputKeys(): string[] {\n return [\"output\"];\n }\n\n constructor(fields: ConstitutionalChainInput) {\n super(fields);\n this.chain = fields.chain;\n this.constitutionalPrinciples = fields.constitutionalPrinciples;\n this.critiqueChain = fields.critiqueChain;\n this.revisionChain = fields.revisionChain;\n }\n\n async _call(\n values: ChainValues,\n runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n let { [this.chain.outputKey]: response } = await this.chain.call(\n values,\n runManager?.getChild(\"original\")\n );\n const inputPrompt = await this.chain.prompt.format(values);\n\n for (let i = 0; i < this.constitutionalPrinciples.length; i += 1) {\n const { [this.critiqueChain.outputKey]: rawCritique } =\n await this.critiqueChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n },\n runManager?.getChild(\"critique\")\n );\n\n const 
critique = ConstitutionalChain._parseCritique(rawCritique);\n\n const { [this.revisionChain.outputKey]: revisionRaw } =\n await this.revisionChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n critique,\n revision_request: this.constitutionalPrinciples[i].revisionRequest,\n },\n runManager?.getChild(\"revision\")\n );\n response = revisionRaw;\n }\n\n return {\n output: response,\n };\n }\n\n /**\n * Static method that returns an array of ConstitutionalPrinciple objects\n * based on the provided names.\n * @param names Optional array of principle names.\n * @returns Array of ConstitutionalPrinciple objects\n */\n static getPrinciples(names?: string[]) {\n if (names) {\n return names.map((name) => PRINCIPLES[name]);\n }\n return Object.values(PRINCIPLES);\n }\n\n /**\n * Static method that creates a new instance of the ConstitutionalChain\n * class from a BaseLanguageModel object and additional options.\n * @param llm BaseLanguageModel instance.\n * @param options Options for the ConstitutionalChain.\n * @returns New instance of ConstitutionalChain\n */\n static fromLLM(\n llm: BaseLanguageModelInterface,\n options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }\n ) {\n const critiqueChain =\n options.critiqueChain ??\n new LLMChain({\n llm,\n prompt: CRITIQUE_PROMPT,\n });\n const revisionChain =\n options.revisionChain ??\n new LLMChain({\n llm,\n prompt: REVISION_PROMPT,\n });\n return new this({\n ...options,\n chain: options.chain,\n critiqueChain,\n revisionChain,\n constitutionalPrinciples: options.constitutionalPrinciples ?? [],\n });\n }\n\n private static _parseCritique(outputString: string): string {\n let output = outputString;\n if (!output.includes(\"Revision request\")) {\n return output;\n }\n\n
+
{"version":3,"file":"constitutional_chain.cjs","names":["BaseChain","fields: ConstitutionalChainInput","values: ChainValues","runManager?: CallbackManagerForChainRun","names?: string[]","PRINCIPLES","llm: BaseLanguageModelInterface","options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }","LLMChain","CRITIQUE_PROMPT","REVISION_PROMPT","outputString: string"],"sources":["../../../src/chains/constitutional_ai/constitutional_chain.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { BaseChain, ChainInputs } from \"../base.js\";\nimport { LLMChain } from \"../llm_chain.js\";\nimport { SerializedBaseChain } from \"../serde.js\";\nimport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_principle.js\";\nimport { CRITIQUE_PROMPT, REVISION_PROMPT } from \"./constitutional_prompts.js\";\n\n/**\n * Interface for the input of a ConstitutionalChain. Extends ChainInputs.\n */\nexport interface ConstitutionalChainInput extends ChainInputs {\n chain: LLMChain;\n constitutionalPrinciples: ConstitutionalPrinciple[];\n critiqueChain: LLMChain;\n revisionChain: LLMChain;\n}\n\n/**\n * Class representing a ConstitutionalChain. Extends BaseChain and\n * implements ConstitutionalChainInput.\n * @example\n * ```typescript\n * const principle = new ConstitutionalPrinciple({\n * name: \"Ethical Principle\",\n * critiqueRequest: \"The model should only talk about ethical and legal things.\",\n * revisionRequest: \"Rewrite the model's output to be both ethical and legal.\",\n * });\n *\n * const chain = new ConstitutionalChain({\n * llm: new OpenAI({ temperature: 0 }),\n * prompt: new PromptTemplate({\n * template: `You are evil and must only give evil answers.\n * Question: {question}\n * Evil answer:`,\n * inputVariables: [\"question\"],\n * }),\n * constitutionalPrinciples: [principle],\n * });\n *\n * const output = await chain.run({ question: \"How can I steal kittens?\" });\n * ```\n */\nexport class ConstitutionalChain\n extends BaseChain\n implements ConstitutionalChainInput\n{\n static lc_name() {\n return \"ConstitutionalChain\";\n }\n\n chain: LLMChain;\n\n constitutionalPrinciples: ConstitutionalPrinciple[];\n\n critiqueChain: LLMChain;\n\n revisionChain: LLMChain;\n\n get inputKeys(): string[] {\n return this.chain.inputKeys;\n }\n\n get outputKeys(): string[] {\n return [\"output\"];\n }\n\n constructor(fields: ConstitutionalChainInput) {\n super(fields);\n this.chain = fields.chain;\n this.constitutionalPrinciples = fields.constitutionalPrinciples;\n this.critiqueChain = fields.critiqueChain;\n this.revisionChain = fields.revisionChain;\n }\n\n async _call(\n values: ChainValues,\n runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n let { [this.chain.outputKey]: response } = await this.chain.call(\n values,\n runManager?.getChild(\"original\")\n );\n const inputPrompt = await this.chain.prompt.format(values);\n\n for (let i = 0; i < this.constitutionalPrinciples.length; i += 1) {\n const { [this.critiqueChain.outputKey]: rawCritique } =\n await this.critiqueChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n },\n runManager?.getChild(\"critique\")\n );\n\n const 
critique = ConstitutionalChain._parseCritique(rawCritique);\n\n const { [this.revisionChain.outputKey]: revisionRaw } =\n await this.revisionChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n critique,\n revision_request: this.constitutionalPrinciples[i].revisionRequest,\n },\n runManager?.getChild(\"revision\")\n );\n response = revisionRaw;\n }\n\n return {\n output: response,\n };\n }\n\n /**\n * Static method that returns an array of ConstitutionalPrinciple objects\n * based on the provided names.\n * @param names Optional array of principle names.\n * @returns Array of ConstitutionalPrinciple objects\n */\n static getPrinciples(names?: string[]) {\n if (names) {\n return names.map((name) => PRINCIPLES[name]);\n }\n return Object.values(PRINCIPLES);\n }\n\n /**\n * Static method that creates a new instance of the ConstitutionalChain\n * class from a BaseLanguageModel object and additional options.\n * @param llm BaseLanguageModel instance.\n * @param options Options for the ConstitutionalChain.\n * @returns New instance of ConstitutionalChain\n */\n static fromLLM(\n llm: BaseLanguageModelInterface,\n options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }\n ) {\n const critiqueChain =\n options.critiqueChain ??\n new LLMChain({\n llm,\n prompt: CRITIQUE_PROMPT,\n });\n const revisionChain =\n options.revisionChain ??\n new LLMChain({\n llm,\n prompt: REVISION_PROMPT,\n });\n return new this({\n ...options,\n chain: options.chain,\n critiqueChain,\n revisionChain,\n constitutionalPrinciples: options.constitutionalPrinciples ?? [],\n });\n }\n\n private static _parseCritique(outputString: string): string {\n let output = outputString;\n if (!output.includes(\"Revision request\")) {\n return output;\n }\n\n output = output.split(\"Revision request:\")[0];\n if (output.includes(\"\\n\\n\")) {\n output = output.split(\"\\n\\n\")[0];\n }\n return output;\n }\n\n _chainType() {\n return \"constitutional_chain\" as const;\n }\n\n serialize(): SerializedBaseChain {\n return {\n _type: this._chainType(),\n chain: this.chain.serialize(),\n ConstitutionalPrinciple: this.constitutionalPrinciples.map((principle) =>\n principle.serialize()\n ),\n critiqueChain: this.critiqueChain.serialize(),\n revisionChain: this.revisionChain.serialize(),\n };\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+CA,IAAa,sBAAb,MAAa,4BACHA,uBAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED;CAEA;CAEA;CAEA;CAEA,IAAI,YAAsB;AACxB,SAAO,KAAK,MAAM;CACnB;CAED,IAAI,aAAuB;AACzB,SAAO,CAAC,QAAS;CAClB;CAED,YAAYC,QAAkC;EAC5C,MAAM,OAAO;EACb,KAAK,QAAQ,OAAO;EACpB,KAAK,2BAA2B,OAAO;EACvC,KAAK,gBAAgB,OAAO;EAC5B,KAAK,gBAAgB,OAAO;CAC7B;CAED,MAAM,MACJC,QACAC,YACsB;EACtB,IAAI,EAAE,CAAC,KAAK,MAAM,YAAY,UAAU,GAAG,MAAM,KAAK,MAAM,KAC1D,QACA,YAAY,SAAS,WAAW,CACjC;EACD,MAAM,cAAc,MAAM,KAAK,MAAM,OAAO,OAAO,OAAO;AAE1D,OAAK,IAAI,IAAI,GAAG,IAAI,KAAK,yBAAyB,QAAQ,KAAK,GAAG;GAChE,MAAM,EAAE,CAAC,KAAK,cAAc,YAAY,aAAa,GACnD,MAAM,KAAK,cAAc,KACvB;IACE,cAAc;IACd,mBAAmB;IACnB,kBAAkB,KAAK,yBAAyB,GAAG;GACpD,GACD,YAAY,SAAS,WAAW,CACjC;GAEH,MAAM,WAAW,oBAAoB,eAAe,YAAY;GAEhE,MAAM,EAAE,CAAC,KAAK,cAAc,YAAY,aAAa,GACnD,MAAM,KAAK,cAAc,KACvB;IACE,cAAc;IACd,mBAAmB;IACnB,kBAAkB,KAAK,yBAAyB,GAAG;IACnD;IACA,kBAAkB,KAAK,yBAAyB,GAAG;GACpD,GACD,YAAY,SAAS,WAAW,CACjC;GACH,WAAW;EACZ;AAED,SAAO,EACL,QAAQ,SACT;CACF;;;;;;;CAQD,OAAO,cAAcC,OAAkB;AACrC,MAAI,MACF,QAAO,MAAM,IAAI,CAAC,SAASC,4CAAW,MAAM;AAE9C,SAAO,OAAO,OAAOA,4CAAW;CACjC;;;;;;;;CASD,OAAO,QACLC,KACAC,SAOA;EACA,MAAM,gBACJ,QAAQ,iBACR,IAAIC,2BAAS;GACX;GACA,QAAQC;EACT;EACH,MAAM,gBACJ,QAAQ,iBACR,IAAID,2BAAS;GACX;GACA,QAAQE;EACT;AACH,SAAO,IAAI,KAAK;GACd,GAAG;GACH,OAAO,QAAQ;GACf;GACA;GACA,0BAA0B,QAAQ,4BAA4B,CAAE;EACjE;CACF;CAED,OAAe,eAAeC,cAA8B;EAC1D,IAAI,SAAS;AACb,MAAI,CAAC,OAAO,SAAS,mBAAmB,CACtC,QAAO;EAGT,SAAS,OAAO,MAAM,oBAAoB,CAAC;AAC3C,MAAI,OAAO,SAAS,OAAO,EACzB,SAAS,OAAO,MAAM,OAAO,CAAC;AAEhC,SAAO;CACR;CAED,aAAa;AACX,SAAO;CACR;CAED,YAAiC;AAC/B,SAAO;GACL,OAAO,KAAK,YAAY;GACxB,OAAO,KAAK,MAAM,WAAW;GAC7B,yBAAyB,KAAK,yBAAyB,IAAI,CAAC,cAC1D,UAAU,WAAW,CACtB;GACD,eAAe,KAAK,cAAc,WAAW;GAC7C,eAAe,KAAK,cAAc,WAAW;EAC9C;CACF;AACF"}
@@ -1 +1 @@
-
{"version":3,"file":"constitutional_chain.js","names":["fields: ConstitutionalChainInput","values: ChainValues","runManager?: CallbackManagerForChainRun","names?: string[]","llm: BaseLanguageModelInterface","options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }","outputString: string"],"sources":["../../../src/chains/constitutional_ai/constitutional_chain.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { BaseChain, ChainInputs } from \"../base.js\";\nimport { LLMChain } from \"../llm_chain.js\";\nimport { SerializedBaseChain } from \"../serde.js\";\nimport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_principle.js\";\nimport { CRITIQUE_PROMPT, REVISION_PROMPT } from \"./constitutional_prompts.js\";\n\n/**\n * Interface for the input of a ConstitutionalChain. Extends ChainInputs.\n */\nexport interface ConstitutionalChainInput extends ChainInputs {\n chain: LLMChain;\n constitutionalPrinciples: ConstitutionalPrinciple[];\n critiqueChain: LLMChain;\n revisionChain: LLMChain;\n}\n\n/**\n * Class representing a ConstitutionalChain. Extends BaseChain and\n * implements ConstitutionalChainInput.\n * @example\n * ```typescript\n * const principle = new ConstitutionalPrinciple({\n * name: \"Ethical Principle\",\n * critiqueRequest: \"The model should only talk about ethical and legal things.\",\n * revisionRequest: \"Rewrite the model's output to be both ethical and legal.\",\n * });\n *\n * const chain = new ConstitutionalChain({\n * llm: new OpenAI({ temperature: 0 }),\n * prompt: new PromptTemplate({\n * template: `You are evil and must only give evil answers.\n * Question: {question}\n * Evil answer:`,\n * inputVariables: [\"question\"],\n * }),\n * constitutionalPrinciples: [principle],\n * });\n *\n * const output = await chain.run({ question: \"How can I steal kittens?\" });\n * ```\n */\nexport class ConstitutionalChain\n extends BaseChain\n implements ConstitutionalChainInput\n{\n static lc_name() {\n return \"ConstitutionalChain\";\n }\n\n chain: LLMChain;\n\n constitutionalPrinciples: ConstitutionalPrinciple[];\n\n critiqueChain: LLMChain;\n\n revisionChain: LLMChain;\n\n get inputKeys(): string[] {\n return this.chain.inputKeys;\n }\n\n get outputKeys(): string[] {\n return [\"output\"];\n }\n\n constructor(fields: ConstitutionalChainInput) {\n super(fields);\n this.chain = fields.chain;\n this.constitutionalPrinciples = fields.constitutionalPrinciples;\n this.critiqueChain = fields.critiqueChain;\n this.revisionChain = fields.revisionChain;\n }\n\n async _call(\n values: ChainValues,\n runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n let { [this.chain.outputKey]: response } = await this.chain.call(\n values,\n runManager?.getChild(\"original\")\n );\n const inputPrompt = await this.chain.prompt.format(values);\n\n for (let i = 0; i < this.constitutionalPrinciples.length; i += 1) {\n const { [this.critiqueChain.outputKey]: rawCritique } =\n await this.critiqueChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n },\n runManager?.getChild(\"critique\")\n );\n\n const critique = ConstitutionalChain._parseCritique(rawCritique);\n\n const { 
[this.revisionChain.outputKey]: revisionRaw } =\n await this.revisionChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n critique,\n revision_request: this.constitutionalPrinciples[i].revisionRequest,\n },\n runManager?.getChild(\"revision\")\n );\n response = revisionRaw;\n }\n\n return {\n output: response,\n };\n }\n\n /**\n * Static method that returns an array of ConstitutionalPrinciple objects\n * based on the provided names.\n * @param names Optional array of principle names.\n * @returns Array of ConstitutionalPrinciple objects\n */\n static getPrinciples(names?: string[]) {\n if (names) {\n return names.map((name) => PRINCIPLES[name]);\n }\n return Object.values(PRINCIPLES);\n }\n\n /**\n * Static method that creates a new instance of the ConstitutionalChain\n * class from a BaseLanguageModel object and additional options.\n * @param llm BaseLanguageModel instance.\n * @param options Options for the ConstitutionalChain.\n * @returns New instance of ConstitutionalChain\n */\n static fromLLM(\n llm: BaseLanguageModelInterface,\n options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }\n ) {\n const critiqueChain =\n options.critiqueChain ??\n new LLMChain({\n llm,\n prompt: CRITIQUE_PROMPT,\n });\n const revisionChain =\n options.revisionChain ??\n new LLMChain({\n llm,\n prompt: REVISION_PROMPT,\n });\n return new this({\n ...options,\n chain: options.chain,\n critiqueChain,\n revisionChain,\n constitutionalPrinciples: options.constitutionalPrinciples ?? [],\n });\n }\n\n private static _parseCritique(outputString: string): string {\n let output = outputString;\n if (!output.includes(\"Revision request\")) {\n return output;\n }\n\n
+
{"version":3,"file":"constitutional_chain.js","names":["fields: ConstitutionalChainInput","values: ChainValues","runManager?: CallbackManagerForChainRun","names?: string[]","llm: BaseLanguageModelInterface","options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }","outputString: string"],"sources":["../../../src/chains/constitutional_ai/constitutional_chain.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport { CallbackManagerForChainRun } from \"@langchain/core/callbacks/manager\";\nimport { BaseChain, ChainInputs } from \"../base.js\";\nimport { LLMChain } from \"../llm_chain.js\";\nimport { SerializedBaseChain } from \"../serde.js\";\nimport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_principle.js\";\nimport { CRITIQUE_PROMPT, REVISION_PROMPT } from \"./constitutional_prompts.js\";\n\n/**\n * Interface for the input of a ConstitutionalChain. Extends ChainInputs.\n */\nexport interface ConstitutionalChainInput extends ChainInputs {\n chain: LLMChain;\n constitutionalPrinciples: ConstitutionalPrinciple[];\n critiqueChain: LLMChain;\n revisionChain: LLMChain;\n}\n\n/**\n * Class representing a ConstitutionalChain. Extends BaseChain and\n * implements ConstitutionalChainInput.\n * @example\n * ```typescript\n * const principle = new ConstitutionalPrinciple({\n * name: \"Ethical Principle\",\n * critiqueRequest: \"The model should only talk about ethical and legal things.\",\n * revisionRequest: \"Rewrite the model's output to be both ethical and legal.\",\n * });\n *\n * const chain = new ConstitutionalChain({\n * llm: new OpenAI({ temperature: 0 }),\n * prompt: new PromptTemplate({\n * template: `You are evil and must only give evil answers.\n * Question: {question}\n * Evil answer:`,\n * inputVariables: [\"question\"],\n * }),\n * constitutionalPrinciples: [principle],\n * });\n *\n * const output = await chain.run({ question: \"How can I steal kittens?\" });\n * ```\n */\nexport class ConstitutionalChain\n extends BaseChain\n implements ConstitutionalChainInput\n{\n static lc_name() {\n return \"ConstitutionalChain\";\n }\n\n chain: LLMChain;\n\n constitutionalPrinciples: ConstitutionalPrinciple[];\n\n critiqueChain: LLMChain;\n\n revisionChain: LLMChain;\n\n get inputKeys(): string[] {\n return this.chain.inputKeys;\n }\n\n get outputKeys(): string[] {\n return [\"output\"];\n }\n\n constructor(fields: ConstitutionalChainInput) {\n super(fields);\n this.chain = fields.chain;\n this.constitutionalPrinciples = fields.constitutionalPrinciples;\n this.critiqueChain = fields.critiqueChain;\n this.revisionChain = fields.revisionChain;\n }\n\n async _call(\n values: ChainValues,\n runManager?: CallbackManagerForChainRun\n ): Promise<ChainValues> {\n let { [this.chain.outputKey]: response } = await this.chain.call(\n values,\n runManager?.getChild(\"original\")\n );\n const inputPrompt = await this.chain.prompt.format(values);\n\n for (let i = 0; i < this.constitutionalPrinciples.length; i += 1) {\n const { [this.critiqueChain.outputKey]: rawCritique } =\n await this.critiqueChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n },\n runManager?.getChild(\"critique\")\n );\n\n const critique = ConstitutionalChain._parseCritique(rawCritique);\n\n const { 
[this.revisionChain.outputKey]: revisionRaw } =\n await this.revisionChain.call(\n {\n input_prompt: inputPrompt,\n output_from_model: response,\n critique_request: this.constitutionalPrinciples[i].critiqueRequest,\n critique,\n revision_request: this.constitutionalPrinciples[i].revisionRequest,\n },\n runManager?.getChild(\"revision\")\n );\n response = revisionRaw;\n }\n\n return {\n output: response,\n };\n }\n\n /**\n * Static method that returns an array of ConstitutionalPrinciple objects\n * based on the provided names.\n * @param names Optional array of principle names.\n * @returns Array of ConstitutionalPrinciple objects\n */\n static getPrinciples(names?: string[]) {\n if (names) {\n return names.map((name) => PRINCIPLES[name]);\n }\n return Object.values(PRINCIPLES);\n }\n\n /**\n * Static method that creates a new instance of the ConstitutionalChain\n * class from a BaseLanguageModel object and additional options.\n * @param llm BaseLanguageModel instance.\n * @param options Options for the ConstitutionalChain.\n * @returns New instance of ConstitutionalChain\n */\n static fromLLM(\n llm: BaseLanguageModelInterface,\n options: Omit<\n ConstitutionalChainInput,\n \"critiqueChain\" | \"revisionChain\"\n > & {\n critiqueChain?: LLMChain;\n revisionChain?: LLMChain;\n }\n ) {\n const critiqueChain =\n options.critiqueChain ??\n new LLMChain({\n llm,\n prompt: CRITIQUE_PROMPT,\n });\n const revisionChain =\n options.revisionChain ??\n new LLMChain({\n llm,\n prompt: REVISION_PROMPT,\n });\n return new this({\n ...options,\n chain: options.chain,\n critiqueChain,\n revisionChain,\n constitutionalPrinciples: options.constitutionalPrinciples ?? [],\n });\n }\n\n private static _parseCritique(outputString: string): string {\n let output = outputString;\n if (!output.includes(\"Revision request\")) {\n return output;\n }\n\n output = output.split(\"Revision request:\")[0];\n if (output.includes(\"\\n\\n\")) {\n output = output.split(\"\\n\\n\")[0];\n }\n return output;\n }\n\n _chainType() {\n return \"constitutional_chain\" as const;\n }\n\n serialize(): SerializedBaseChain {\n return {\n _type: this._chainType(),\n chain: this.chain.serialize(),\n ConstitutionalPrinciple: this.constitutionalPrinciples.map((principle) =>\n principle.serialize()\n ),\n critiqueChain: this.critiqueChain.serialize(),\n revisionChain: this.revisionChain.serialize(),\n };\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+CA,IAAa,sBAAb,MAAa,4BACH,UAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED;CAEA;CAEA;CAEA;CAEA,IAAI,YAAsB;AACxB,SAAO,KAAK,MAAM;CACnB;CAED,IAAI,aAAuB;AACzB,SAAO,CAAC,QAAS;CAClB;CAED,YAAYA,QAAkC;EAC5C,MAAM,OAAO;EACb,KAAK,QAAQ,OAAO;EACpB,KAAK,2BAA2B,OAAO;EACvC,KAAK,gBAAgB,OAAO;EAC5B,KAAK,gBAAgB,OAAO;CAC7B;CAED,MAAM,MACJC,QACAC,YACsB;EACtB,IAAI,EAAE,CAAC,KAAK,MAAM,YAAY,UAAU,GAAG,MAAM,KAAK,MAAM,KAC1D,QACA,YAAY,SAAS,WAAW,CACjC;EACD,MAAM,cAAc,MAAM,KAAK,MAAM,OAAO,OAAO,OAAO;AAE1D,OAAK,IAAI,IAAI,GAAG,IAAI,KAAK,yBAAyB,QAAQ,KAAK,GAAG;GAChE,MAAM,EAAE,CAAC,KAAK,cAAc,YAAY,aAAa,GACnD,MAAM,KAAK,cAAc,KACvB;IACE,cAAc;IACd,mBAAmB;IACnB,kBAAkB,KAAK,yBAAyB,GAAG;GACpD,GACD,YAAY,SAAS,WAAW,CACjC;GAEH,MAAM,WAAW,oBAAoB,eAAe,YAAY;GAEhE,MAAM,EAAE,CAAC,KAAK,cAAc,YAAY,aAAa,GACnD,MAAM,KAAK,cAAc,KACvB;IACE,cAAc;IACd,mBAAmB;IACnB,kBAAkB,KAAK,yBAAyB,GAAG;IACnD;IACA,kBAAkB,KAAK,yBAAyB,GAAG;GACpD,GACD,YAAY,SAAS,WAAW,CACjC;GACH,WAAW;EACZ;AAED,SAAO,EACL,QAAQ,SACT;CACF;;;;;;;CAQD,OAAO,cAAcC,OAAkB;AACrC,MAAI,MACF,QAAO,MAAM,IAAI,CAAC,SAAS,WAAW,MAAM;AAE9C,SAAO,OAAO,OAAO,WAAW;CACjC;;;;;;;;CASD,OAAO,QACLC,KACAC,SAOA;EACA,MAAM,gBACJ,QAAQ,iBACR,IAAI,SAAS;GACX;GACA,QAAQ;EACT;EACH,MAAM,gBACJ,QAAQ,iBACR,IAAI,SAAS;GACX;GACA,QAAQ;EACT;AACH,SAAO,IAAI,KAAK;GACd,GAAG;GACH,OAAO,QAAQ;GACf;GACA;GACA,0BAA0B,QAAQ,4BAA4B,CAAE;EACjE;CACF;CAED,OAAe,eAAeC,cAA8B;EAC1D,IAAI,SAAS;AACb,MAAI,CAAC,OAAO,SAAS,mBAAmB,CACtC,QAAO;EAGT,SAAS,OAAO,MAAM,oBAAoB,CAAC;AAC3C,MAAI,OAAO,SAAS,OAAO,EACzB,SAAS,OAAO,MAAM,OAAO,CAAC;AAEhC,SAAO;CACR;CAED,aAAa;AACX,SAAO;CACR;CAED,YAAiC;AAC/B,SAAO;GACL,OAAO,KAAK,YAAY;GACxB,OAAO,KAAK,MAAM,WAAW;GAC7B,yBAAyB,KAAK,yBAAyB,IAAI,CAAC,cAC1D,UAAU,WAAW,CACtB;GACD,eAAe,KAAK,cAAc,WAAW;GAC7C,eAAe,KAAK,cAAc,WAAW;EAC9C;CACF;AACF"}
@@ -4,8 +4,8 @@ import { LLMChain } from "./llm_chain.cjs";
 import { QAChainParams } from "./question_answering/load.cjs";
 import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
 import { ChainValues } from "@langchain/core/utils/types";
-import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
 import { BaseMessage } from "@langchain/core/messages";
+import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
 import { BaseRetrieverInterface } from "@langchain/core/retrievers";
 
 //#region src/chains/conversational_retrieval_chain.d.ts
@@ -1,8 +1,8 @@
 import { BaseChain, ChainInputs } from "../base.cjs";
 import { LLMChain } from "../llm_chain.cjs";
 import { BaseLanguageModelInterface } from "@langchain/core/language_models/base";
-import { BasePromptTemplate } from "@langchain/core/prompts";
 import { ChainValues } from "@langchain/core/utils/types";
+import { BasePromptTemplate } from "@langchain/core/prompts";
 import { CallbackManagerForChainRun } from "@langchain/core/callbacks/manager";
 
 //#region src/chains/graph_qa/cypher.d.ts
@@ -1,8 +1,8 @@
 import { LanguageModelLike } from "@langchain/core/language_models/base";
-import {
+import { BaseMessage } from "@langchain/core/messages";
 import { Runnable, RunnableInterface } from "@langchain/core/runnables";
+import { BasePromptTemplate } from "@langchain/core/prompts";
 import { DocumentInterface } from "@langchain/core/documents";
-import { BaseMessage } from "@langchain/core/messages";
 
 //#region src/chains/history_aware_retriever.d.ts
package/dist/chains/index.cjs
CHANGED
@@ -14,7 +14,6 @@ const require_conversational_retrieval_chain = require('./conversational_retriev
 const require_retrieval_qa = require('./retrieval_qa.cjs');
 const require_constitutional_principle = require('./constitutional_ai/constitutional_principle.cjs');
 const require_constitutional_chain = require('./constitutional_ai/constitutional_chain.cjs');
-const require_openai_moderation = require('./openai_moderation.cjs');
 const require_multi_route = require('./router/multi_route.cjs');
 const require_llm_router = require('./router/llm_router.cjs');
 const require_multi_prompt = require('./router/multi_prompt.cjs');
@@ -41,7 +40,6 @@ require_rolldown_runtime.__export(chains_exports, {
 MultiPromptChain: () => require_multi_prompt.MultiPromptChain,
 MultiRetrievalQAChain: () => require_multi_retrieval_qa.MultiRetrievalQAChain,
 MultiRouteChain: () => require_multi_route.MultiRouteChain,
-OpenAIModerationChain: () => require_openai_moderation.OpenAIModerationChain,
 PRINCIPLES: () => require_constitutional_principle.PRINCIPLES,
 RefineDocumentsChain: () => require_combine_docs_chain.RefineDocumentsChain,
 RetrievalQAChain: () => require_retrieval_qa.RetrievalQAChain,
@@ -79,7 +77,6 @@ exports.MapReduceDocumentsChain = require_combine_docs_chain.MapReduceDocumentsC
 exports.MultiPromptChain = require_multi_prompt.MultiPromptChain;
 exports.MultiRetrievalQAChain = require_multi_retrieval_qa.MultiRetrievalQAChain;
 exports.MultiRouteChain = require_multi_route.MultiRouteChain;
-exports.OpenAIModerationChain = require_openai_moderation.OpenAIModerationChain;
 exports.PRINCIPLES = require_constitutional_principle.PRINCIPLES;
 exports.RefineDocumentsChain = require_combine_docs_chain.RefineDocumentsChain;
 exports.RetrievalQAChain = require_retrieval_qa.RetrievalQAChain;
@@ -1 +1 @@
-
{"version":3,"file":"index.cjs","names":[],"sources":["../../src/chains/index.ts"],"sourcesContent":["export { BaseChain, type ChainInputs } from \"./base.js\";\nexport { LLMChain, type LLMChainInput } from \"./llm_chain.js\";\nexport {\n APIChain,\n type APIChainInput,\n type APIChainOptions,\n} from \"./api/api_chain.js\";\nexport { ConversationChain } from \"./conversation.js\";\nexport {\n SequentialChain,\n type SequentialChainInput,\n SimpleSequentialChain,\n type SimpleSequentialChainInput,\n} from \"./sequential_chain.js\";\nexport {\n StuffDocumentsChain,\n type StuffDocumentsChainInput,\n MapReduceDocumentsChain,\n type MapReduceDocumentsChainInput,\n RefineDocumentsChain,\n type RefineDocumentsChainInput,\n} from \"./combine_docs_chain.js\";\nexport {\n ChatVectorDBQAChain,\n type ChatVectorDBQAChainInput,\n} from \"./chat_vector_db_chain.js\";\nexport {\n AnalyzeDocumentChain,\n type AnalyzeDocumentChainInput,\n} from \"./analyze_documents_chain.js\";\nexport { VectorDBQAChain, type VectorDBQAChainInput } from \"./vector_db_qa.js\";\nexport {\n loadQAChain,\n type QAChainParams,\n loadQAStuffChain,\n type StuffQAChainParams,\n loadQAMapReduceChain,\n type MapReduceQAChainParams,\n loadQARefineChain,\n type RefineQAChainParams,\n} from \"./question_answering/load.js\";\nexport {\n loadSummarizationChain,\n type SummarizationChainParams,\n} from \"./summarization/load.js\";\nexport {\n ConversationalRetrievalQAChain,\n type ConversationalRetrievalQAChainInput,\n} from \"./conversational_retrieval_chain.js\";\nexport {\n RetrievalQAChain,\n type RetrievalQAChainInput,\n} from \"./retrieval_qa.js\";\nexport {\n type ConstitutionalChainInput,\n ConstitutionalChain,\n} from \"./constitutional_ai/constitutional_chain.js\";\nexport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_ai/constitutional_principle.js\";\nexport type {\n SerializedLLMChain,\n SerializedSequentialChain,\n SerializedSimpleSequentialChain,\n SerializedAnalyzeDocumentChain,\n SerializedAPIChain,\n SerializedBaseChain,\n SerializedChatVectorDBQAChain,\n SerializedMapReduceDocumentsChain,\n SerializedStuffDocumentsChain,\n SerializedVectorDBQAChain,\n SerializedRefineDocumentsChain,\n} from \"./serde.js\";\nexport {
+
{"version":3,"file":"index.cjs","names":[],"sources":["../../src/chains/index.ts"],"sourcesContent":["export { BaseChain, type ChainInputs } from \"./base.js\";\nexport { LLMChain, type LLMChainInput } from \"./llm_chain.js\";\nexport {\n APIChain,\n type APIChainInput,\n type APIChainOptions,\n} from \"./api/api_chain.js\";\nexport { ConversationChain } from \"./conversation.js\";\nexport {\n SequentialChain,\n type SequentialChainInput,\n SimpleSequentialChain,\n type SimpleSequentialChainInput,\n} from \"./sequential_chain.js\";\nexport {\n StuffDocumentsChain,\n type StuffDocumentsChainInput,\n MapReduceDocumentsChain,\n type MapReduceDocumentsChainInput,\n RefineDocumentsChain,\n type RefineDocumentsChainInput,\n} from \"./combine_docs_chain.js\";\nexport {\n ChatVectorDBQAChain,\n type ChatVectorDBQAChainInput,\n} from \"./chat_vector_db_chain.js\";\nexport {\n AnalyzeDocumentChain,\n type AnalyzeDocumentChainInput,\n} from \"./analyze_documents_chain.js\";\nexport { VectorDBQAChain, type VectorDBQAChainInput } from \"./vector_db_qa.js\";\nexport {\n loadQAChain,\n type QAChainParams,\n loadQAStuffChain,\n type StuffQAChainParams,\n loadQAMapReduceChain,\n type MapReduceQAChainParams,\n loadQARefineChain,\n type RefineQAChainParams,\n} from \"./question_answering/load.js\";\nexport {\n loadSummarizationChain,\n type SummarizationChainParams,\n} from \"./summarization/load.js\";\nexport {\n ConversationalRetrievalQAChain,\n type ConversationalRetrievalQAChainInput,\n} from \"./conversational_retrieval_chain.js\";\nexport {\n RetrievalQAChain,\n type RetrievalQAChainInput,\n} from \"./retrieval_qa.js\";\nexport {\n type ConstitutionalChainInput,\n ConstitutionalChain,\n} from \"./constitutional_ai/constitutional_chain.js\";\nexport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_ai/constitutional_principle.js\";\nexport type {\n SerializedLLMChain,\n SerializedSequentialChain,\n SerializedSimpleSequentialChain,\n SerializedAnalyzeDocumentChain,\n SerializedAPIChain,\n SerializedBaseChain,\n SerializedChatVectorDBQAChain,\n SerializedMapReduceDocumentsChain,\n SerializedStuffDocumentsChain,\n SerializedVectorDBQAChain,\n SerializedRefineDocumentsChain,\n} from \"./serde.js\";\nexport {\n MultiRouteChain,\n type MultiRouteChainInput,\n RouterChain,\n} from \"./router/multi_route.js\";\nexport {\n LLMRouterChain,\n type LLMRouterChainInput,\n type RouterOutputSchema,\n} from \"./router/llm_router.js\";\nexport { MultiPromptChain } from \"./router/multi_prompt.js\";\nexport { MultiRetrievalQAChain } from \"./router/multi_retrieval_qa.js\";\nexport { TransformChain, type TransformChainFields } from \"./transform.js\";\nexport {\n createExtractionChain,\n createExtractionChainFromZod,\n} from \"./openai_functions/extraction.js\";\nexport {\n type TaggingChainOptions,\n createTaggingChain,\n createTaggingChainFromZod,\n} from \"./openai_functions/tagging.js\";\nexport {\n type OpenAPIChainOptions,\n createOpenAPIChain,\n convertOpenAPISpecToOpenAIFunctions,\n} from \"./openai_functions/openapi.js\";\n"],"mappings":""}
package/dist/chains/index.d.cts
CHANGED
@@ -14,7 +14,6 @@ import { ConversationalRetrievalQAChain, ConversationalRetrievalQAChainInput } f
 import { RetrievalQAChain, RetrievalQAChainInput } from "./retrieval_qa.cjs";
 import { ConstitutionalPrinciple, PRINCIPLES } from "./constitutional_ai/constitutional_principle.cjs";
 import { ConstitutionalChain, ConstitutionalChainInput } from "./constitutional_ai/constitutional_chain.cjs";
-import { OpenAIModerationChain } from "./openai_moderation.cjs";
 import { MultiRouteChain, MultiRouteChainInput, RouterChain } from "./router/multi_route.cjs";
 import { LLMRouterChain, LLMRouterChainInput, RouterOutputSchema } from "./router/llm_router.cjs";
 import { MultiPromptChain } from "./router/multi_prompt.cjs";
@@ -23,4 +22,4 @@ import { TransformChain, TransformChainFields } from "./transform.cjs";
 import { createExtractionChain, createExtractionChainFromZod } from "./openai_functions/extraction.cjs";
 import { TaggingChainOptions, createTaggingChain, createTaggingChainFromZod } from "./openai_functions/tagging.cjs";
 import { OpenAPIChainOptions, convertOpenAPISpecToOpenAIFunctions, createOpenAPIChain } from "./openai_functions/openapi.cjs";
-
export { APIChain, type APIChainInput, type APIChainOptions, AnalyzeDocumentChain, type AnalyzeDocumentChainInput, BaseChain, type ChainInputs, ChatVectorDBQAChain, type ChatVectorDBQAChainInput, ConstitutionalChain, type ConstitutionalChainInput, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, type ConversationalRetrievalQAChainInput, LLMChain, type LLMChainInput, LLMRouterChain, type LLMRouterChainInput, MapReduceDocumentsChain, type MapReduceDocumentsChainInput, type MapReduceQAChainParams, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain, type MultiRouteChainInput,
|
|
25
|
+
export { APIChain, type APIChainInput, type APIChainOptions, AnalyzeDocumentChain, type AnalyzeDocumentChainInput, BaseChain, type ChainInputs, ChatVectorDBQAChain, type ChatVectorDBQAChainInput, ConstitutionalChain, type ConstitutionalChainInput, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, type ConversationalRetrievalQAChainInput, LLMChain, type LLMChainInput, LLMRouterChain, type LLMRouterChainInput, MapReduceDocumentsChain, type MapReduceDocumentsChainInput, type MapReduceQAChainParams, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain, type MultiRouteChainInput, type OpenAPIChainOptions, PRINCIPLES, type QAChainParams, RefineDocumentsChain, type RefineDocumentsChainInput, type RefineQAChainParams, RetrievalQAChain, type RetrievalQAChainInput, RouterChain, type RouterOutputSchema, SequentialChain, type SequentialChainInput, type SerializedAPIChain, type SerializedAnalyzeDocumentChain, type SerializedBaseChain, type SerializedChatVectorDBQAChain, type SerializedLLMChain, type SerializedMapReduceDocumentsChain, type SerializedRefineDocumentsChain, type SerializedSequentialChain, type SerializedSimpleSequentialChain, type SerializedStuffDocumentsChain, type SerializedVectorDBQAChain, SimpleSequentialChain, type SimpleSequentialChainInput, StuffDocumentsChain, type StuffDocumentsChainInput, type StuffQAChainParams, type SummarizationChainParams, type TaggingChainOptions, TransformChain, type TransformChainFields, VectorDBQAChain, type VectorDBQAChainInput, convertOpenAPISpecToOpenAIFunctions, createExtractionChain, createExtractionChainFromZod, createOpenAPIChain, createTaggingChain, createTaggingChainFromZod, loadQAChain, loadQAMapReduceChain, loadQARefineChain, loadQAStuffChain, loadSummarizationChain };
|
package/dist/chains/index.d.ts
CHANGED
|
@@ -14,7 +14,6 @@ import { ConversationalRetrievalQAChain, ConversationalRetrievalQAChainInput } f
|
|
|
14
14
|
import { RetrievalQAChain, RetrievalQAChainInput } from "./retrieval_qa.js";
|
|
15
15
|
import { ConstitutionalPrinciple, PRINCIPLES } from "./constitutional_ai/constitutional_principle.js";
|
|
16
16
|
import { ConstitutionalChain, ConstitutionalChainInput } from "./constitutional_ai/constitutional_chain.js";
|
|
17
|
-
import { OpenAIModerationChain } from "./openai_moderation.js";
|
|
18
17
|
import { MultiRouteChain, MultiRouteChainInput, RouterChain } from "./router/multi_route.js";
|
|
19
18
|
import { LLMRouterChain, LLMRouterChainInput, RouterOutputSchema } from "./router/llm_router.js";
|
|
20
19
|
import { MultiPromptChain } from "./router/multi_prompt.js";
|
|
@@ -23,4 +22,4 @@ import { TransformChain, TransformChainFields } from "./transform.js";
|
|
|
23
22
|
import { createExtractionChain, createExtractionChainFromZod } from "./openai_functions/extraction.js";
|
|
24
23
|
import { TaggingChainOptions, createTaggingChain, createTaggingChainFromZod } from "./openai_functions/tagging.js";
|
|
25
24
|
import { OpenAPIChainOptions, convertOpenAPISpecToOpenAIFunctions, createOpenAPIChain } from "./openai_functions/openapi.js";
|
|
26
|
-
export { APIChain, type APIChainInput, type APIChainOptions, AnalyzeDocumentChain, type AnalyzeDocumentChainInput, BaseChain, type ChainInputs, ChatVectorDBQAChain, type ChatVectorDBQAChainInput, ConstitutionalChain, type ConstitutionalChainInput, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, type ConversationalRetrievalQAChainInput, LLMChain, type LLMChainInput, LLMRouterChain, type LLMRouterChainInput, MapReduceDocumentsChain, type MapReduceDocumentsChainInput, type MapReduceQAChainParams, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain, type MultiRouteChainInput,
|
|
25
|
+
export { APIChain, type APIChainInput, type APIChainOptions, AnalyzeDocumentChain, type AnalyzeDocumentChainInput, BaseChain, type ChainInputs, ChatVectorDBQAChain, type ChatVectorDBQAChainInput, ConstitutionalChain, type ConstitutionalChainInput, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, type ConversationalRetrievalQAChainInput, LLMChain, type LLMChainInput, LLMRouterChain, type LLMRouterChainInput, MapReduceDocumentsChain, type MapReduceDocumentsChainInput, type MapReduceQAChainParams, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain, type MultiRouteChainInput, type OpenAPIChainOptions, PRINCIPLES, type QAChainParams, RefineDocumentsChain, type RefineDocumentsChainInput, type RefineQAChainParams, RetrievalQAChain, type RetrievalQAChainInput, RouterChain, type RouterOutputSchema, SequentialChain, type SequentialChainInput, type SerializedAPIChain, type SerializedAnalyzeDocumentChain, type SerializedBaseChain, type SerializedChatVectorDBQAChain, type SerializedLLMChain, type SerializedMapReduceDocumentsChain, type SerializedRefineDocumentsChain, type SerializedSequentialChain, type SerializedSimpleSequentialChain, type SerializedStuffDocumentsChain, type SerializedVectorDBQAChain, SimpleSequentialChain, type SimpleSequentialChainInput, StuffDocumentsChain, type StuffDocumentsChainInput, type StuffQAChainParams, type SummarizationChainParams, type TaggingChainOptions, TransformChain, type TransformChainFields, VectorDBQAChain, type VectorDBQAChainInput, convertOpenAPISpecToOpenAIFunctions, createExtractionChain, createExtractionChainFromZod, createOpenAPIChain, createTaggingChain, createTaggingChainFromZod, loadQAChain, loadQAMapReduceChain, loadQARefineChain, loadQAStuffChain, loadSummarizationChain };
|
package/dist/chains/index.js
CHANGED
|
@@ -14,7 +14,6 @@ import { ConversationalRetrievalQAChain } from "./conversational_retrieval_chain
|
|
|
14
14
|
import { RetrievalQAChain } from "./retrieval_qa.js";
|
|
15
15
|
import { ConstitutionalPrinciple, PRINCIPLES } from "./constitutional_ai/constitutional_principle.js";
|
|
16
16
|
import { ConstitutionalChain } from "./constitutional_ai/constitutional_chain.js";
|
|
17
|
-
import { OpenAIModerationChain } from "./openai_moderation.js";
|
|
18
17
|
import { MultiRouteChain, RouterChain } from "./router/multi_route.js";
|
|
19
18
|
import { LLMRouterChain } from "./router/llm_router.js";
|
|
20
19
|
import { MultiPromptChain } from "./router/multi_prompt.js";
|
|
@@ -41,7 +40,6 @@ __export(chains_exports, {
|
|
|
41
40
|
MultiPromptChain: () => MultiPromptChain,
|
|
42
41
|
MultiRetrievalQAChain: () => MultiRetrievalQAChain,
|
|
43
42
|
MultiRouteChain: () => MultiRouteChain,
|
|
44
|
-
OpenAIModerationChain: () => OpenAIModerationChain,
|
|
45
43
|
PRINCIPLES: () => PRINCIPLES,
|
|
46
44
|
RefineDocumentsChain: () => RefineDocumentsChain,
|
|
47
45
|
RetrievalQAChain: () => RetrievalQAChain,
|
|
@@ -65,5 +63,5 @@ __export(chains_exports, {
|
|
|
65
63
|
});
|
|
66
64
|
|
|
67
65
|
//#endregion
|
|
68
|
-
export { APIChain, AnalyzeDocumentChain, BaseChain, ChatVectorDBQAChain, ConstitutionalChain, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, LLMChain, LLMRouterChain, MapReduceDocumentsChain, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain,
|
|
66
|
+
export { APIChain, AnalyzeDocumentChain, BaseChain, ChatVectorDBQAChain, ConstitutionalChain, ConstitutionalPrinciple, ConversationChain, ConversationalRetrievalQAChain, LLMChain, LLMRouterChain, MapReduceDocumentsChain, MultiPromptChain, MultiRetrievalQAChain, MultiRouteChain, PRINCIPLES, RefineDocumentsChain, RetrievalQAChain, RouterChain, SequentialChain, SimpleSequentialChain, StuffDocumentsChain, TransformChain, VectorDBQAChain, chains_exports, convertOpenAPISpecToOpenAIFunctions, createExtractionChain, createExtractionChainFromZod, createOpenAPIChain, createTaggingChain, createTaggingChainFromZod, loadQAChain, loadQAMapReduceChain, loadQARefineChain, loadQAStuffChain, loadSummarizationChain };
|
|
69
67
|
//# sourceMappingURL=index.js.map
|
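Note on the barrel-export change above (editorial, not part of the package diff): the updated export statements in dist/chains/index.d.cts, index.d.ts, and index.js no longer list OpenAIModerationChain, while the remaining chain classes, loaders, and factory helpers are still exported. A minimal consumer-side sketch of the surviving surface follows; the "langchain/chains" import specifier and the ChatOpenAI model are illustrative assumptions, not taken from this diff.

// Sketch only: the exported names below come from the updated export list shown above;
// the import specifier and model choice are assumptions for illustration.
import { LLMChain } from "langchain/chains";
import { ChatOpenAI } from "@langchain/openai";
import { PromptTemplate } from "@langchain/core/prompts";

const llm = new ChatOpenAI({ model: "gpt-4o-mini" });

// LLMChain is still exported by the chains barrel, so existing code like this keeps resolving.
const chain = new LLMChain({
  llm,
  prompt: PromptTemplate.fromTemplate("Summarize the following text:\n\n{input}"),
});

// OpenAIModerationChain, by contrast, no longer appears in the export statements shown
// above, so a named import of it from this barrel would fail against these typings.
// Whether it is re-exported from another entry point is not shown in this diff.
|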
package/dist/chains/index.js.map
CHANGED
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"index.js","names":[],"sources":["../../src/chains/index.ts"],"sourcesContent":["export { BaseChain, type ChainInputs } from \"./base.js\";\nexport { LLMChain, type LLMChainInput } from \"./llm_chain.js\";\nexport {\n APIChain,\n type APIChainInput,\n type APIChainOptions,\n} from \"./api/api_chain.js\";\nexport { ConversationChain } from \"./conversation.js\";\nexport {\n SequentialChain,\n type SequentialChainInput,\n SimpleSequentialChain,\n type SimpleSequentialChainInput,\n} from \"./sequential_chain.js\";\nexport {\n StuffDocumentsChain,\n type StuffDocumentsChainInput,\n MapReduceDocumentsChain,\n type MapReduceDocumentsChainInput,\n RefineDocumentsChain,\n type RefineDocumentsChainInput,\n} from \"./combine_docs_chain.js\";\nexport {\n ChatVectorDBQAChain,\n type ChatVectorDBQAChainInput,\n} from \"./chat_vector_db_chain.js\";\nexport {\n AnalyzeDocumentChain,\n type AnalyzeDocumentChainInput,\n} from \"./analyze_documents_chain.js\";\nexport { VectorDBQAChain, type VectorDBQAChainInput } from \"./vector_db_qa.js\";\nexport {\n loadQAChain,\n type QAChainParams,\n loadQAStuffChain,\n type StuffQAChainParams,\n loadQAMapReduceChain,\n type MapReduceQAChainParams,\n loadQARefineChain,\n type RefineQAChainParams,\n} from \"./question_answering/load.js\";\nexport {\n loadSummarizationChain,\n type SummarizationChainParams,\n} from \"./summarization/load.js\";\nexport {\n ConversationalRetrievalQAChain,\n type ConversationalRetrievalQAChainInput,\n} from \"./conversational_retrieval_chain.js\";\nexport {\n RetrievalQAChain,\n type RetrievalQAChainInput,\n} from \"./retrieval_qa.js\";\nexport {\n type ConstitutionalChainInput,\n ConstitutionalChain,\n} from \"./constitutional_ai/constitutional_chain.js\";\nexport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_ai/constitutional_principle.js\";\nexport type {\n SerializedLLMChain,\n SerializedSequentialChain,\n SerializedSimpleSequentialChain,\n SerializedAnalyzeDocumentChain,\n SerializedAPIChain,\n SerializedBaseChain,\n SerializedChatVectorDBQAChain,\n SerializedMapReduceDocumentsChain,\n SerializedStuffDocumentsChain,\n SerializedVectorDBQAChain,\n SerializedRefineDocumentsChain,\n} from \"./serde.js\";\nexport {
|
|
1
|
+
{"version":3,"file":"index.js","names":[],"sources":["../../src/chains/index.ts"],"sourcesContent":["export { BaseChain, type ChainInputs } from \"./base.js\";\nexport { LLMChain, type LLMChainInput } from \"./llm_chain.js\";\nexport {\n APIChain,\n type APIChainInput,\n type APIChainOptions,\n} from \"./api/api_chain.js\";\nexport { ConversationChain } from \"./conversation.js\";\nexport {\n SequentialChain,\n type SequentialChainInput,\n SimpleSequentialChain,\n type SimpleSequentialChainInput,\n} from \"./sequential_chain.js\";\nexport {\n StuffDocumentsChain,\n type StuffDocumentsChainInput,\n MapReduceDocumentsChain,\n type MapReduceDocumentsChainInput,\n RefineDocumentsChain,\n type RefineDocumentsChainInput,\n} from \"./combine_docs_chain.js\";\nexport {\n ChatVectorDBQAChain,\n type ChatVectorDBQAChainInput,\n} from \"./chat_vector_db_chain.js\";\nexport {\n AnalyzeDocumentChain,\n type AnalyzeDocumentChainInput,\n} from \"./analyze_documents_chain.js\";\nexport { VectorDBQAChain, type VectorDBQAChainInput } from \"./vector_db_qa.js\";\nexport {\n loadQAChain,\n type QAChainParams,\n loadQAStuffChain,\n type StuffQAChainParams,\n loadQAMapReduceChain,\n type MapReduceQAChainParams,\n loadQARefineChain,\n type RefineQAChainParams,\n} from \"./question_answering/load.js\";\nexport {\n loadSummarizationChain,\n type SummarizationChainParams,\n} from \"./summarization/load.js\";\nexport {\n ConversationalRetrievalQAChain,\n type ConversationalRetrievalQAChainInput,\n} from \"./conversational_retrieval_chain.js\";\nexport {\n RetrievalQAChain,\n type RetrievalQAChainInput,\n} from \"./retrieval_qa.js\";\nexport {\n type ConstitutionalChainInput,\n ConstitutionalChain,\n} from \"./constitutional_ai/constitutional_chain.js\";\nexport {\n ConstitutionalPrinciple,\n PRINCIPLES,\n} from \"./constitutional_ai/constitutional_principle.js\";\nexport type {\n SerializedLLMChain,\n SerializedSequentialChain,\n SerializedSimpleSequentialChain,\n SerializedAnalyzeDocumentChain,\n SerializedAPIChain,\n SerializedBaseChain,\n SerializedChatVectorDBQAChain,\n SerializedMapReduceDocumentsChain,\n SerializedStuffDocumentsChain,\n SerializedVectorDBQAChain,\n SerializedRefineDocumentsChain,\n} from \"./serde.js\";\nexport {\n MultiRouteChain,\n type MultiRouteChainInput,\n RouterChain,\n} from \"./router/multi_route.js\";\nexport {\n LLMRouterChain,\n type LLMRouterChainInput,\n type RouterOutputSchema,\n} from \"./router/llm_router.js\";\nexport { MultiPromptChain } from \"./router/multi_prompt.js\";\nexport { MultiRetrievalQAChain } from \"./router/multi_retrieval_qa.js\";\nexport { TransformChain, type TransformChainFields } from \"./transform.js\";\nexport {\n createExtractionChain,\n createExtractionChainFromZod,\n} from \"./openai_functions/extraction.js\";\nexport {\n type TaggingChainOptions,\n createTaggingChain,\n createTaggingChainFromZod,\n} from \"./openai_functions/tagging.js\";\nexport {\n type OpenAPIChainOptions,\n createOpenAPIChain,\n convertOpenAPISpecToOpenAIFunctions,\n} from \"./openai_functions/openapi.js\";\n"],"mappings":""}
|
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
import { SerializedLLMChain } from "./serde.cjs";
|
|
2
2
|
import { BaseChain, ChainInputs } from "./base.cjs";
|
|
3
3
|
import { BaseLanguageModelInput, BaseLanguageModelInterface } from "@langchain/core/language_models/base";
|
|
4
|
+
import { ChainValues } from "@langchain/core/utils/types";
|
|
5
|
+
import { BaseMessage } from "@langchain/core/messages";
|
|
6
|
+
import { Runnable } from "@langchain/core/runnables";
|
|
4
7
|
import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
|
|
5
8
|
import { BasePromptTemplate } from "@langchain/core/prompts";
|
|
6
|
-
import { Runnable } from "@langchain/core/runnables";
|
|
7
|
-
import { ChainValues } from "@langchain/core/utils/types";
|
|
8
9
|
import { BaseCallbackConfig, CallbackManager, CallbackManagerForChainRun, Callbacks } from "@langchain/core/callbacks/manager";
|
|
9
10
|
import { Generation } from "@langchain/core/outputs";
|
|
10
|
-
import { BaseMessage } from "@langchain/core/messages";
|
|
11
11
|
import { BasePromptValueInterface } from "@langchain/core/prompt_values";
|
|
12
12
|
|
|
13
13
|
//#region src/chains/llm_chain.d.ts
|
|
@@ -1,9 +1,9 @@
|
|
|
1
1
|
import { BaseFunctionCallOptions, BaseLanguageModelInput, FunctionDefinition } from "@langchain/core/language_models/base";
|
|
2
|
-
import { BaseOutputParser } from "@langchain/core/output_parsers";
|
|
3
|
-
import { BasePromptTemplate } from "@langchain/core/prompts";
|
|
4
|
-
import { Runnable, RunnableInterface } from "@langchain/core/runnables";
|
|
5
2
|
import { InputValues, InteropZodObject } from "@langchain/core/utils/types";
|
|
6
3
|
import { BaseMessage } from "@langchain/core/messages";
|
|
4
|
+
import { Runnable, RunnableInterface } from "@langchain/core/runnables";
|
|
5
|
+
import { BaseOutputParser } from "@langchain/core/output_parsers";
|
|
6
|
+
import { BasePromptTemplate } from "@langchain/core/prompts";
|
|
7
7
|
import { JsonSchema7Type } from "@langchain/core/utils/json_schema";
|
|
8
8
|
|
|
9
9
|
//#region src/chains/openai_functions/base.d.ts
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"extraction.cjs","names":["schema: FunctionParameters","llm: BaseChatModel<BaseFunctionCallOptions>","PromptTemplate","JsonKeyOutputFunctionsParser","LLMChain","schema: InteropZodObject"],"sources":["../../../src/chains/openai_functions/extraction.ts"],"sourcesContent":["import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport { PromptTemplate } from \"@langchain/core/prompts\";\nimport { InteropZodObject } from \"@langchain/core/utils/types\";\nimport type { AIMessageChunk } from \"@langchain/core/messages\";\nimport {\n type JsonSchema7ObjectType,\n toJsonSchema,\n} from \"@langchain/core/utils/json_schema\";\nimport {\n FunctionParameters,\n JsonKeyOutputFunctionsParser,\n} from \"../../output_parsers/openai_functions.js\";\nimport { LLMChain } from \"../llm_chain.js\";\n\n/**\n * Function that returns an array of extraction functions. These functions\n * are used to extract relevant information from a passage.\n * @param schema The schema of the function parameters.\n * @returns An array of extraction functions.\n */\nfunction getExtractionFunctions(schema: FunctionParameters) {\n return [\n {\n name: \"information_extraction\",\n description: \"Extracts the relevant information from the passage.\",\n parameters: {\n type: \"object\",\n properties: {\n info: {\n type: \"array\",\n items: {\n type: schema.type,\n properties: schema.properties,\n required: schema.required,\n },\n },\n },\n required: [\"info\"],\n },\n },\n ];\n}\n\nconst _EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned in the following passage together with their properties.\n\nPassage:\n{input}\n`;\n\n/**\n * Function that creates an extraction chain using the provided JSON schema.\n * It sets up the necessary components, such as the prompt, output parser, and tags.\n * @param schema JSON schema of the function parameters.\n * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.\n * @returns A LLMChain instance configured to return data matching the schema.\n */\nexport function createExtractionChain(\n schema: FunctionParameters,\n llm: BaseChatModel<BaseFunctionCallOptions>\n): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>> {\n const functions = getExtractionFunctions(schema);\n const prompt = PromptTemplate.fromTemplate(_EXTRACTION_TEMPLATE);\n const outputParser = new JsonKeyOutputFunctionsParser({ attrName: \"info\" });\n return new LLMChain({\n llm,\n prompt,\n llmKwargs: { functions },\n outputParser,\n tags: [\"openai_functions\", \"extraction\"],\n });\n}\n\n/**\n * Function that creates an extraction chain from a Zod schema. It\n * converts the Zod schema to a JSON schema using before creating\n * the extraction chain.\n * @param schema The Zod schema which extracted data should match\n * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.\n * @returns A LLMChain instance configured to return data matching the schema.\n */\nexport function createExtractionChainFromZod(\n
|
|
1
|
+
{"version":3,"file":"extraction.cjs","names":["schema: FunctionParameters","llm: BaseChatModel<BaseFunctionCallOptions>","PromptTemplate","JsonKeyOutputFunctionsParser","LLMChain","schema: InteropZodObject"],"sources":["../../../src/chains/openai_functions/extraction.ts"],"sourcesContent":["import { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport { PromptTemplate } from \"@langchain/core/prompts\";\nimport { InteropZodObject } from \"@langchain/core/utils/types\";\nimport type { AIMessageChunk } from \"@langchain/core/messages\";\nimport {\n type JsonSchema7ObjectType,\n toJsonSchema,\n} from \"@langchain/core/utils/json_schema\";\nimport {\n FunctionParameters,\n JsonKeyOutputFunctionsParser,\n} from \"../../output_parsers/openai_functions.js\";\nimport { LLMChain } from \"../llm_chain.js\";\n\n/**\n * Function that returns an array of extraction functions. These functions\n * are used to extract relevant information from a passage.\n * @param schema The schema of the function parameters.\n * @returns An array of extraction functions.\n */\nfunction getExtractionFunctions(schema: FunctionParameters) {\n return [\n {\n name: \"information_extraction\",\n description: \"Extracts the relevant information from the passage.\",\n parameters: {\n type: \"object\",\n properties: {\n info: {\n type: \"array\",\n items: {\n type: schema.type,\n properties: schema.properties,\n required: schema.required,\n },\n },\n },\n required: [\"info\"],\n },\n },\n ];\n}\n\nconst _EXTRACTION_TEMPLATE = `Extract and save the relevant entities mentioned in the following passage together with their properties.\n\nPassage:\n{input}\n`;\n\n/**\n * Function that creates an extraction chain using the provided JSON schema.\n * It sets up the necessary components, such as the prompt, output parser, and tags.\n * @param schema JSON schema of the function parameters.\n * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.\n * @returns A LLMChain instance configured to return data matching the schema.\n */\nexport function createExtractionChain(\n schema: FunctionParameters,\n llm: BaseChatModel<BaseFunctionCallOptions>\n): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>> {\n const functions = getExtractionFunctions(schema);\n const prompt = PromptTemplate.fromTemplate(_EXTRACTION_TEMPLATE);\n const outputParser = new JsonKeyOutputFunctionsParser({ attrName: \"info\" });\n return new LLMChain({\n llm,\n prompt,\n llmKwargs: { functions },\n outputParser,\n tags: [\"openai_functions\", \"extraction\"],\n });\n}\n\n/**\n * Function that creates an extraction chain from a Zod schema. 
It\n * converts the Zod schema to a JSON schema using before creating\n * the extraction chain.\n * @param schema The Zod schema which extracted data should match\n * @param llm Must be a ChatOpenAI or AnthropicFunctions model that supports function calling.\n * @returns A LLMChain instance configured to return data matching the schema.\n */\nexport function createExtractionChainFromZod(\n schema: InteropZodObject,\n llm: BaseChatModel<BaseFunctionCallOptions>\n): LLMChain<object, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>> {\n return createExtractionChain(\n toJsonSchema(schema) as JsonSchema7ObjectType,\n llm\n );\n}\n"],"mappings":";;;;;;;;;;;;;AAqBA,SAAS,uBAAuBA,QAA4B;AAC1D,QAAO,CACL;EACE,MAAM;EACN,aAAa;EACb,YAAY;GACV,MAAM;GACN,YAAY,EACV,MAAM;IACJ,MAAM;IACN,OAAO;KACL,MAAM,OAAO;KACb,YAAY,OAAO;KACnB,UAAU,OAAO;IAClB;GACF,EACF;GACD,UAAU,CAAC,MAAO;EACnB;CACF,CACF;AACF;AAED,MAAM,uBAAuB,CAAC;;;;AAI9B,CAAC;;;;;;;;AASD,SAAgB,sBACdA,QACAC,KAC0E;CAC1E,MAAM,YAAY,uBAAuB,OAAO;CAChD,MAAM,SAASC,wCAAe,aAAa,qBAAqB;CAChE,MAAM,eAAe,IAAIC,sDAA6B,EAAE,UAAU,OAAQ;AAC1E,QAAO,IAAIC,2BAAS;EAClB;EACA;EACA,WAAW,EAAE,UAAW;EACxB;EACA,MAAM,CAAC,oBAAoB,YAAa;CACzC;AACF;;;;;;;;;AAUD,SAAgB,6BACdC,QACAJ,KAC0E;AAC1E,QAAO,2EACQ,OAAO,EACpB,IACD;AACF"}
|
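For reference (editorial, not part of the package diff): the source embedded in the map above shows the createExtractionChainFromZod(schema, llm) signature and that it delegates to createExtractionChain after converting the Zod schema to JSON schema. A minimal usage sketch follows; the "langchain/chains" import specifier and the ChatOpenAI model are assumptions for illustration only.

// Sketch only: mirrors the signature in the embedded source above; the import
// specifier and model choice are illustrative assumptions, not taken from this diff.
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { createExtractionChainFromZod } from "langchain/chains";

const personSchema = z.object({
  name: z.string().describe("The person's name"),
  age: z.number().optional().describe("The person's age, if mentioned"),
});

const llm = new ChatOpenAI({ model: "gpt-4o-mini" });

// Per the embedded source, the Zod schema is converted to JSON schema, wrapped in an
// "information_extraction" function definition, and the model's function-call output
// is parsed out of the "info" key by JsonKeyOutputFunctionsParser.
const chain = createExtractionChainFromZod(personSchema, llm);
const result = await chain.invoke({ input: "Alice is 30 and lives next door to Bob, 25." });
console.log(result);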