langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
langchain/chains/sequential.py
CHANGED
@@ -48,17 +48,18 @@ class SequentialChain(Chain):
         """Validate that the correct inputs exist for all chains."""
         chains = values["chains"]
         input_variables = values["input_variables"]
-        memory_keys = list()
+        memory_keys = []
         if "memory" in values and values["memory"] is not None:
             """Validate that prompt input variables are consistent."""
             memory_keys = values["memory"].memory_variables
             if set(input_variables).intersection(set(memory_keys)):
                 overlapping_keys = set(input_variables) & set(memory_keys)
-                raise ValueError(
+                msg = (
                     f"The input key(s) {''.join(overlapping_keys)} are found "
                     f"in the Memory keys ({memory_keys}) - please use input and "
                     f"memory keys that don't overlap."
                 )
+                raise ValueError(msg)

         known_variables = set(input_variables + memory_keys)

@@ -68,15 +69,15 @@ class SequentialChain(Chain):
                 missing_vars = missing_vars.difference(chain.memory.memory_variables)

             if missing_vars:
-                raise ValueError(
+                msg = (
                     f"Missing required input keys: {missing_vars}, "
                     f"only had {known_variables}"
                 )
+                raise ValueError(msg)
             overlapping_keys = known_variables.intersection(chain.output_keys)
             if overlapping_keys:
-                raise ValueError(
-                    f"Chain returned keys that already exist: {overlapping_keys}"
-                )
+                msg = f"Chain returned keys that already exist: {overlapping_keys}"
+                raise ValueError(msg)

             known_variables |= set(chain.output_keys)

@@ -89,9 +90,8 @@ class SequentialChain(Chain):
         else:
             missing_vars = set(values["output_variables"]).difference(known_variables)
             if missing_vars:
-                raise ValueError(
-                    f"Expected output variables that were not found: {missing_vars}."
-                )
+                msg = f"Expected output variables that were not found: {missing_vars}."
+                raise ValueError(msg)

         return values

@@ -102,7 +102,7 @@ class SequentialChain(Chain):
     ) -> dict[str, str]:
         known_values = inputs.copy()
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
-        for i, chain in enumerate(self.chains):
+        for _i, chain in enumerate(self.chains):
             callbacks = _run_manager.get_child()
             outputs = chain(known_values, return_only_outputs=True, callbacks=callbacks)
             known_values.update(outputs)
@@ -116,9 +116,11 @@ class SequentialChain(Chain):
         known_values = inputs.copy()
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         callbacks = _run_manager.get_child()
-        for i, chain in enumerate(self.chains):
+        for _i, chain in enumerate(self.chains):
             outputs = await chain.acall(
-                known_values, return_only_outputs=True, callbacks=callbacks
+                known_values,
+                return_only_outputs=True,
+                callbacks=callbacks,
             )
             known_values.update(outputs)
         return {k: known_values[k] for k in self.output_variables}
@@ -158,15 +160,17 @@ class SimpleSequentialChain(Chain):
         """Validate that chains are all single input/output."""
         for chain in self.chains:
             if len(chain.input_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Chains used in SimplePipeline should all have one input, got "
                     f"{chain} with {len(chain.input_keys)} inputs."
                 )
+                raise ValueError(msg)
             if len(chain.output_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Chains used in SimplePipeline should all have one output, got "
                     f"{chain} with {len(chain.output_keys)} outputs."
                 )
+                raise ValueError(msg)
         return self

     def _call(
@@ -179,12 +183,16 @@ class SimpleSequentialChain(Chain):
         color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
         for i, chain in enumerate(self.chains):
             _input = chain.run(
-                _input, callbacks=_run_manager.get_child(f"step_{i + 1}")
+                _input,
+                callbacks=_run_manager.get_child(f"step_{i + 1}"),
             )
             if self.strip_outputs:
                 _input = _input.strip()
             _run_manager.on_text(
-                _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
+                _input,
+                color=color_mapping[str(i)],
+                end="\n",
+                verbose=self.verbose,
             )
         return {self.output_key: _input}

@@ -198,11 +206,15 @@ class SimpleSequentialChain(Chain):
         color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
         for i, chain in enumerate(self.chains):
             _input = await chain.arun(
-                _input, callbacks=_run_manager.get_child(f"step_{i + 1}")
+                _input,
+                callbacks=_run_manager.get_child(f"step_{i + 1}"),
             )
             if self.strip_outputs:
                 _input = _input.strip()
             await _run_manager.on_text(
-                _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
+                _input,
+                color=color_mapping[str(i)],
+                end="\n",
+                verbose=self.verbose,
             )
         return {self.output_key: _input}
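The recurring edit in the hunks above is a mechanical lint-driven refactor: the exception message is bound to a local `msg` before raising, instead of being built inline inside `raise ValueError(...)`. A minimal, self-contained sketch of the before/after shape; the `validate_no_overlap` function and its arguments are illustrative only, not code from the package:

def validate_no_overlap(input_keys: set[str], memory_keys: set[str]) -> None:
    """Raise if chain input keys collide with memory keys (illustrative only)."""
    overlapping = input_keys & memory_keys
    if overlapping:
        # 0.4.x style: bind the message to a variable, then raise it.
        # The 0.3.x style built the same f-string directly inside raise ValueError(...).
        msg = (
            f"The input key(s) {', '.join(sorted(overlapping))} are found in the "
            f"memory keys ({sorted(memory_keys)}) - please use keys that don't overlap."
        )
        raise ValueError(msg)


validate_no_overlap({"question"}, {"chat_history"})  # no overlap, passes silently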
langchain/chains/sql_database/prompt.py
CHANGED

@@ -1,8 +1,6 @@
-# flake8: noqa
 from langchain_core.output_parsers.list import CommaSeparatedListOutputParser
 from langchain_core.prompts.prompt import PromptTemplate

-
 PROMPT_SUFFIX = """Only use the following tables:
 {table_info}

@@ -21,7 +19,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "dialect", "top_k"],
@@ -35,7 +33,7 @@ Question: {query}

 Table Names: {table_names}

-Relevant Table Names:"""
+Relevant Table Names:"""  # noqa: E501
 DECIDER_PROMPT = PromptTemplate(
     input_variables=["query", "table_names"],
     template=_DECIDER_TEMPLATE,
@@ -46,7 +44,7 @@ _cratedb_prompt = """You are a CrateDB expert. Given an input question, first cr
 Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per CrateDB. You can order the results to return the most informative data in the database.
 Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
 Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
-Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today".
+Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today".

 Use the following format:

@@ -55,7 +53,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 CRATEDB_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -75,7 +73,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 DUCKDB_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -95,7 +93,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 GOOGLESQL_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -116,7 +114,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 MSSQL_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -137,7 +135,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 MYSQL_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -158,7 +156,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 MARIADB_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -179,7 +177,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 ORACLE_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -200,7 +198,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 POSTGRES_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -221,7 +219,7 @@ SQLQuery: SQL Query to run
 SQLResult: Result of the SQLQuery
 Answer: Final answer here

-"""
+"""  # noqa: E501

 SQLITE_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -241,7 +239,7 @@ SQLQuery: "SQL Query to run"
 SQLResult: "Result of the SQLQuery"
 Answer: "Final answer here"

-"""
+"""  # noqa: E501

 CLICKHOUSE_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
@@ -261,7 +259,7 @@ SQLQuery: "SQL Query to run"
 SQLResult: "Result of the SQLQuery"
 Answer: "Final answer here"

-"""
+"""  # noqa: E501

 PRESTODB_PROMPT = PromptTemplate(
     input_variables=["input", "table_info", "top_k"],
langchain/chains/sql_database/query.py
CHANGED

@@ -113,6 +113,7 @@ def create_sql_query_chain(

         Question: {input}'''
         prompt = PromptTemplate.from_template(template)
+
     """  # noqa: E501
     if prompt is not None:
         prompt_to_use = prompt
@@ -121,26 +122,27 @@ def create_sql_query_chain(
     else:
         prompt_to_use = PROMPT
     if {"input", "top_k", "table_info"}.difference(
-        prompt_to_use.input_variables + list(prompt_to_use.partial_variables)
+        prompt_to_use.input_variables + list(prompt_to_use.partial_variables),
     ):
-        raise ValueError(
+        msg = (
             f"Prompt must have input variables: 'input', 'top_k', "
             f"'table_info'. Received prompt with input variables: "
             f"{prompt_to_use.input_variables}. Full prompt:\n\n{prompt_to_use}"
         )
+        raise ValueError(msg)
     if "dialect" in prompt_to_use.input_variables:
         prompt_to_use = prompt_to_use.partial(dialect=db.dialect)

     table_info_kwargs = {}
     if get_col_comments:
         if db.dialect not in ("postgresql", "mysql", "oracle"):
-            raise ValueError(
+            msg = (
                 f"get_col_comments=True is only supported for dialects "
                 f"'postgresql', 'mysql', and 'oracle'. Received dialect: "
                 f"{db.dialect}"
             )
-        else:
-            table_info_kwargs["get_col_comments"] = True
+            raise ValueError(msg)
+        table_info_kwargs["get_col_comments"] = True

     inputs = {
         "input": lambda x: x["question"] + "\nSQLQuery: ",
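For orientation on the validation that the `create_sql_query_chain` hunk above tightens: a custom prompt must expose the `input`, `top_k`, and `table_info` variables (with `dialect` optional), otherwise the `ValueError` shown in the diff is raised. A hedged usage sketch under those constraints; the database URI, model name, and the companion packages (`langchain_community`, `langchain_openai`) are assumptions, not part of this diff:

from langchain.chains import create_sql_query_chain
from langchain_community.utilities import SQLDatabase  # assumed companion package
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI  # assumed companion package

db = SQLDatabase.from_uri("sqlite:///example.db")  # hypothetical local database
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)  # hypothetical model choice

# The prompt must declare 'input', 'top_k', and 'table_info'; if it also declares
# 'dialect', create_sql_query_chain fills that in from db.dialect automatically.
custom_prompt = PromptTemplate.from_template(
    "You write {dialect} queries.\n"
    "Only use these tables:\n{table_info}\n\n"
    "Return at most {top_k} rows.\n"
    "Question: {input}\n"
    "SQLQuery:"
)

chain = create_sql_query_chain(llm, db, prompt=custom_prompt, k=5)
print(chain.invoke({"question": "How many users signed up last week?"}))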
langchain/chains/structured_output/base.py
CHANGED

@@ -47,18 +47,18 @@ from pydantic import BaseModel
        """
            from pydantic import BaseModel, Field
            from langchain_anthropic import ChatAnthropic
-
+
            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
            # Or any other chat model that supports tools.
            # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
            # with_structured_output.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                Make sure to call the Joke function.")
            """
    ),
@@ -132,9 +132,11 @@ def create_openai_fn_runnable(
            structured_llm = create_openai_fn_runnable([RecordPerson, RecordDog], llm)
            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken)
            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
+
    """  # noqa: E501
    if not functions:
-        raise ValueError("Need to pass in at least one function. Received zero.")
+        msg = "Need to pass in at least one function. Received zero."
+        raise ValueError(msg)
    openai_functions = [convert_to_openai_function(f) for f in functions]
    llm_kwargs_: dict[str, Any] = {"functions": openai_functions, **llm_kwargs}
    if len(openai_functions) == 1 and enforce_single_function_usage:
@@ -142,8 +144,7 @@ def create_openai_fn_runnable(
    output_parser = output_parser or get_openai_output_parser(functions)
    if prompt:
        return prompt | llm.bind(**llm_kwargs_) | output_parser
-    else:
-        return llm.bind(**llm_kwargs_) | output_parser
+    return llm.bind(**llm_kwargs_) | output_parser


@deprecated(
@@ -168,15 +169,15 @@ def create_openai_fn_runnable(

            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
+                punchline: str = Field(description="The punchline to the joke")

            # Or any other chat model that supports tools.
            # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
            # with_structured_output.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                Make sure to call the Joke function.")
            """
    ),
@@ -190,7 +191,9 @@ def create_structured_output_runnable(
    enforce_function_usage: bool = True,
    return_single: bool = True,
    mode: Literal[
-        "openai-functions", "openai-tools", "openai-json"
+        "openai-functions",
+        "openai-tools",
+        "openai-json",
    ] = "openai-functions",
    **kwargs: Any,
) -> Runnable:
@@ -201,39 +204,39 @@ def create_structured_output_runnable(
            is passed in, it's assumed to already be a valid JsonSchema.
            For best results, pydantic.BaseModels should have docstrings describing what
            the schema represents and descriptions for the parameters.
-        llm: Language model to use. Assumed to support the OpenAI function-calling API
-            if mode is 'openai-function'. Assumed to support OpenAI response_format
+        llm: Language model to use. Assumed to support the OpenAI function-calling API
+            if mode is 'openai-function'. Assumed to support OpenAI response_format
            parameter if mode is 'openai-json'.
-        prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
-            prompt has input variable 'output_schema' then the given output_schema
+        prompt: BasePromptTemplate to pass to the model. If mode is 'openai-json' and
+            prompt has input variable 'output_schema' then the given output_schema
            will be converted to a JsonSchema and inserted in the prompt.
        output_parser: Output parser to use for parsing model outputs. By default
            will be inferred from the function types. If pydantic.BaseModel is passed
-            in, then the OutputParser will try to parse outputs using the pydantic
+            in, then the OutputParser will try to parse outputs using the pydantic
            class. Otherwise model outputs will be parsed as JSON.
-        mode: How structured outputs are extracted from the model. If 'openai-functions'
-            then OpenAI function calling is used with the deprecated 'functions',
-            'function_call' schema. If 'openai-tools' then OpenAI function
-            calling with the latest 'tools', 'tool_choice' schema is used. This is
-            recommended over 'openai-functions'. If 'openai-json' then OpenAI model
+        mode: How structured outputs are extracted from the model. If 'openai-functions'
+            then OpenAI function calling is used with the deprecated 'functions',
+            'function_call' schema. If 'openai-tools' then OpenAI function
+            calling with the latest 'tools', 'tool_choice' schema is used. This is
+            recommended over 'openai-functions'. If 'openai-json' then OpenAI model
            with response_format set to JSON is used.
-        enforce_function_usage: Only applies when mode is 'openai-tools' or
-            'openai-functions'. If True, then the model will be forced to use the given
-            output schema. If False, then the model can elect whether to use the output
+        enforce_function_usage: Only applies when mode is 'openai-tools' or
+            'openai-functions'. If True, then the model will be forced to use the given
+            output schema. If False, then the model can elect whether to use the output
            schema.
-        return_single: Only applies when mode is 'openai-tools'. Whether to a list of
-            structured outputs or a single one. If True and model does not return any
-            structured outputs then chain output is None. If False and model does not
+        return_single: Only applies when mode is 'openai-tools'. Whether to a list of
+            structured outputs or a single one. If True and model does not return any
+            structured outputs then chain output is None. If False and model does not
            return any structured outputs then chain output is an empty list.
        kwargs: Additional named arguments.

    Returns:
-        A runnable sequence that will return a structured output(s) matching the given
+        A runnable sequence that will return a structured output(s) matching the given
        output_schema.
-
+
    OpenAI tools example with Pydantic schema (mode='openai-tools'):
        .. code-block:: python
-
+
            from typing import Optional

            from langchain.chains import create_structured_output_runnable
@@ -251,23 +254,23 @@ def create_structured_output_runnable(
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            prompt = ChatPromptTemplate.from_messages(
                [
-                    ("system", "You are an extraction algorithm. Please extract every possible instance"),
+                    ("system", "You are an extraction algorithm. Please extract every possible instance"),
                    ('human', '{input}')
                ]
            )
            structured_llm = create_structured_output_runnable(
-                RecordDog,
-                llm,
-                mode="openai-tools",
-                enforce_function_usage=True,
+                RecordDog,
+                llm,
+                mode="openai-tools",
+                enforce_function_usage=True,
                return_single=True
            )
            structured_llm.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
            # -> RecordDog(name="Harry", color="brown", fav_food="chicken")
-
+
    OpenAI tools example with dict schema (mode="openai-tools"):
        .. code-block:: python
-
+
            from typing import Optional

            from langchain.chains import create_structured_output_runnable
@@ -303,15 +306,15 @@ def create_structured_output_runnable(

            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            structured_llm = create_structured_output_runnable(
-                dog_schema,
-                llm,
-                mode="openai-tools",
-                enforce_function_usage=True,
+                dog_schema,
+                llm,
+                mode="openai-tools",
+                enforce_function_usage=True,
                return_single=True
            )
            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
            # -> {'name': 'Harry', 'color': 'brown', 'fav_food': 'chicken'}
-
+
    OpenAI functions example (mode="openai-functions"):
        .. code-block:: python

@@ -332,7 +335,7 @@ def create_structured_output_runnable(
            structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-functions")
            structured_llm.invoke("Harry was a chubby brown beagle who loved chicken")
            # -> Dog(name="Harry", color="brown", fav_food="chicken")
-
+
    OpenAI functions with prompt example:
        .. code-block:: python

@@ -361,7 +364,7 @@ def create_structured_output_runnable(
            # -> Dog(name="Harry", color="brown", fav_food="chicken")
    OpenAI json response format example (mode="openai-json"):
        .. code-block:: python
-
+
            from typing import Optional

            from langchain.chains import create_structured_output_runnable
@@ -379,19 +382,21 @@ def create_structured_output_runnable(
            llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
            structured_llm = create_structured_output_runnable(Dog, llm, mode="openai-json")
            system = '''You are a world class assistant for extracting information in structured JSON formats. \
-
+
            Extract a valid JSON blob from the user input that matches the following JSON Schema:
-
+
            {output_schema}'''
            prompt = ChatPromptTemplate.from_messages(
                [("system", system), ("human", "{input}"),]
            )
            chain = prompt | structured_llm
            chain.invoke({"input": "Harry was a chubby brown beagle who loved chicken"})
+
    """  # noqa: E501
    # for backwards compatibility
    force_function_usage = kwargs.get(
-        "enforce_single_function_usage", enforce_function_usage
+        "enforce_single_function_usage",
+        enforce_function_usage,
    )

    if mode == "openai-tools":
@@ -400,9 +405,8 @@ def create_structured_output_runnable(
        # Backwards compatibility keys
        unrecognized_keys = keys_in_kwargs - {"enforce_single_function_usage"}
        if unrecognized_keys:
-            raise TypeError(
-                f"Got an unexpected keyword argument(s): {unrecognized_keys}."
-            )
+            msg = f"Got an unexpected keyword argument(s): {unrecognized_keys}."
+            raise TypeError(msg)

        return _create_openai_tools_runnable(
            output_schema,
@@ -413,7 +417,7 @@ def create_structured_output_runnable(
            first_tool_only=return_single,
        )

-    elif mode == "openai-functions":
+    if mode == "openai-functions":
        return _create_openai_functions_structured_output_runnable(
            output_schema,
            llm,
@@ -422,19 +426,24 @@ def create_structured_output_runnable(
            enforce_single_function_usage=force_function_usage,
            **kwargs,  # llm-specific kwargs
        )
-    elif mode == "openai-json":
+    if mode == "openai-json":
        if force_function_usage:
-            raise ValueError(
+            msg = (
                "enforce_single_function_usage is not supported for mode='openai-json'."
            )
+            raise ValueError(msg)
        return _create_openai_json_runnable(
-            output_schema, llm, prompt=prompt, output_parser=output_parser, **kwargs
-        )
-    else:
-        raise ValueError(
-            f"Invalid mode {mode}. Expected one of 'openai-tools', 'openai-functions', "
-            f"'openai-json'."
+            output_schema,
+            llm,
+            prompt=prompt,
+            output_parser=output_parser,
+            **kwargs,
        )
+    msg = (
+        f"Invalid mode {mode}. Expected one of 'openai-tools', 'openai-functions', "
+        f"'openai-json'."
+    )
+    raise ValueError(msg)


def _create_openai_tools_runnable(
@@ -454,12 +463,12 @@ def _create_openai_tools_runnable(
            "function": {"name": oai_tool["function"]["name"]},
        }
    output_parser = output_parser or _get_openai_tool_output_parser(
-        tool, first_tool_only=first_tool_only
+        tool,
+        first_tool_only=first_tool_only,
    )
    if prompt:
        return prompt | llm.bind(**llm_kwargs) | output_parser
-    else:
-        return llm.bind(**llm_kwargs) | output_parser
+    return llm.bind(**llm_kwargs) | output_parser


def _get_openai_tool_output_parser(
@@ -474,7 +483,8 @@ def _get_openai_tool_output_parser(
    else:
        key_name = convert_to_openai_tool(tool)["function"]["name"]
        output_parser = JsonOutputKeyToolsParser(
-            first_tool_only=first_tool_only, key_name=key_name
+            first_tool_only=first_tool_only,
+            key_name=key_name,
        )
    return output_parser

@@ -533,8 +543,7 @@ def _create_openai_json_runnable(
        prompt = prompt.partial(output_schema=json.dumps(schema_as_dict, indent=2))

        return prompt | llm | output_parser
-    else:
-        return llm | output_parser
+    return llm | output_parser


def _create_openai_functions_structured_output_runnable(
@@ -563,7 +572,8 @@ def _create_openai_functions_structured_output_runnable(

    function = _OutputFormatter
    output_parser = output_parser or PydanticAttrOutputFunctionsParser(
-        pydantic_schema=_OutputFormatter, attr_name="output"
+        pydantic_schema=_OutputFormatter,
+        attr_name="output",
    )
    return create_openai_fn_runnable(
        [function],
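The final hunks in `create_structured_output_runnable` replace the `elif`/`else` chain over `mode` with flat `if` blocks that return early, with an unrecognized mode falling through to a `ValueError`. A stripped-down sketch of that dispatch shape; the `_build_*` helpers are hypothetical stand-ins, not the module's private functions:

from typing import Any


def _build_tools(**kwargs: Any) -> str:
    return "tools runnable"  # stand-in for the tools-mode builder


def _build_functions(**kwargs: Any) -> str:
    return "functions runnable"  # stand-in for the functions-mode builder


def _build_json(**kwargs: Any) -> str:
    return "json runnable"  # stand-in for the json-mode builder


def dispatch(mode: str, **kwargs: Any) -> str:
    # Flat early-return blocks instead of an elif/else chain; an unknown mode
    # falls through to a ValueError whose message is bound to `msg` first.
    if mode == "openai-tools":
        return _build_tools(**kwargs)
    if mode == "openai-functions":
        return _build_functions(**kwargs)
    if mode == "openai-json":
        return _build_json(**kwargs)
    msg = (
        f"Invalid mode {mode}. Expected one of "
        f"'openai-tools', 'openai-functions', 'openai-json'."
    )
    raise ValueError(msg)


print(dispatch("openai-json"))  # -> json runnable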