langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
langchain/chains/conversational_retrieval/base.py
CHANGED

@@ -22,6 +22,7 @@ from langchain_core.retrievers import BaseRetriever
 from langchain_core.runnables import RunnableConfig
 from langchain_core.vectorstores import VectorStore
 from pydantic import BaseModel, ConfigDict, Field, model_validator
+from typing_extensions import override

 from langchain.chains.base import Chain
 from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
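The `typing_extensions.override` import added above supports a pattern repeated throughout this release: methods that override a base-class API are decorated with `@override` so a type checker can flag signature drift. A minimal sketch of the decorator's effect, using illustrative class names that are not part of this package:

    from typing_extensions import override


    class Base:
        def save(self, path: str) -> None:
            """Persist state to ``path``."""


    class Child(Base):
        @override  # a type checker reports an error if Base.save is renamed or changed
        def save(self, path: str) -> None:
            super().save(path)

At runtime the decorator only marks the function; the check itself is performed by static type checkers.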
@@ -44,18 +45,20 @@ def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
         if isinstance(dialogue_turn, BaseMessage):
             if len(dialogue_turn.content) > 0:
                 role_prefix = _ROLE_MAP.get(
-                    dialogue_turn.type, f"{dialogue_turn.type}: "
+                    dialogue_turn.type,
+                    f"{dialogue_turn.type}: ",
                 )
                 buffer += f"\n{role_prefix}{dialogue_turn.content}"
         elif isinstance(dialogue_turn, tuple):
             human = "Human: " + dialogue_turn[0]
             ai = "Assistant: " + dialogue_turn[1]
-            buffer += "\n" + "\n".join([human, ai])
+            buffer += f"\n{human}\n{ai}"
         else:
-            raise ValueError(
+            msg = (
                 f"Unsupported chat history format: {type(dialogue_turn)}."
                 f" Full chat history: {chat_history} "
             )
+            raise ValueError(msg)  # noqa: TRY004
     return buffer


@@ -93,7 +96,7 @@ class BaseConversationalRetrievalChain(Chain):
     """An optional function to get a string of the chat history.
     If None is provided, will use a default."""
     response_if_no_docs_found: Optional[str] = None
-    """If specified, the chain will return a fixed response if no docs
+    """If specified, the chain will return a fixed response if no docs
    are found for the question. """

     model_config = ConfigDict(
@@ -107,8 +110,10 @@ class BaseConversationalRetrievalChain(Chain):
         """Input keys."""
         return ["question", "chat_history"]

+    @override
     def get_input_schema(
-        self, config: Optional[RunnableConfig] = None
+        self,
+        config: Optional[RunnableConfig] = None,
     ) -> type[BaseModel]:
         return InputType

@@ -120,9 +125,9 @@ class BaseConversationalRetrievalChain(Chain):
         """
        _output_keys = [self.output_key]
        if self.return_source_documents:
-            _output_keys = _output_keys + ["source_documents"]
+            _output_keys = [*_output_keys, "source_documents"]
        if self.return_generated_question:
-            _output_keys = _output_keys + ["generated_question"]
+            _output_keys = [*_output_keys, "generated_question"]
        return _output_keys

     @abstractmethod
@@ -148,7 +153,9 @@ class BaseConversationalRetrievalChain(Chain):
         if chat_history_str:
             callbacks = _run_manager.get_child()
             new_question = self.question_generator.run(
-                question=question, chat_history=chat_history_str, callbacks=callbacks
+                question=question,
+                chat_history=chat_history_str,
+                callbacks=callbacks,
             )
         else:
             new_question = question
@@ -168,7 +175,9 @@ class BaseConversationalRetrievalChain(Chain):
         new_inputs["question"] = new_question
         new_inputs["chat_history"] = chat_history_str
         answer = self.combine_docs_chain.run(
-            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **new_inputs,
         )
         output[self.output_key] = answer

@@ -200,7 +209,9 @@ class BaseConversationalRetrievalChain(Chain):
         if chat_history_str:
             callbacks = _run_manager.get_child()
             new_question = await self.question_generator.arun(
-                question=question, chat_history=chat_history_str, callbacks=callbacks
+                question=question,
+                chat_history=chat_history_str,
+                callbacks=callbacks,
             )
         else:
             new_question = question
@@ -221,7 +232,9 @@ class BaseConversationalRetrievalChain(Chain):
         new_inputs["question"] = new_question
         new_inputs["chat_history"] = chat_history_str
         answer = await self.combine_docs_chain.arun(
-            input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **new_inputs,
         )
         output[self.output_key] = answer

@@ -231,9 +244,11 @@ class BaseConversationalRetrievalChain(Chain):
         output["generated_question"] = new_question
         return output

+    @override
     def save(self, file_path: Union[Path, str]) -> None:
         if self.get_chat_history:
-            raise ValueError("Chain not saveable when `get_chat_history` is not None.")
+            msg = "Chain not saveable when `get_chat_history` is not None."
+            raise ValueError(msg)
         super().save(file_path)

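The `msg = ...` / `raise ValueError(msg)` shape in the hunk above recurs across this release; it follows the convention encouraged by ruff's EM rules of binding the error message to a variable before raising. A minimal sketch with hypothetical names:

    def check_limit(value: int, limit: int = 10) -> None:
        if value > limit:
            # Bind the message first so the raise statement stays short.
            msg = f"value {value} exceeds limit {limit}"
            raise ValueError(msg)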
@@ -252,68 +267,67 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     `create_retrieval_chain`. Additional walkthroughs can be found at
     https://python.langchain.com/docs/use_cases/question_answering/chat_history

-
-
-        from langchain.chains import (
-            create_history_aware_retriever,
-            create_retrieval_chain,
-        )
-        from langchain.chains.combine_documents import create_stuff_documents_chain
-        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
-        from langchain_openai import ChatOpenAI
+    .. code-block:: python

+        from langchain.chains import (
+            create_history_aware_retriever,
+            create_retrieval_chain,
+        )
+        from langchain.chains.combine_documents import create_stuff_documents_chain
+        from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+        from langchain_openai import ChatOpenAI

-
+        retriever = ...  # Your retriever

-
+        llm = ChatOpenAI()

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Contextualize question
+        contextualize_q_system_prompt = (
+            "Given a chat history and the latest user question "
+            "which might reference context in the chat history, "
+            "formulate a standalone question which can be understood "
+            "without the chat history. Do NOT answer the question, just "
+            "reformulate it if needed and otherwise return it as is."
+        )
+        contextualize_q_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", contextualize_q_system_prompt),
+                MessagesPlaceholder("chat_history"),
+                ("human", "{input}"),
+            ]
+        )
+        history_aware_retriever = create_history_aware_retriever(
+            llm, retriever, contextualize_q_prompt
+        )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Answer question
+        qa_system_prompt = (
+            "You are an assistant for question-answering tasks. Use "
+            "the following pieces of retrieved context to answer the "
+            "question. If you don't know the answer, just say that you "
+            "don't know. Use three sentences maximum and keep the answer "
+            "concise."
+            "\n\n"
+            "{context}"
+        )
+        qa_prompt = ChatPromptTemplate.from_messages(
+            [
+                ("system", qa_system_prompt),
+                MessagesPlaceholder("chat_history"),
+                ("human", "{input}"),
+            ]
+        )
+        # Below we use create_stuff_documents_chain to feed all retrieved context
+        # into the LLM. Note that we can also use StuffDocumentsChain and other
+        # instances of BaseCombineDocumentsChain.
+        question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
+        rag_chain = create_retrieval_chain(
+            history_aware_retriever, question_answer_chain
+        )

-
-
-
+        # Usage:
+        chat_history = []  # Collect chat history here (a sequence of messages)
+        rag_chain.invoke({"input": query, "chat_history": chat_history})

     This chain takes in chat history (a list of messages) and new questions,
     and then returns an answer to that question.
@@ -360,22 +374,26 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
             retriever=retriever,
             question_generator=question_generator_chain,
         )
+
     """

     retriever: BaseRetriever
     """Retriever to use to fetch documents."""
     max_tokens_limit: Optional[int] = None
     """If set, enforces that the documents returned are less than this limit.
-    This is only enforced if `combine_docs_chain` is of type StuffDocumentsChain."""
+
+    This is only enforced if ``combine_docs_chain`` is of type StuffDocumentsChain.
+    """

     def _reduce_tokens_below_limit(self, docs: list[Document]) -> list[Document]:
         num_docs = len(docs)

         if self.max_tokens_limit and isinstance(
-            self.combine_docs_chain, StuffDocumentsChain
+            self.combine_docs_chain,
+            StuffDocumentsChain,
         ):
             tokens = [
-                self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
+                self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                 for doc in docs
             ]
             token_count = sum(tokens[:num_docs])
@@ -385,6 +403,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):

         return docs[:num_docs]

+    @override
     def _get_docs(
         self,
         question: str,
@@ -394,10 +413,12 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     ) -> list[Document]:
         """Get docs."""
         docs = self.retriever.invoke(
-            question, config={"callbacks": run_manager.get_child()}
+            question,
+            config={"callbacks": run_manager.get_child()},
         )
         return self._reduce_tokens_below_limit(docs)

+    @override
     async def _aget_docs(
         self,
         question: str,
@@ -407,7 +428,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
     ) -> list[Document]:
         """Get docs."""
         docs = await self.retriever.ainvoke(
-            question, config={"callbacks": run_manager.get_child()}
+            question,
+            config={"callbacks": run_manager.get_child()},
         )
         return self._reduce_tokens_below_limit(docs)

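Several hunks in this file now hand the chain's child callback manager to the retriever through the `config` mapping of `invoke`/`ainvoke` instead of a bare keyword argument. A self-contained sketch of that call shape, assuming only the public `langchain_core` interfaces shown in the imports; `TinyRetriever` is a stand-in and not part of the diff:

    from langchain_core.callbacks import StdOutCallbackHandler
    from langchain_core.documents import Document
    from langchain_core.retrievers import BaseRetriever


    class TinyRetriever(BaseRetriever):
        """Toy retriever used only to show the call shape."""

        def _get_relevant_documents(self, query, *, run_manager):
            return [Document(page_content=f"stub result for {query!r}")]


    retriever = TinyRetriever()
    # Callbacks ride along in the config dict, as in _get_docs/_aget_docs above.
    docs = retriever.invoke(
        "what changed?",
        config={"callbacks": [StdOutCallbackHandler()]},
    )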
@@ -418,7 +440,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
         retriever: BaseRetriever,
         condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
         chain_type: str = "stuff",
-        verbose: bool = False,
+        verbose: bool = False,  # noqa: FBT001,FBT002
         condense_question_llm: Optional[BaseLanguageModel] = None,
         combine_docs_chain_kwargs: Optional[dict] = None,
         callbacks: Callbacks = None,
@@ -485,13 +507,15 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):

     @model_validator(mode="before")
     @classmethod
-    def raise_deprecation(cls, values: dict) -> Any:
+    def _raise_deprecation(cls, values: dict) -> Any:
         warnings.warn(
             "`ChatVectorDBChain` is deprecated - "
-            "please use `from langchain.chains import ConversationalRetrievalChain`"
+            "please use `from langchain.chains import ConversationalRetrievalChain`",
+            stacklevel=4,
         )
         return values

+    @override
     def _get_docs(
         self,
         question: str,
@@ -503,7 +527,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
         vectordbkwargs = inputs.get("vectordbkwargs", {})
         full_kwargs = {**self.search_kwargs, **vectordbkwargs}
         return self.vectorstore.similarity_search(
-            question, k=self.top_k_docs_for_context, **full_kwargs
+            question,
+            k=self.top_k_docs_for_context,
+            **full_kwargs,
         )

     async def _aget_docs(
@@ -514,7 +540,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
         run_manager: AsyncCallbackManagerForChainRun,
     ) -> list[Document]:
         """Get docs."""
-        raise NotImplementedError("ChatVectorDBChain does not support async")
+        msg = "ChatVectorDBChain does not support async"
+        raise NotImplementedError(msg)

     @classmethod
     def from_llm(
@@ -536,7 +563,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
             **combine_docs_chain_kwargs,
         )
         condense_question_chain = LLMChain(
-            llm=llm, prompt=condense_question_prompt, callbacks=callbacks
+            llm=llm,
+            prompt=condense_question_prompt,
+            callbacks=callbacks,
         )
         return cls(
             vectorstore=vectorstore,
langchain/chains/conversational_retrieval/prompts.py
CHANGED

@@ -1,4 +1,3 @@
-# flake8: noqa
 from langchain_core.prompts.prompt import PromptTemplate

 _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
@@ -6,7 +5,7 @@ _template = """Given the following conversation and a follow up question, rephra
 Chat History:
 {chat_history}
 Follow Up Input: {question}
-Standalone question:"""
+Standalone question:"""  # noqa: E501
 CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

 prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
@@ -14,7 +13,7 @@ prompt_template = """Use the following pieces of context to answer the question
 {context}

 Question: {question}
-Helpful Answer:"""
+Helpful Answer:"""  # noqa: E501
 QA_PROMPT = PromptTemplate(
     template=prompt_template, input_variables=["context", "question"]
 )
langchain/chains/elasticsearch_database/base.py
CHANGED

@@ -34,6 +34,7 @@ class ElasticsearchDatabaseChain(Chain):

         database = Elasticsearch("http://localhost:9200")
         db_chain = ElasticsearchDatabaseChain.from_llm(OpenAI(), database)
+
     """

     query_chain: Runnable
@@ -58,11 +59,10 @@ class ElasticsearchDatabaseChain(Chain):
     )

     @model_validator(mode="after")
-    def validate_indices(self) -> Self:
+    def _validate_indices(self) -> Self:
         if self.include_indices and self.ignore_indices:
-            raise ValueError(
-                "Cannot specify both 'include_indices' and 'ignore_indices'."
-            )
+            msg = "Cannot specify both 'include_indices' and 'ignore_indices'."
+            raise ValueError(msg)
         return self

     @property
@@ -81,8 +81,7 @@ class ElasticsearchDatabaseChain(Chain):
         """
         if not self.return_intermediate_steps:
             return [self.output_key]
-        else:
-            return [self.output_key, INTERMEDIATE_STEPS_KEY]
+        return [self.output_key, INTERMEDIATE_STEPS_KEY]

     def _list_indices(self) -> list[str]:
         all_indices = [
@@ -111,7 +110,7 @@ class ElasticsearchDatabaseChain(Chain):
             [
                 "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"])
                 for index in mappings
-            ]
+            ],
         )

     def _search(self, indices: list[str], query: str) -> str:
@@ -144,7 +143,7 @@ class ElasticsearchDatabaseChain(Chain):

         _run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
         intermediate_steps.append(
-            es_cmd
+            es_cmd,
         )  # output: elasticsearch dsl generation (no checker)
         intermediate_steps.append({"es_cmd": es_cmd})  # input: ES search
         result = self._search(indices=indices, query=es_cmd)
@@ -166,12 +165,13 @@ class ElasticsearchDatabaseChain(Chain):
             chain_result: dict[str, Any] = {self.output_key: final_result}
             if self.return_intermediate_steps:
                 chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
-            return chain_result
         except Exception as exc:
             # Append intermediate steps to exception, to aid in logging and later
             # improvement of few shot prompt seeds
             exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
-            raise exc
+            raise
+
+        return chain_result

     @property
     def _chain_type(self) -> str:
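The last hunk above moves the success-path `return chain_result` out of the `try` block: the `except` handler only annotates and re-raises, and the return runs once the block has completed without error. A standalone sketch of that control flow with hypothetical names:

    def run_query(execute, query: str) -> dict:
        steps: list = []
        try:
            steps.append(query)
            result = {"output": execute(query)}
        except Exception as exc:
            # Attach context for logging, then re-raise with the original traceback.
            exc.intermediate_steps = steps  # type: ignore[attr-defined]
            raise
        # Reached only when the try block succeeded.
        return result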
langchain/chains/elasticsearch_database/prompts.py
CHANGED

@@ -1,4 +1,3 @@
-# flake8: noqa
 from langchain_core.prompts.prompt import PromptTemplate

 PROMPT_SUFFIX = """Only use the following Elasticsearch indices:
@@ -17,7 +16,7 @@ Use the following format:

 Question: Question here
 ESQuery: Elasticsearch Query formatted as json
-"""
+"""  # noqa: E501

 DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX)

@@ -31,6 +30,6 @@ Answer: Final answer here

 Question: {input}
 Data: {data}
-Answer:"""
+Answer:"""  # noqa: E501

 ANSWER_PROMPT = PromptTemplate.from_template(DEFAULT_ANSWER_TEMPLATE)
langchain/chains/ernie_functions/__init__.py
CHANGED

@@ -36,9 +36,9 @@ def __getattr__(name: str) -> Any:

 __all__ = [
     "convert_to_ernie_function",
-    "create_structured_output_chain",
     "create_ernie_fn_chain",
-    "create_structured_output_runnable",
     "create_ernie_fn_runnable",
+    "create_structured_output_chain",
+    "create_structured_output_runnable",
     "get_ernie_output_parser",
 ]
langchain/chains/example_generator.py
CHANGED

@@ -7,7 +7,9 @@ TEST_GEN_TEMPLATE_SUFFIX = "Add another example."


 def generate_example(
-    examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
+    examples: list[dict],
+    llm: BaseLanguageModel,
+    prompt_template: PromptTemplate,
 ) -> str:
     """Return another example given a list of examples for a prompt."""
     prompt = FewShotPromptTemplate(
langchain/chains/flare/base.py
CHANGED

@@ -15,6 +15,7 @@ from langchain_core.prompts import BasePromptTemplate
 from langchain_core.retrievers import BaseRetriever
 from langchain_core.runnables import Runnable
 from pydantic import Field
+from typing_extensions import override

 from langchain.chains.base import Chain
 from langchain.chains.flare.prompts import (
@@ -44,6 +45,7 @@ class QuestionGeneratorChain(LLMChain):
     """Prompt template for the chain."""

     @classmethod
+    @override
     def is_lc_serializable(cls) -> bool:
         return False

@@ -69,7 +71,8 @@ def _low_confidence_spans(
             "NumPy not found in the current Python environment. FlareChain will use a "
             "pure Python implementation for internal calculations, which may "
             "significantly impact performance, especially for large datasets. For "
-            "optimal speed and efficiency, consider installing NumPy: pip install numpy",
+            "optimal speed and efficiency, consider installing NumPy: pip install "
+            "numpy",
         )
         import math

@@ -171,7 +174,8 @@ class FlareChain(Chain):
         callbacks = _run_manager.get_child()
         if isinstance(self.question_generator_chain, LLMChain):
             question_gen_outputs = self.question_generator_chain.apply(
-                question_gen_inputs, callbacks=callbacks
+                question_gen_inputs,
+                callbacks=callbacks,
             )
             questions = [
                 output[self.question_generator_chain.output_keys[0]]
@@ -179,10 +183,13 @@ class FlareChain(Chain):
             ]
         else:
             questions = self.question_generator_chain.batch(
-                question_gen_inputs, config={"callbacks": callbacks}
+                question_gen_inputs,
+                config={"callbacks": callbacks},
             )
         _run_manager.on_text(
-            f"Generated Questions: {questions}", color="yellow", end="\n"
+            f"Generated Questions: {questions}",
+            color="yellow",
+            end="\n",
         )
         return self._do_generation(questions, user_input, response, _run_manager)

@@ -197,15 +204,18 @@ class FlareChain(Chain):

         response = ""

-        for _ in range(self.max_iter):
+        for _i in range(self.max_iter):
             _run_manager.on_text(
-                f"Current Response: {response}", color="blue", end="\n"
+                f"Current Response: {response}",
+                color="blue",
+                end="\n",
             )
             _input = {"user_input": user_input, "context": "", "response": response}
             tokens, log_probs = _extract_tokens_and_log_probs(
                 self.response_chain.invoke(
-                    _input, {"callbacks": _run_manager.get_child()}
-                )
+                    _input,
+                    {"callbacks": _run_manager.get_child()},
+                ),
             )
             low_confidence_spans = _low_confidence_spans(
                 tokens,
@@ -236,7 +246,10 @@ class FlareChain(Chain):

     @classmethod
     def from_llm(
-        cls, llm: BaseLanguageModel, max_generation_len: int = 32, **kwargs: Any
+        cls,
+        llm: BaseLanguageModel,
+        max_generation_len: int = 32,
+        **kwargs: Any,
     ) -> FlareChain:
         """Creates a FlareChain from a language model.

@@ -250,14 +263,17 @@ class FlareChain(Chain):
         """
         try:
             from langchain_openai import ChatOpenAI
-        except ImportError:
-            raise ImportError(
+        except ImportError as e:
+            msg = (
                 "OpenAI is required for FlareChain. "
                 "Please install langchain-openai."
                 "pip install langchain-openai"
             )
+            raise ImportError(msg) from e
         llm = ChatOpenAI(
-            max_completion_tokens=max_generation_len, logprobs=True, temperature=0
+            max_completion_tokens=max_generation_len,
+            logprobs=True,
+            temperature=0,
         )
         response_chain = PROMPT | llm
         question_gen_chain = QUESTION_GENERATOR_PROMPT | llm | StrOutputParser()
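`from_llm` keeps `logprobs=True` on the `ChatOpenAI` it builds because FLARE decides when to retrieve by looking for tokens that were generated with low probability. A sketch of that idea only; the function name and threshold below are illustrative and are not the package's `_low_confidence_spans` implementation (which also groups tokens into spans and has an optional NumPy path):

    import math


    def low_confidence_tokens(tokens, log_probs, min_prob=0.2):
        # A token whose log-probability falls below log(min_prob) marks a span
        # the chain would follow up on with a generated question.
        cutoff = math.log(min_prob)
        return [t for t, lp in zip(tokens, log_probs) if lp < cutoff]


    print(low_confidence_tokens(["the", "answer", "is", "42"], [-0.1, -2.5, -0.2, -3.0]))
    # -> ['answer', '42']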
langchain/chains/flare/prompts.py
CHANGED

@@ -1,5 +1,6 @@
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.prompts import PromptTemplate
+from typing_extensions import override


 class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
@@ -8,6 +9,7 @@ class FinishedOutputParser(BaseOutputParser[tuple[str, bool]]):
     finished_value: str = "FINISHED"
     """Value that indicates the output is finished."""

+    @override
     def parse(self, text: str) -> tuple[str, bool]:
         cleaned = text.strip()
         finished = self.finished_value in cleaned
|