langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0

langchain/chains/qa_generation/prompt.py +5 -5

@@ -1,5 +1,3 @@
-# flake8: noqa
-from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain_core.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
@@ -7,6 +5,8 @@ from langchain_core.prompts.chat import (
 )
 from langchain_core.prompts.prompt import PromptTemplate
 
+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
+
 templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
 Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
 When coming up with this question/answer pair, you must respond in the following format:
@@ -18,10 +18,10 @@ When coming up with this question/answer pair, you must respond in the following
 ```
 
 Everything between the ``` must be valid json.
-"""
+"""  # noqa: E501
 templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 CHAT_PROMPT = ChatPromptTemplate.from_messages(
     [
         SystemMessagePromptTemplate.from_template(templ1),
@@ -42,7 +42,7 @@ Everything between the ``` must be valid json.
 
 Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 PROMPT = PromptTemplate.from_template(templ)
 
 PROMPT_SELECTOR = ConditionalPromptSelector(

langchain/chains/qa_with_sources/base.py +17 -6

@@ -16,6 +16,7 @@ from langchain_core.documents import Document
 from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
 from pydantic import ConfigDict, model_validator
+from typing_extensions import override
 
 from langchain.chains import ReduceDocumentsChain
 from langchain.chains.base import Chain
@@ -70,7 +71,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_variable_name="summaries",
         )
         reduce_documents_chain = ReduceDocumentsChain(
-            combine_documents_chain=combine_results_chain
+            combine_documents_chain=combine_results_chain,
         )
         combine_documents_chain = MapReduceDocumentsChain(
             llm_chain=llm_question_chain,
@@ -93,7 +94,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Load chain from chain type."""
         _chain_kwargs = chain_type_kwargs or {}
         combine_documents_chain = load_qa_with_sources_chain(
-            llm, chain_type=chain_type, **_chain_kwargs
+            llm,
+            chain_type=chain_type,
+            **_chain_kwargs,
         )
         return cls(combine_documents_chain=combine_documents_chain, **kwargs)
 
@@ -118,7 +121,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """
         _output_keys = [self.answer_key, self.sources_answer_key]
         if self.return_source_documents:
-            _output_keys = _output_keys + ["source_documents"]
+            _output_keys = [*_output_keys, "source_documents"]
         return _output_keys
 
     @model_validator(mode="before")
@@ -133,7 +136,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Split sources from answer."""
         if re.search(r"SOURCES?:", answer, re.IGNORECASE):
             answer, sources = re.split(
-                r"SOURCES?:|QUESTION:\s", answer, flags=re.IGNORECASE
+                r"SOURCES?:|QUESTION:\s",
+                answer,
+                flags=re.IGNORECASE,
             )[:2]
             sources = re.split(r"\n", sources)[0].strip()
         else:
@@ -164,7 +169,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
             docs = self._get_docs(inputs)  # type: ignore[call-arg]
 
         answer = self.combine_documents_chain.run(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
         )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {
@@ -198,7 +205,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         else:
             docs = await self._aget_docs(inputs)  # type: ignore[call-arg]
         answer = await self.combine_documents_chain.arun(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
         )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {
@@ -232,6 +241,7 @@ class QAWithSourcesChain(BaseQAWithSourcesChain):
         """
         return [self.input_docs_key, self.question_key]
 
+    @override
     def _get_docs(
         self,
         inputs: dict[str, Any],
@@ -241,6 +251,7 @@ class QAWithSourcesChain(BaseQAWithSourcesChain):
         """Get docs to run questioning over."""
         return inputs.pop(self.input_docs_key)
 
+    @override
     async def _aget_docs(
         self,
         inputs: dict[str, Any],
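
Several files in this diff (base.py above, plus vector_db.py and query_constructor/base.py further down) add `from typing_extensions import override` and decorate overriding methods such as `_get_docs` with `@override`. The decorator is purely a static-typing aid: it asserts that the method really overrides something in a base class, so a rename or signature change in the base is flagged instead of silently leaving an orphaned method behind. A minimal, hypothetical sketch of the pattern (the class names here are made up, not taken from this package):

```python
from typing_extensions import override


class BaseDocsChain:
    def _get_docs(self, inputs: dict) -> list[str]:
        raise NotImplementedError


class QAChain(BaseDocsChain):
    @override
    def _get_docs(self, inputs: dict) -> list[str]:
        # A type checker (mypy or pyright) now errors if BaseDocsChain renames
        # or re-types _get_docs; at runtime the decorator is a no-op.
        return [str(v) for v in inputs.values()]
```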

langchain/chains/qa_with_sources/loading.py +16 -8

@@ -30,13 +30,16 @@ class LoadingCallable(Protocol):
     """Interface for loading the combine documents chain."""
 
     def __call__(
-        self, llm: BaseLanguageModel, **kwargs: Any
+        self,
+        llm: BaseLanguageModel,
+        **kwargs: Any,
     ) -> BaseCombineDocumentsChain:
         """Callable to load the combine documents chain."""


 def _load_map_rerank_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
     verbose: bool = False,
     document_variable_name: str = "context",
@@ -56,6 +59,7 @@ def _load_map_rerank_chain(
 
 def _load_stuff_chain(
     llm: BaseLanguageModel,
+    *,
     prompt: BasePromptTemplate = stuff_prompt.PROMPT,
     document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT,
     document_variable_name: str = "summaries",
@@ -74,6 +78,7 @@ def _load_stuff_chain(
 
 def _load_map_reduce_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
     combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
     document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
@@ -98,10 +103,11 @@ def _load_map_reduce_chain(
     if collapse_prompt is None:
         collapse_chain = None
         if collapse_llm is not None:
-            raise ValueError(
+            msg = (
                 "collapse_llm provided, but collapse_prompt was not: please "
                 "provide one or stop providing collapse_llm."
             )
+            raise ValueError(msg)
     else:
         _collapse_llm = collapse_llm or llm
         collapse_chain = StuffDocumentsChain(
@@ -130,6 +136,7 @@ def _load_map_reduce_chain(
 
 def _load_refine_chain(
     llm: BaseLanguageModel,
+    *,
     question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
     refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
     document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
@@ -162,16 +169,16 @@ def _load_refine_chain(
         "https://python.langchain.com/docs/how_to/qa_sources/"
         "\nSee also the following migration guides for replacements "
         "based on `chain_type`:\n"
-        "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
-        "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
-        "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
-        "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
+        "stuff: https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain\n"
+        "map_reduce: https://python.langchain.com/docs/versions/migrating_chains/map_reduce_chain\n"
+        "refine: https://python.langchain.com/docs/versions/migrating_chains/refine_chain\n"
+        "map_rerank: https://python.langchain.com/docs/versions/migrating_chains/map_rerank_docs_chain\n"
     ),
 )
 def load_qa_with_sources_chain(
     llm: BaseLanguageModel,
     chain_type: str = "stuff",
-    verbose: Optional[bool] = None,
+    verbose: Optional[bool] = None,  # noqa: FBT001
     **kwargs: Any,
 ) -> BaseCombineDocumentsChain:
     """Load a question answering with sources chain.
@@ -193,9 +200,10 @@ def load_qa_with_sources_chain(
         "map_rerank": _load_map_rerank_chain,
     }
     if chain_type not in loader_mapping:
-        raise ValueError(
+        msg = (
             f"Got unsupported chain type: {chain_type}. "
             f"Should be one of {loader_mapping.keys()}"
         )
+        raise ValueError(msg)
     _func: LoadingCallable = loader_mapping[chain_type]
     return _func(llm, verbose=verbose, **kwargs)
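
The bare `*` added to each `_load_*_chain` signature above makes the per-chain-type options (prompt, document_variable_name, and so on) keyword-only; the public `load_qa_with_sources_chain` already forwards them through `**kwargs`, so only direct callers of the private loaders are affected. A rough usage sketch under that assumption, with a fake list-backed LLM standing in for a real model (the sample texts are made up):

```python
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM

from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain

llm = FakeListLLM(responses=["Testing is mentioned.\nSOURCES: 0-pl"])

# chain_type selects the loader (_load_stuff_chain here); loader options such
# as document_variable_name travel through **kwargs and must be keywords.
chain = load_qa_with_sources_chain(llm, chain_type="stuff")

docs = [Document(page_content="Unit testing is useful.", metadata={"source": "0-pl"})]
result = chain.invoke({"input_documents": docs, "question": "What is mentioned?"})
print(result["output_text"])
```

Note that the function itself carries a deprecation decorator (visible in the hunk above), so this mainly matters for existing code that still loads these chains.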

langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9

@@ -1,16 +1,15 @@
-# flake8: noqa
 from langchain_core.prompts import PromptTemplate
 
-question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
+question_prompt_template = """Use the following portion of a long document to see if any of the text is relevant to answer the question.
 Return any relevant text verbatim.
 {context}
 Question: {question}
-Relevant text, if any:"""
+Relevant text, if any:"""  # noqa: E501
 QUESTION_PROMPT = PromptTemplate(
     template=question_prompt_template, input_variables=["context", "question"]
 )
 
-combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
+combine_prompt_template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
 If you don't know the answer, just say that you don't know. Don't try to make up an answer.
 ALWAYS return a "SOURCES" part in your answer.
 
@@ -28,13 +27,13 @@ SOURCES: 28-pl
 
 QUESTION: What did the president say about Michael Jackson?
 =========
-Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia
+Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia's Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
 Source: 0-pl
-Content: And we won
+Content: And we won't stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet's use this moment to reset. Let's stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet's stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can't change how divided we've been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who'd grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
 Source: 24-pl
-Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I
+Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I've always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I'm taking robust action to make sure the pain of our sanctions is targeted at Russia's economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what's happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
 Source: 5-pl
-Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt
+Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt's based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer's, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
 Source: 34-pl
 =========
 FINAL ANSWER: The president did not mention Michael Jackson.
@@ -44,7 +43,7 @@ QUESTION: {question}
 =========
 {summaries}
 =========
-FINAL ANSWER:"""
+FINAL ANSWER:"""  # noqa: E501
 COMBINE_PROMPT = PromptTemplate(
     template=combine_prompt_template, input_variables=["summaries", "question"]
 )

langchain/chains/qa_with_sources/retrieval.py +15 -6

@@ -29,10 +29,11 @@ class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
         num_docs = len(docs)
 
         if self.reduce_k_below_max_tokens and isinstance(
-            self.combine_documents_chain, StuffDocumentsChain
+            self.combine_documents_chain,
+            StuffDocumentsChain,
         ):
             tokens = [
-                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
+                self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)  # noqa: SLF001
                 for doc in docs
             ]
             token_count = sum(tokens[:num_docs])
@@ -43,20 +44,28 @@ class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain):
         return docs[:num_docs]
 
     def _get_docs(
-        self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
+        self,
+        inputs: dict[str, Any],
+        *,
+        run_manager: CallbackManagerForChainRun,
     ) -> list[Document]:
         question = inputs[self.question_key]
         docs = self.retriever.invoke(
-            question, config={"callbacks": run_manager.get_child()}
+            question,
+            config={"callbacks": run_manager.get_child()},
         )
         return self._reduce_tokens_below_limit(docs)
 
     async def _aget_docs(
-        self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
+        self,
+        inputs: dict[str, Any],
+        *,
+        run_manager: AsyncCallbackManagerForChainRun,
     ) -> list[Document]:
         question = inputs[self.question_key]
         docs = await self.retriever.ainvoke(
-            question, config={"callbacks": run_manager.get_child()}
+            question,
+            config={"callbacks": run_manager.get_child()},
         )
         return self._reduce_tokens_below_limit(docs)
 
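
The retrieval hunks above now thread the chain's run manager into the retriever call via `config={"callbacks": run_manager.get_child()}`, so the retriever run is recorded under the chain's trace. The same `config` mechanism can be exercised on its own; a small sketch with in-memory stand-ins (fake embeddings, a toy document) rather than a production vector store:

```python
from langchain_core.documents import Document
from langchain_core.embeddings import FakeEmbeddings
from langchain_core.tracers import ConsoleCallbackHandler
from langchain_core.vectorstores import InMemoryVectorStore

store = InMemoryVectorStore(embedding=FakeEmbeddings(size=8))
store.add_documents([Document(page_content="state of the union", metadata={"source": "0-pl"})])
retriever = store.as_retriever()

# Passing callbacks through `config` mirrors what the chain now does with
# run_manager.get_child(); ConsoleCallbackHandler prints the resulting run tree.
docs = retriever.invoke("union", config={"callbacks": [ConsoleCallbackHandler()]})
print(len(docs))
```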

langchain/chains/qa_with_sources/stuff_prompt.py +6 -7

@@ -1,7 +1,6 @@
-# flake8: noqa
 from langchain_core.prompts import PromptTemplate
 
-template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
+template = """Given the following extracted parts of a long document and a question, create a final answer with references ("SOURCES").
 If you don't know the answer, just say that you don't know. Don't try to make up an answer.
 ALWAYS return a "SOURCES" part in your answer.
 
@@ -19,13 +18,13 @@ SOURCES: 28-pl
 
 QUESTION: What did the president say about Michael Jackson?
 =========
-Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia
+Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny. \n\nSix days ago, Russia's Vladimir Putin sought to shake the foundations of the free world thinking he could make it bend to his menacing ways. But he badly miscalculated. \n\nHe thought he could roll into Ukraine and the world would roll over. Instead he met a wall of strength he never imagined. \n\nHe met the Ukrainian people. \n\nFrom President Zelenskyy to every Ukrainian, their fearlessness, their courage, their determination, inspires the world. \n\nGroups of citizens blocking tanks with their bodies. Everyone from students to retirees teachers turned soldiers defending their homeland.
 Source: 0-pl
-Content: And we won
+Content: And we won't stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet's use this moment to reset. Let's stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet's stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can't change how divided we've been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who'd grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
 Source: 24-pl
-Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I
+Content: And a proud Ukrainian people, who have known 30 years of independence, have repeatedly shown that they will not tolerate anyone who tries to take their country backwards. \n\nTo all Americans, I will be honest with you, as I've always promised. A Russian dictator, invading a foreign country, has costs around the world. \n\nAnd I'm taking robust action to make sure the pain of our sanctions is targeted at Russia's economy. And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what's happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
 Source: 5-pl
-Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt
+Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt's based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer's, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
 Source: 34-pl
 =========
 FINAL ANSWER: The president did not mention Michael Jackson.
@@ -35,7 +34,7 @@ QUESTION: {question}
 =========
 {summaries}
 =========
-FINAL ANSWER:"""
+FINAL ANSWER:"""  # noqa: E501
 PROMPT = PromptTemplate(template=template, input_variables=["summaries", "question"])
 
 EXAMPLE_PROMPT = PromptTemplate(
@@ -10,6 +10,7 @@ from langchain_core.callbacks import (
|
|
|
10
10
|
from langchain_core.documents import Document
|
|
11
11
|
from langchain_core.vectorstores import VectorStore
|
|
12
12
|
from pydantic import Field, model_validator
|
|
13
|
+
from typing_extensions import override
|
|
13
14
|
|
|
14
15
|
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
|
|
15
16
|
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
|
|
@@ -34,10 +35,11 @@ class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
|
|
|
34
35
|
num_docs = len(docs)
|
|
35
36
|
|
|
36
37
|
if self.reduce_k_below_max_tokens and isinstance(
|
|
37
|
-
self.combine_documents_chain,
|
|
38
|
+
self.combine_documents_chain,
|
|
39
|
+
StuffDocumentsChain,
|
|
38
40
|
):
|
|
39
41
|
tokens = [
|
|
40
|
-
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content)
|
|
42
|
+
self.combine_documents_chain.llm_chain._get_num_tokens(doc.page_content) # noqa: SLF001
|
|
41
43
|
for doc in docs
|
|
42
44
|
]
|
|
43
45
|
token_count = sum(tokens[:num_docs])
|
|
@@ -47,26 +49,37 @@ class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain):
 
         return docs[:num_docs]
 
+    @override
     def _get_docs(
-        self, inputs: dict[str, Any], *, run_manager: CallbackManagerForChainRun
+        self,
+        inputs: dict[str, Any],
+        *,
+        run_manager: CallbackManagerForChainRun,
     ) -> list[Document]:
         question = inputs[self.question_key]
         docs = self.vectorstore.similarity_search(
-            question, k=self.k, **self.search_kwargs
+            question,
+            k=self.k,
+            **self.search_kwargs,
         )
         return self._reduce_tokens_below_limit(docs)
 
     async def _aget_docs(
-        self, inputs: dict[str, Any], *, run_manager: AsyncCallbackManagerForChainRun
+        self,
+        inputs: dict[str, Any],
+        *,
+        run_manager: AsyncCallbackManagerForChainRun,
     ) -> list[Document]:
-        raise NotImplementedError("VectorDBQAWithSourcesChain does not support async")
+        msg = "VectorDBQAWithSourcesChain does not support async"
+        raise NotImplementedError(msg)
 
     @model_validator(mode="before")
     @classmethod
-    def
+    def _raise_deprecation(cls, values: dict) -> Any:
         warnings.warn(
             "`VectorDBQAWithSourcesChain` is deprecated - "
-            "please use `from langchain.chains import RetrievalQAWithSourcesChain`"
+            "please use `from langchain.chains import RetrievalQAWithSourcesChain`",
+            stacklevel=5,
         )
         return values
 
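The `stacklevel=5` added to `warnings.warn` above only changes which caller frame the deprecation warning is attributed to, presumably so the message points past the validator machinery to user code. A minimal, self-contained sketch of that standard-library behavior (illustrative function names, not langchain code):

```python
import warnings


def _emit() -> None:
    # stacklevel=2 attributes the warning to _emit()'s caller rather than to
    # this line; the chain's stacklevel=5 does the same thing, just several
    # frames further up the call stack.
    warnings.warn("this helper is deprecated", stacklevel=2)


def caller() -> None:
    _emit()


caller()  # the reported location is the `_emit()` call inside caller()
```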
@@ -22,6 +22,7 @@ from langchain_core.structured_query import (
     Operator,
     StructuredQuery,
 )
+from typing_extensions import override
 
 from langchain.chains.llm import LLMChain
 from langchain.chains.query_constructor.parser import get_parser
@@ -46,6 +47,7 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
     ast_parse: Callable
     """Callable that parses dict into internal representation of query language."""
 
+    @override
     def parse(self, text: str) -> StructuredQuery:
         try:
             expected_keys = ["query", "filter"]
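The `@override` markers added in these hunks come from `typing_extensions` and are purely static: they are no-ops at runtime and let a type checker flag a method that no longer overrides anything in its base class. A minimal sketch of that behavior (illustrative class names, not langchain code; assumes `typing_extensions` is installed):

```python
from typing_extensions import override


class Base:
    def parse(self, text: str) -> str:
        return text


class Child(Base):
    @override  # a type checker errors here if Base.parse is renamed or removed
    def parse(self, text: str) -> str:
        return text.strip()


print(Child().parse("  hi  "))  # "hi" — the decorator changes nothing at runtime
```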
@@ -60,12 +62,11 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
             if not parsed.get("limit"):
                 parsed.pop("limit", None)
             return StructuredQuery(
-                **{k: v for k, v in parsed.items() if k in allowed_keys}
+                **{k: v for k, v in parsed.items() if k in allowed_keys},
             )
         except Exception as e:
-            raise OutputParserException(
-                f"Parsing text\n{text}\n raised following error:\n{e}"
-            )
+            msg = f"Parsing text\n{text}\n raised following error:\n{e}"
+            raise OutputParserException(msg) from e
 
     @classmethod
     def from_components(
@@ -73,7 +74,7 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
         allowed_comparators: Optional[Sequence[Comparator]] = None,
         allowed_operators: Optional[Sequence[Operator]] = None,
         allowed_attributes: Optional[Sequence[str]] = None,
-        fix_invalid: bool = False,
+        fix_invalid: bool = False, # noqa: FBT001,FBT002
     ) -> StructuredQueryOutputParser:
         """
         Create a structured query output parser from components.
@@ -89,14 +90,16 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
         if fix_invalid:
 
             def ast_parse(raw_filter: str) -> Optional[FilterDirective]:
-                filter = cast(Optional[FilterDirective], get_parser().parse(raw_filter))
-                fixed = fix_filter_directive(
-                    filter,
+                filter_directive = cast(
+                    "Optional[FilterDirective]",
+                    get_parser().parse(raw_filter),
+                )
+                return fix_filter_directive(
+                    filter_directive,
                     allowed_comparators=allowed_comparators,
                     allowed_operators=allowed_operators,
                     allowed_attributes=allowed_attributes,
                 )
-                return fixed
 
         else:
             ast_parse = get_parser(
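For context, a hedged sketch of how the `fix_invalid` branch above is reached. The `langchain.chains.query_constructor.base` import path is an assumption based on the identifiers in this diff, and actually parsing filters requires the optional `lark` dependency:

```python
from langchain_core.structured_query import Comparator, Operator

from langchain.chains.query_constructor.base import StructuredQueryOutputParser

# fix_invalid=True routes every parsed filter through fix_filter_directive,
# pruning comparators/operators/attributes outside the allowed vocabulary
# instead of failing the parse outright.
parser = StructuredQueryOutputParser.from_components(
    allowed_comparators=[Comparator.EQ, Comparator.GT],
    allowed_operators=[Operator.AND, Operator.OR],
    allowed_attributes=["genre", "year"],
    fix_invalid=True,
)
```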
@@ -108,7 +111,7 @@ class StructuredQueryOutputParser(BaseOutputParser[StructuredQuery]):
 
 
 def fix_filter_directive(
-    filter: Optional[FilterDirective],
+    filter: Optional[FilterDirective], # noqa: A002
     *,
     allowed_comparators: Optional[Sequence[Comparator]] = None,
     allowed_operators: Optional[Sequence[Operator]] = None,
@@ -130,18 +133,18 @@ def fix_filter_directive(
     ) or not filter:
         return filter
 
-    elif isinstance(filter, Comparison):
+    if isinstance(filter, Comparison):
         if allowed_comparators and filter.comparator not in allowed_comparators:
             return None
         if allowed_attributes and filter.attribute not in allowed_attributes:
             return None
         return filter
-    elif isinstance(filter, Operation):
+    if isinstance(filter, Operation):
         if allowed_operators and filter.operator not in allowed_operators:
             return None
         args = [
             cast(
-                FilterDirective,
+                "FilterDirective",
                 fix_filter_directive(
                     arg,
                     allowed_comparators=allowed_comparators,
@@ -154,15 +157,13 @@ def fix_filter_directive(
         ]
         if not args:
             return None
-        elif len(args) == 1 and filter.operator in (Operator.AND, Operator.OR):
+        if len(args) == 1 and filter.operator in (Operator.AND, Operator.OR):
             return args[0]
-        else:
-            return Operation(
-                operator=filter.operator,
-                arguments=args,
-            )
-    else:
-        return filter
+        return Operation(
+            operator=filter.operator,
+            arguments=args,
+        )
+    return filter
 
 
 def _format_attribute_info(info: Sequence[Union[AttributeInfo, dict]]) -> str:
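To make the pruning behavior of `fix_filter_directive` concrete, here is a hedged sketch using the `Comparison`/`Operation` types from `langchain_core.structured_query`; the `langchain.chains.query_constructor.base` import path for the function is an assumption based on the identifiers in this diff:

```python
from langchain_core.structured_query import Comparator, Comparison, Operation, Operator

from langchain.chains.query_constructor.base import fix_filter_directive

directive = Operation(
    operator=Operator.AND,
    arguments=[
        Comparison(comparator=Comparator.EQ, attribute="genre", value="comedy"),
        Comparison(comparator=Comparator.GT, attribute="year", value=2000),
    ],
)

# The GT clause uses a disallowed comparator and is dropped; the AND with a
# single surviving argument then collapses to that bare comparison.
fixed = fix_filter_directive(
    directive,
    allowed_comparators=[Comparator.EQ],
    allowed_operators=[Operator.AND, Operator.OR],
    allowed_attributes=["genre"],
)
print(fixed)  # the single EQ comparison on "genre"
```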
@@ -237,7 +238,9 @@ def get_query_constructor_prompt(
         examples = construct_examples(examples)
         example_prompt = USER_SPECIFIED_EXAMPLE_PROMPT
         prefix = PREFIX_WITH_DATA_SOURCE.format(
-            schema=schema, content=document_contents, attributes=attribute_str
+            schema=schema,
+            content=document_contents,
+            attributes=attribute_str,
         )
         suffix = SUFFIX_WITHOUT_DATA_SOURCE.format(i=len(examples) + 1)
     else:
@@ -247,7 +250,9 @@ def get_query_constructor_prompt(
         example_prompt = EXAMPLE_PROMPT
         prefix = DEFAULT_PREFIX.format(schema=schema)
         suffix = DEFAULT_SUFFIX.format(
-            i=len(examples) + 1, content=document_contents, attributes=attribute_str
+            i=len(examples) + 1,
+            content=document_contents,
+            attributes=attribute_str,
        )
     return FewShotPromptTemplate(
         examples=list(examples),
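The reflowed prefix/suffix formatting above feeds the few-shot prompt returned by `get_query_constructor_prompt`. A hedged usage sketch; the `AttributeInfo` import path and the `query` input variable of the returned template are assumptions, not part of this diff:

```python
from langchain.chains.query_constructor.base import get_query_constructor_prompt
from langchain.chains.query_constructor.schema import AttributeInfo

attribute_info = [
    AttributeInfo(name="genre", description="The movie's genre", type="string"),
    AttributeInfo(name="year", description="Release year", type="integer"),
]

prompt = get_query_constructor_prompt(
    document_contents="Brief plot summaries of movies",
    attribute_info=attribute_info,
)

# Render the few-shot prompt for one user question; "query" is assumed to be
# the template's sole input variable.
print(prompt.format(query="comedies released after 2000"))
```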
@@ -271,7 +276,7 @@ def load_query_constructor_chain(
     examples: Optional[list] = None,
     allowed_comparators: Sequence[Comparator] = tuple(Comparator),
     allowed_operators: Sequence[Operator] = tuple(Operator),
-    enable_limit: bool = False,
+    enable_limit: bool = False, # noqa: FBT001,FBT002
     schema_prompt: Optional[BasePromptTemplate] = None,
     **kwargs: Any,
 ) -> LLMChain:
@@ -302,11 +307,10 @@ def load_query_constructor_chain(
         enable_limit=enable_limit,
         schema_prompt=schema_prompt,
     )
-    allowed_attributes = []
-    for ainfo in attribute_info:
-        allowed_attributes.append(
-            ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"]
-        )
+    allowed_attributes = [
+        ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"]
+        for ainfo in attribute_info
+    ]
     output_parser = StructuredQueryOutputParser.from_components(
         allowed_comparators=allowed_comparators,
         allowed_operators=allowed_operators,
@@ -361,11 +365,10 @@ def load_query_constructor_runnable(
         schema_prompt=schema_prompt,
         **kwargs,
     )
-    allowed_attributes = []
-    for ainfo in attribute_info:
-        allowed_attributes.append(
-            ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"]
-        )
+    allowed_attributes = [
+        ainfo.name if isinstance(ainfo, AttributeInfo) else ainfo["name"]
+        for ainfo in attribute_info
+    ]
     output_parser = StructuredQueryOutputParser.from_components(
         allowed_comparators=allowed_comparators,
         allowed_operators=allowed_operators,
@@ -12,12 +12,12 @@ from langchain_core.structured_query import (
 )
 
 __all__ = [
-    "Visitor",
-    "Expr",
-    "Operator",
     "Comparator",
-    "FilterDirective",
     "Comparison",
+    "Expr",
+    "FilterDirective",
     "Operation",
+    "Operator",
     "StructuredQuery",
+    "Visitor",
 ]