langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
langchain/chains/graph_qa/prompts.py
CHANGED

@@ -74,7 +74,9 @@ __all__ = [
     "AQL_FIX_TEMPLATE",
     "AQL_GENERATION_TEMPLATE",
     "AQL_QA_TEMPLATE",
+    "CYPHER_GENERATION_PROMPT",
     "CYPHER_GENERATION_TEMPLATE",
+    "CYPHER_QA_PROMPT",
     "CYPHER_QA_TEMPLATE",
     "GRAPHDB_QA_TEMPLATE",
     "GRAPHDB_SPARQL_FIX_TEMPLATE",
@@ -91,6 +93,4 @@ __all__ = [
     "SPARQL_GENERATION_UPDATE_TEMPLATE",
     "SPARQL_INTENT_TEMPLATE",
     "SPARQL_QA_TEMPLATE",
-    "CYPHER_QA_PROMPT",
-    "CYPHER_GENERATION_PROMPT",
 ]
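A minimal usage sketch for the reordered exports (an assumption here is that langchain-community is installed, since these prompt objects are typically re-exported from it in recent releases); only the position of the two names in __all__ changes, not the symbols themselves:

# Minimal sketch: this import behaves the same before and after the reorder.
from langchain.chains.graph_qa.prompts import CYPHER_GENERATION_PROMPT, CYPHER_QA_PROMPT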
langchain/chains/history_aware_retriever.py
CHANGED

@@ -49,10 +49,11 @@ def create_history_aware_retriever(
 
     """
     if "input" not in prompt.input_variables:
-        raise ValueError(
+        msg = (
             "Expected `input` to be a prompt variable, "
             f"but got {prompt.input_variables}"
         )
+        raise ValueError(msg)
 
     retrieve_documents: RetrieverOutputLike = RunnableBranch(
         (
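A minimal sketch of the function this hunk touches; the `llm` and `retriever` objects are assumptions (constructed elsewhere), and a prompt without an `input` variable now raises the ValueError built from `msg`:

from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

from langchain.chains import create_history_aware_retriever

# The prompt must expose an `input` variable, or the check shown above raises.
rephrase_prompt = ChatPromptTemplate.from_messages(
    [
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
        ("human", "Rewrite the question above as a standalone search query."),
    ]
)
history_aware_retriever = create_history_aware_retriever(llm, retriever, rephrase_prompt)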
langchain/chains/hyde/base.py
CHANGED

@@ -47,8 +47,7 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
         """Output keys for Hyde's LLM chain."""
         if isinstance(self.llm_chain, LLMChain):
             return self.llm_chain.output_keys
-        else:
-            return ["text"]
+        return ["text"]
 
     def embed_documents(self, texts: list[str]) -> list[list[float]]:
         """Call the base embeddings."""
@@ -66,7 +65,7 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
             "HypotheticalDocumentEmbedder will use a pure Python implementation "
             "for internal calculations, which may significantly impact "
             "performance, especially for large datasets. For optimal speed and "
-            "efficiency, consider installing NumPy: pip install numpy"
+            "efficiency, consider installing NumPy: pip install numpy",
         )
         if not embeddings:
             return []
@@ -92,7 +91,8 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
         """Call the internal llm chain."""
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         return self.llm_chain.invoke(
-            inputs, config={"callbacks": _run_manager.get_child()}
+            inputs,
+            config={"callbacks": _run_manager.get_child()},
         )
 
     @classmethod
@@ -110,10 +110,11 @@ class HypotheticalDocumentEmbedder(Chain, Embeddings):
         elif prompt_key is not None and prompt_key in PROMPT_MAP:
             prompt = PROMPT_MAP[prompt_key]
         else:
-            raise ValueError(
+            msg = (
                 f"Must specify prompt_key if custom_prompt not provided. Should be one "
                 f"of {list(PROMPT_MAP.keys())}."
             )
+            raise ValueError(msg)
 
         llm_chain = prompt | llm | StrOutputParser()
         return cls(base_embeddings=base_embeddings, llm_chain=llm_chain, **kwargs)
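A minimal sketch of the constructor path these hunks refactor; the `llm` and `base_embeddings` objects are assumptions, and `from_llm` needs either `custom_prompt` or a `prompt_key` present in PROMPT_MAP, otherwise it raises the ValueError now built via `msg`:

from langchain.chains.hyde.base import HypotheticalDocumentEmbedder

# `llm` is any language model and `base_embeddings` any Embeddings implementation;
# both are assumed to exist in the surrounding code.
hyde_embedder = HypotheticalDocumentEmbedder.from_llm(
    llm=llm,
    base_embeddings=base_embeddings,
    prompt_key="web_search",  # must be a PROMPT_MAP key when no custom_prompt is given
)
query_vector = hyde_embedder.embed_query("What causes the aurora borealis?")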
langchain/chains/hyde/prompts.py
CHANGED

@@ -1,15 +1,14 @@
-# flake8: noqa
 from langchain_core.prompts.prompt import PromptTemplate
 
-web_search_template = """Please write a passage to answer the question
+web_search_template = """Please write a passage to answer the question
 Question: {QUESTION}
 Passage:"""
 web_search = PromptTemplate(template=web_search_template, input_variables=["QUESTION"])
-sci_fact_template = """Please write a scientific paper passage to support/refute the claim
+sci_fact_template = """Please write a scientific paper passage to support/refute the claim
 Claim: {Claim}
-Passage:"""
+Passage:"""  # noqa: E501
 sci_fact = PromptTemplate(template=sci_fact_template, input_variables=["Claim"])
-arguana_template = """Please write a counter argument for the passage
+arguana_template = """Please write a counter argument for the passage
 Passage: {PASSAGE}
 Counter Argument:"""
 arguana = PromptTemplate(template=arguana_template, input_variables=["PASSAGE"])
@@ -33,7 +32,7 @@ Passage:"""
 trec_news = PromptTemplate(template=trec_news_template, input_variables=["TOPIC"])
 mr_tydi_template = """Please write a passage in Swahili/Korean/Japanese/Bengali to answer the question in detail.
 Question: {QUESTION}
-Passage:"""
+Passage:"""  # noqa: E501
 mr_tydi = PromptTemplate(template=mr_tydi_template, input_variables=["QUESTION"])
 PROMPT_MAP = {
     "web_search": web_search,
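The prompt registry itself is unchanged by this lint cleanup; a quick way to list which `prompt_key` values `HypotheticalDocumentEmbedder.from_llm` accepts:

from langchain.chains.hyde.prompts import PROMPT_MAP

# Keys such as "web_search", "sci_fact", "arguana", "trec_news" and "mr_tydi"
# appear in the diff above; printing avoids hard-coding the full list here.
print(sorted(PROMPT_MAP))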
langchain/chains/llm.py
CHANGED

@@ -32,6 +32,7 @@ from langchain_core.runnables import (
 from langchain_core.runnables.configurable import DynamicRunnable
 from langchain_core.utils.input import get_colored_text
 from pydantic import ConfigDict, Field
+from typing_extensions import override
 
 from langchain.chains.base import Chain
 
@@ -73,22 +74,25 @@ class LLMChain(Chain):
                 input_variables=["adjective"], template=prompt_template
             )
             llm = LLMChain(llm=OpenAI(), prompt=prompt)
+
     """
 
     @classmethod
+    @override
     def is_lc_serializable(self) -> bool:
         return True
 
     prompt: BasePromptTemplate
     """Prompt object to use."""
     llm: Union[
-        Runnable[LanguageModelInput, str], Runnable[LanguageModelInput, BaseMessage]
+        Runnable[LanguageModelInput, str],
+        Runnable[LanguageModelInput, BaseMessage],
     ]
     """Language model to call."""
     output_key: str = "text"  #: :meta private:
     output_parser: BaseLLMOutputParser = Field(default_factory=StrOutputParser)
     """Output parser to use.
-    Defaults to one that takes the most likely string but does not change it
+    Defaults to one that takes the most likely string but does not change it
     otherwise."""
     return_final_only: bool = True
     """Whether to return only the final parsed result. Defaults to True.
@@ -116,8 +120,7 @@ class LLMChain(Chain):
         """
         if self.return_final_only:
             return [self.output_key]
-        else:
-            return [self.output_key, "full_generation"]
+        return [self.output_key, "full_generation"]
 
     def _call(
         self,
@@ -142,17 +145,17 @@ class LLMChain(Chain):
                 callbacks=callbacks,
                 **self.llm_kwargs,
             )
-
-
-
-
-
-
-
-
-
-
-
+        results = self.llm.bind(stop=stop, **self.llm_kwargs).batch(
+            cast("list", prompts),
+            {"callbacks": callbacks},
+        )
+        generations: list[list[Generation]] = []
+        for res in results:
+            if isinstance(res, BaseMessage):
+                generations.append([ChatGeneration(message=res)])
+            else:
+                generations.append([Generation(text=res)])
+        return LLMResult(generations=generations)
 
     async def agenerate(
         self,
@@ -169,17 +172,17 @@ class LLMChain(Chain):
                 callbacks=callbacks,
                 **self.llm_kwargs,
             )
-
-
-
-
-
-
-
-
-
-
-
+        results = await self.llm.bind(stop=stop, **self.llm_kwargs).abatch(
+            cast("list", prompts),
+            {"callbacks": callbacks},
+        )
+        generations: list[list[Generation]] = []
+        for res in results:
+            if isinstance(res, BaseMessage):
+                generations.append([ChatGeneration(message=res)])
+            else:
+                generations.append([Generation(text=res)])
+        return LLMResult(generations=generations)
 
     def prep_prompts(
         self,
@@ -201,9 +204,8 @@ class LLMChain(Chain):
             if run_manager:
                 run_manager.on_text(_text, end="\n", verbose=self.verbose)
             if "stop" in inputs and inputs["stop"] != stop:
-                raise ValueError(
-                    "If `stop` is present in any inputs, should be present in all."
-                )
+                msg = "If `stop` is present in any inputs, should be present in all."
+                raise ValueError(msg)
             prompts.append(prompt)
         return prompts, stop
 
@@ -227,18 +229,21 @@ class LLMChain(Chain):
             if run_manager:
                 await run_manager.on_text(_text, end="\n", verbose=self.verbose)
             if "stop" in inputs and inputs["stop"] != stop:
-                raise ValueError(
-                    "If `stop` is present in any inputs, should be present in all."
-                )
+                msg = "If `stop` is present in any inputs, should be present in all."
+                raise ValueError(msg)
             prompts.append(prompt)
         return prompts, stop
 
     def apply(
-        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+        self,
+        input_list: list[dict[str, Any]],
+        callbacks: Callbacks = None,
     ) -> list[dict[str, str]]:
         """Utilize the LLM generate method for speed gains."""
         callback_manager = CallbackManager.configure(
-            callbacks, self.callbacks, self.verbose
+            callbacks,
+            self.callbacks,
+            self.verbose,
         )
         run_manager = callback_manager.on_chain_start(
             None,
@@ -249,17 +254,21 @@ class LLMChain(Chain):
             response = self.generate(input_list, run_manager=run_manager)
         except BaseException as e:
             run_manager.on_chain_error(e)
-            raise
+            raise
         outputs = self.create_outputs(response)
         run_manager.on_chain_end({"outputs": outputs})
         return outputs
 
     async def aapply(
-        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+        self,
+        input_list: list[dict[str, Any]],
+        callbacks: Callbacks = None,
     ) -> list[dict[str, str]]:
         """Utilize the LLM generate method for speed gains."""
         callback_manager = AsyncCallbackManager.configure(
-            callbacks, self.callbacks, self.verbose
+            callbacks,
+            self.callbacks,
+            self.verbose,
         )
         run_manager = await callback_manager.on_chain_start(
             None,
@@ -270,7 +279,7 @@ class LLMChain(Chain):
             response = await self.agenerate(input_list, run_manager=run_manager)
         except BaseException as e:
             await run_manager.on_chain_error(e)
-            raise
+            raise
         outputs = self.create_outputs(response)
         await run_manager.on_chain_end({"outputs": outputs})
         return outputs
@@ -315,6 +324,7 @@ class LLMChain(Chain):
         .. code-block:: python
 
             completion = llm.predict(adjective="funny")
+
         """
         return self(kwargs, callbacks=callbacks)[self.output_key]
 
@@ -332,66 +342,77 @@ class LLMChain(Chain):
         .. code-block:: python
 
             completion = llm.predict(adjective="funny")
+
        """
         return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
 
     def predict_and_parse(
-        self, callbacks: Callbacks = None, **kwargs: Any
+        self,
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> Union[str, list[str], dict[str, Any]]:
         """Call predict and then parse the results."""
         warnings.warn(
             "The predict_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
+            "instead pass an output parser directly to LLMChain.",
+            stacklevel=2,
         )
         result = self.predict(callbacks=callbacks, **kwargs)
         if self.prompt.output_parser is not None:
             return self.prompt.output_parser.parse(result)
-        else:
-            return result
+        return result
 
     async def apredict_and_parse(
-        self, callbacks: Callbacks = None, **kwargs: Any
+        self,
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> Union[str, list[str], dict[str, str]]:
         """Call apredict and then parse the results."""
         warnings.warn(
             "The apredict_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
+            "instead pass an output parser directly to LLMChain.",
+            stacklevel=2,
         )
         result = await self.apredict(callbacks=callbacks, **kwargs)
         if self.prompt.output_parser is not None:
             return self.prompt.output_parser.parse(result)
-        else:
-            return result
+        return result
 
     def apply_and_parse(
-        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+        self,
+        input_list: list[dict[str, Any]],
+        callbacks: Callbacks = None,
     ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         """Call apply and then parse the results."""
         warnings.warn(
             "The apply_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
+            "instead pass an output parser directly to LLMChain.",
+            stacklevel=2,
        )
         result = self.apply(input_list, callbacks=callbacks)
         return self._parse_generation(result)
 
     def _parse_generation(
-        self, generation: list[dict[str, str]]
+        self,
+        generation: list[dict[str, str]],
     ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         if self.prompt.output_parser is not None:
             return [
                 self.prompt.output_parser.parse(res[self.output_key])
                 for res in generation
             ]
-        else:
-            return generation
+        return generation
 
     async def aapply_and_parse(
-        self, input_list: list[dict[str, Any]], callbacks: Callbacks = None
+        self,
+        input_list: list[dict[str, Any]],
+        callbacks: Callbacks = None,
     ) -> Sequence[Union[str, list[str], dict[str, str]]]:
         """Call apply and then parse the results."""
         warnings.warn(
             "The aapply_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
+            "instead pass an output parser directly to LLMChain.",
+            stacklevel=2,
         )
         result = await self.aapply(input_list, callbacks=callbacks)
         return self._parse_generation(result)
@@ -413,14 +434,14 @@ class LLMChain(Chain):
 def _get_language_model(llm_like: Runnable) -> BaseLanguageModel:
     if isinstance(llm_like, BaseLanguageModel):
         return llm_like
-    elif isinstance(llm_like, RunnableBinding):
+    if isinstance(llm_like, RunnableBinding):
         return _get_language_model(llm_like.bound)
-    elif isinstance(llm_like, RunnableWithFallbacks):
+    if isinstance(llm_like, RunnableWithFallbacks):
         return _get_language_model(llm_like.runnable)
-    elif isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
+    if isinstance(llm_like, (RunnableBranch, DynamicRunnable)):
         return _get_language_model(llm_like.default)
-    else:
-        raise ValueError(
-            f"Unable to extract BaseLanguageModel from llm_like object of type "
-            f"{type(llm_like)}"
-        )
+    msg = (
+        f"Unable to extract BaseLanguageModel from llm_like object of type "
+        f"{type(llm_like)}"
+    )
+    raise ValueError(msg)

langchain/chains/llm_bash/__init__.py
CHANGED

@@ -1,9 +1,10 @@
-def __getattr__(
+def __getattr__(_: str = "") -> None:
     """Raise an error on import since is deprecated."""
-    raise AttributeError(
+    msg = (
         "This module has been moved to langchain-experimental. "
         "For more details: https://github.com/langchain-ai/langchain/discussions/11352."
         "To access this code, install it with `pip install langchain-experimental`."
         "`from langchain_experimental.llm_bash.base "
         "import LLMBashChain`"
     )
+    raise AttributeError(msg)

langchain/chains/llm_checker/base.py
CHANGED

@@ -55,13 +55,12 @@ def _load_question_to_checked_assertions_chain(
         check_assertions_chain,
         revised_answer_chain,
     ]
-    question_to_checked_assertions_chain = SequentialChain(
+    return SequentialChain(
         chains=chains,  # type: ignore[arg-type]
         input_variables=["question"],
         output_variables=["revised_statement"],
         verbose=True,
     )
-    return question_to_checked_assertions_chain
 
 
 @deprecated(
@@ -83,6 +82,7 @@ class LLMCheckerChain(Chain):
             from langchain.chains import LLMCheckerChain
             llm = OpenAI(temperature=0.7)
             checker_chain = LLMCheckerChain.from_llm(llm)
+
     """
 
     question_to_checked_assertions_chain: SequentialChain
@@ -107,12 +107,13 @@ class LLMCheckerChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def
+    def _raise_deprecation(cls, values: dict) -> Any:
         if "llm" in values:
             warnings.warn(
                 "Directly instantiating an LLMCheckerChain with an llm is deprecated. "
                 "Please instantiate with question_to_checked_assertions_chain "
-                "or using the from_llm class method."
+                "or using the from_llm class method.",
+                stacklevel=5,
             )
         if (
             "question_to_checked_assertions_chain" not in values
@@ -122,7 +123,8 @@ class LLMCheckerChain(Chain):
                 _load_question_to_checked_assertions_chain(
                     values["llm"],
                     values.get(
-                        "create_draft_answer_prompt", CREATE_DRAFT_ANSWER_PROMPT
+                        "create_draft_answer_prompt",
+                        CREATE_DRAFT_ANSWER_PROMPT,
                     ),
                     values.get("list_assertions_prompt", LIST_ASSERTIONS_PROMPT),
                     values.get("check_assertions_prompt", CHECK_ASSERTIONS_PROMPT),
@@ -159,7 +161,8 @@ class LLMCheckerChain(Chain):
         question = inputs[self.input_key]
 
         output = self.question_to_checked_assertions_chain(
-            {"question": question}, callbacks=_run_manager.get_child()
+            {"question": question},
+            callbacks=_run_manager.get_child(),
         )
         return {self.output_key: output["revised_statement"]}
 
@@ -177,6 +180,16 @@ class LLMCheckerChain(Chain):
         revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT,
         **kwargs: Any,
     ) -> LLMCheckerChain:
+        """Create an LLMCheckerChain from a language model.
+
+        Args:
+            llm: a language model
+            create_draft_answer_prompt: prompt to create a draft answer
+            list_assertions_prompt: prompt to list assertions
+            check_assertions_prompt: prompt to check assertions
+            revised_answer_prompt: prompt to revise the answer
+            **kwargs: additional arguments
+        """
         question_to_checked_assertions_chain = (
             _load_question_to_checked_assertions_chain(
                 llm,

langchain/chains/llm_checker/prompt.py
CHANGED

@@ -1,4 +1,3 @@
-# flake8: noqa
 from langchain_core.prompts.prompt import PromptTemplate
 
 _CREATE_DRAFT_ANSWER_TEMPLATE = """{question}\n\n"""
@@ -8,14 +7,14 @@ CREATE_DRAFT_ANSWER_PROMPT = PromptTemplate(
 
 _LIST_ASSERTIONS_TEMPLATE = """Here is a statement:
 {statement}
-Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
+Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""  # noqa: E501
 LIST_ASSERTIONS_PROMPT = PromptTemplate(
     input_variables=["statement"], template=_LIST_ASSERTIONS_TEMPLATE
 )
 
 _CHECK_ASSERTIONS_TEMPLATE = """Here is a bullet point list of assertions:
 {assertions}
-For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
+For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""  # noqa: E501
 CHECK_ASSERTIONS_PROMPT = PromptTemplate(
     input_variables=["assertions"], template=_CHECK_ASSERTIONS_TEMPLATE
 )
@@ -24,7 +23,7 @@ _REVISED_ANSWER_TEMPLATE = """{checked_assertions}
 
 Question: In light of the above assertions and checks, how would you answer the question '{question}'?
 
-Answer:"""
+Answer:"""  # noqa: E501
 REVISED_ANSWER_PROMPT = PromptTemplate(
     input_variables=["checked_assertions", "question"],
     template=_REVISED_ANSWER_TEMPLATE,

langchain/chains/llm_math/base.py
CHANGED

@@ -26,7 +26,7 @@ from langchain.chains.llm_math.prompt import PROMPT
     message=(
         "This class is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.llm_math.base.LLMMathChain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.llm_math.base.LLMMathChain.html"
     ),
     removal="1.0",
 )
@@ -146,6 +146,7 @@ class LLMMathChain(Chain):
             from langchain.chains import LLMMathChain
             from langchain_community.llms import OpenAI
             llm_math = LLMMathChain.from_llm(OpenAI())
+
     """  # noqa: E501
 
     llm_chain: LLMChain
@@ -163,19 +164,21 @@ class LLMMathChain(Chain):
 
     @model_validator(mode="before")
     @classmethod
-    def
+    def _raise_deprecation(cls, values: dict) -> Any:
         try:
             import numexpr  # noqa: F401
-        except ImportError:
-            raise ImportError(
+        except ImportError as e:
+            msg = (
                 "LLMMathChain requires the numexpr package. "
                 "Please install it with `pip install numexpr`."
             )
+            raise ImportError(msg) from e
         if "llm" in values:
             warnings.warn(
                 "Directly instantiating an LLMMathChain with an llm is deprecated. "
                 "Please instantiate with llm_chain argument or using the from_llm "
-                "class method."
+                "class method.",
+                stacklevel=5,
             )
         if "llm_chain" not in values and values["llm"] is not None:
             prompt = values.get("prompt", PROMPT)
@@ -208,19 +211,22 @@ class LLMMathChain(Chain):
                 expression.strip(),
                 global_dict={},  # restrict access to globals
                 local_dict=local_dict,  # add common mathematical functions
-                )
+                ),
             )
         except Exception as e:
-            raise ValueError(
+            msg = (
                 f'LLMMathChain._evaluate("{expression}") raised error: {e}.'
                 " Please try again with a valid numerical expression"
             )
+            raise ValueError(msg) from e
 
         # Remove any leading and trailing brackets from the output
         return re.sub(r"^\[|\]$", "", output)
 
     def _process_llm_result(
-        self, llm_output: str, run_manager: CallbackManagerForChainRun
+        self,
+        llm_output: str,
+        run_manager: CallbackManagerForChainRun,
     ) -> dict[str, str]:
         run_manager.on_text(llm_output, color="green", verbose=self.verbose)
         llm_output = llm_output.strip()
@@ -236,7 +242,8 @@ class LLMMathChain(Chain):
         elif "Answer:" in llm_output:
             answer = "Answer: " + llm_output.split("Answer:")[-1]
         else:
-            raise ValueError(f"unknown format from LLM: {llm_output}")
+            msg = f"unknown format from LLM: {llm_output}"
+            raise ValueError(msg)
         return {self.output_key: answer}
 
     async def _aprocess_llm_result(
@@ -258,7 +265,8 @@ class LLMMathChain(Chain):
         elif "Answer:" in llm_output:
             answer = "Answer: " + llm_output.split("Answer:")[-1]
         else:
-            raise ValueError(f"unknown format from LLM: {llm_output}")
+            msg = f"unknown format from LLM: {llm_output}"
+            raise ValueError(msg)
         return {self.output_key: answer}
 
     def _call(
@@ -300,5 +308,12 @@ class LLMMathChain(Chain):
         prompt: BasePromptTemplate = PROMPT,
         **kwargs: Any,
     ) -> LLMMathChain:
+        """Create a LLMMathChain from a language model.
+
+        Args:
+            llm: a language model
+            prompt: a prompt template
+            **kwargs: additional arguments
+        """
         llm_chain = LLMChain(llm=llm, prompt=prompt)
         return cls(llm_chain=llm_chain, **kwargs)

langchain/chains/llm_math/prompt.py
CHANGED

@@ -1,4 +1,3 @@
-# flake8: noqa
 from langchain_core.prompts.prompt import PromptTemplate
 
 _PROMPT_TEMPLATE = """Translate a math problem into a expression that can be executed using Python's numexpr library. Use the output of running this code to answer the question.
@@ -36,7 +35,7 @@ Question: 37593^(1/5)
 Answer: 8.222831614237718
 
 Question: {question}
-"""
+"""  # noqa: E501
 
 PROMPT = PromptTemplate(
     input_variables=["question"],