langchain 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of langchain has been flagged as potentially problematic.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +160 -133
- langchain/agents/agent_iterator.py +31 -14
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +7 -7
- langchain/agents/chat/base.py +6 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +9 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +7 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +18 -9
- langchain/agents/json_chat/base.py +18 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +9 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +80 -70
- langchain/agents/openai_functions_agent/base.py +47 -38
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +8 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +7 -7
- langchain/agents/output_parsers/openai_functions.py +15 -7
- langchain/agents/output_parsers/openai_tools.py +9 -4
- langchain/agents/output_parsers/react_json_single_input.py +10 -5
- langchain/agents/output_parsers/react_single_input.py +15 -11
- langchain/agents/output_parsers/self_ask.py +3 -2
- langchain/agents/output_parsers/tools.py +18 -13
- langchain/agents/output_parsers/xml.py +99 -28
- langchain/agents/react/agent.py +4 -4
- langchain/agents/react/base.py +22 -17
- langchain/agents/react/output_parser.py +5 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +3 -2
- langchain/agents/self_ask_with_search/base.py +19 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +14 -11
- langchain/agents/structured_chat/output_parser.py +16 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +7 -6
- langchain/agents/tools.py +2 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +5 -5
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +6 -6
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +4 -1
- langchain/callbacks/streaming_aiter_final_only.py +5 -3
- langchain/callbacks/streaming_stdout_final_only.py +5 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/base.py +2 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +1 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +36 -22
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +89 -55
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +24 -11
- langchain/chains/combine_documents/map_reduce.py +39 -31
- langchain/chains/combine_documents/map_rerank.py +34 -21
- langchain/chains/combine_documents/reduce.py +47 -26
- langchain/chains/combine_documents/refine.py +26 -17
- langchain/chains/combine_documents/stuff.py +19 -12
- langchain/chains/constitutional_ai/base.py +4 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +6 -7
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +41 -20
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +8 -9
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +26 -12
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +77 -61
- langchain/chains/llm_bash/__init__.py +2 -1
- langchain/chains/llm_checker/base.py +7 -5
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +16 -9
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +9 -6
- langchain/chains/llm_symbolic_math/__init__.py +2 -1
- langchain/chains/loading.py +170 -153
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +8 -9
- langchain/chains/natbot/base.py +8 -8
- langchain/chains/natbot/crawler.py +73 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +13 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +35 -35
- langchain/chains/openai_functions/qa_with_structure.py +19 -12
- langchain/chains/openai_functions/tagging.py +2 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +4 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +14 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +14 -5
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +17 -6
- langchain/chains/query_constructor/base.py +34 -33
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +37 -32
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +34 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +24 -20
- langchain/chains/router/embedding_router.py +12 -8
- langchain/chains/router/llm_router.py +17 -16
- langchain/chains/router/multi_prompt.py +2 -2
- langchain/chains/router/multi_retrieval_qa.py +10 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +6 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +75 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +5 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +160 -123
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +33 -24
- langchain/embeddings/cache.py +117 -26
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +24 -24
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -14
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +21 -12
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +24 -21
- langchain/evaluation/loading.py +15 -11
- langchain/evaluation/parsing/base.py +4 -1
- langchain/evaluation/parsing/json_distance.py +5 -2
- langchain/evaluation/parsing/json_schema.py +12 -8
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +13 -6
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +2 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -16
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +15 -10
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +11 -8
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +13 -12
- langchain/memory/entity.py +84 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +1 -3
- langchain/memory/summary.py +13 -11
- langchain/memory/summary_buffer.py +17 -8
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +13 -6
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +10 -5
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +5 -3
- langchain/output_parsers/fix.py +52 -52
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +43 -47
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +7 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +77 -78
- langchain/output_parsers/structured.py +11 -6
- langchain/output_parsers/yaml.py +15 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +10 -9
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +15 -13
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +7 -5
- langchain/retrievers/document_compressors/base.py +13 -7
- langchain/retrievers/document_compressors/chain_extract.py +4 -5
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +11 -12
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -19
- langchain/retrievers/document_compressors/embeddings_filter.py +23 -23
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +11 -6
- langchain/retrievers/ensemble.py +28 -25
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +32 -26
- langchain/retrievers/multi_vector.py +20 -8
- langchain/retrievers/parent_document_retriever.py +18 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +138 -119
- langchain/retrievers/time_weighted_retriever.py +18 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +2 -1
- langchain/runnables/openai_functions.py +6 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +3 -2
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +1 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +10 -7
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +11 -2
- langchain/smith/evaluation/runner_utils.py +181 -129
- langchain/smith/evaluation/string_run_evaluator.py +75 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +6 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +2 -1
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/METADATA +5 -5
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/RECORD +582 -582
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
langchain/memory/summary_buffer.py CHANGED

@@ -51,14 +51,16 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         buffer = self.chat_memory.messages
         if self.moving_summary_buffer != "":
             first_messages: list[BaseMessage] = [
-                self.summary_message_cls(content=self.moving_summary_buffer)
+                self.summary_message_cls(content=self.moving_summary_buffer),
             ]
             buffer = first_messages + buffer
         if self.return_messages:
             final_buffer: Any = buffer
         else:
             final_buffer = get_buffer_string(
-                buffer,
+                buffer,
+                human_prefix=self.human_prefix,
+                ai_prefix=self.ai_prefix,
             )
         return {self.memory_key: final_buffer}

@@ -67,14 +69,16 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         buffer = await self.chat_memory.aget_messages()
         if self.moving_summary_buffer != "":
             first_messages: list[BaseMessage] = [
-                self.summary_message_cls(content=self.moving_summary_buffer)
+                self.summary_message_cls(content=self.moving_summary_buffer),
             ]
             buffer = first_messages + buffer
         if self.return_messages:
             final_buffer: Any = buffer
         else:
             final_buffer = get_buffer_string(
-                buffer,
+                buffer,
+                human_prefix=self.human_prefix,
+                ai_prefix=self.ai_prefix,
             )
         return {self.memory_key: final_buffer}

@@ -84,10 +88,11 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         prompt_variables = values["prompt"].input_variables
         expected_keys = {"summary", "new_lines"}
         if expected_keys != set(prompt_variables):
-            raise ValueError(
+            msg = (
                 "Got unexpected prompt input variables. The prompt expects "
                 f"{prompt_variables}, but it should have {expected_keys}."
             )
+            raise ValueError(msg)
         return values

     def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:

@@ -96,7 +101,9 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
         self.prune()

     async def asave_context(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
+        self,
+        inputs: dict[str, Any],
+        outputs: dict[str, str],
     ) -> None:
         """Asynchronously save context from this conversation to buffer."""
         await super().asave_context(inputs, outputs)

@@ -112,7 +119,8 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
             pruned_memory.append(buffer.pop(0))
             curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
         self.moving_summary_buffer = self.predict_new_summary(
-            pruned_memory, self.moving_summary_buffer
+            pruned_memory,
+            self.moving_summary_buffer,
         )

     async def aprune(self) -> None:

@@ -125,7 +133,8 @@ class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin):
             pruned_memory.append(buffer.pop(0))
             curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
         self.moving_summary_buffer = await self.apredict_new_summary(
-            pruned_memory, self.moving_summary_buffer
+            pruned_memory,
+            self.moving_summary_buffer,
         )

     def clear(self) -> None:
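The functional part of this change is that the string buffer now honors custom prefixes. A minimal sketch of the effect (not part of the diff; FakeListLLM from langchain_core is only a stand-in to satisfy the constructor and is never called here):

```python
from langchain_core.language_models import FakeListLLM
from langchain.memory import ConversationSummaryBufferMemory

memory = ConversationSummaryBufferMemory(
    llm=FakeListLLM(responses=["(summary)"]),  # stand-in model, not used below
    human_prefix="User",
    ai_prefix="Assistant",
)
memory.chat_memory.add_user_message("hi")
memory.chat_memory.add_ai_message("hello!")

# With 0.3.27 the buffer string is rendered with the configured prefixes
# ("User: hi\nAssistant: hello!") instead of the default "Human:"/"AI:".
print(memory.load_memory_variables({}))
```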
langchain/memory/utils.py CHANGED

@@ -14,7 +14,8 @@ def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
     """
     # "stop" is a special key that can be passed as input but is not used to
     # format the prompt.
-    prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
+    prompt_input_keys = list(set(inputs).difference([*memory_variables, "stop"]))
     if len(prompt_input_keys) != 1:
-        raise ValueError(f"One input key expected got {prompt_input_keys}")
+        msg = f"One input key expected got {prompt_input_keys}"
+        raise ValueError(msg)
     return prompt_input_keys[0]
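For reference, a small illustration (not from the diff) of what get_prompt_input_key does; the rewrite keeps the same behavior, including the exclusion of the special "stop" key:

```python
from langchain.memory.utils import get_prompt_input_key

inputs = {"question": "What changed?", "history": "...", "stop": ["\n"]}
# "history" is a memory variable and "stop" is ignored, so the single
# remaining key, "question", is returned.
assert get_prompt_input_key(inputs, memory_variables=["history"]) == "question"
```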
langchain/memory/vectorstore.py CHANGED

@@ -5,10 +5,10 @@ from typing import Any, Optional, Union

 from langchain_core._api import deprecated
 from langchain_core.documents import Document
+from langchain_core.memory import BaseMemory
 from langchain_core.vectorstores import VectorStoreRetriever
 from pydantic import Field

-from langchain.memory.chat_memory import BaseMemory
 from langchain.memory.utils import get_prompt_input_key


@@ -52,7 +52,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self.input_key

     def _documents_to_memory_variables(
-        self, docs: list[Document]
+        self,
+        docs: list[Document],
     ) -> dict[str, Union[list[Document], str]]:
         result: Union[list[Document], str]
         if not self.return_docs:

@@ -62,7 +63,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return {self.memory_key: result}

     def load_memory_variables(
-        self, inputs: dict[str, Any]
+        self,
+        inputs: dict[str, Any],
     ) -> dict[str, Union[list[Document], str]]:
         """Return history buffer."""
         input_key = self._get_prompt_input_key(inputs)

@@ -71,7 +73,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self._documents_to_memory_variables(docs)

     async def aload_memory_variables(
-        self, inputs: dict[str, Any]
+        self,
+        inputs: dict[str, Any],
     ) -> dict[str, Union[list[Document], str]]:
         """Return history buffer."""
         input_key = self._get_prompt_input_key(inputs)

@@ -80,7 +83,9 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self._documents_to_memory_variables(docs)

     def _form_documents(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
+        self,
+        inputs: dict[str, Any],
+        outputs: dict[str, str],
     ) -> list[Document]:
         """Format context from this conversation to buffer."""
         # Each document should only include the current turn, not the chat history

@@ -100,7 +105,9 @@ class VectorStoreRetrieverMemory(BaseMemory):
         self.retriever.add_documents(documents)

     async def asave_context(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
+        self,
+        inputs: dict[str, Any],
+        outputs: dict[str, str],
     ) -> None:
         """Save context from this conversation to buffer."""
         documents = self._form_documents(inputs, outputs)
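The import move is transparent to callers; a quick sanity sketch (not from the diff), assuming nothing beyond the two public imports:

```python
from langchain_core.memory import BaseMemory
from langchain.memory import VectorStoreRetrieverMemory

# VectorStoreRetrieverMemory still derives from the core BaseMemory interface,
# so existing isinstance/issubclass checks keep working after the import change.
assert issubclass(VectorStoreRetrieverMemory, BaseMemory)
```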
langchain/memory/vectorstore_token_buffer_memory.py CHANGED

@@ -23,7 +23,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 DEFAULT_HISTORY_TEMPLATE = """
 Current date and time: {current_time}.

-Potentially relevant timestamped excerpts of previous conversations (you 
+Potentially relevant timestamped excerpts of previous conversations (you
 do not need to use these if irrelevant):
 {previous_history}

@@ -131,13 +131,13 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         previous_history = ""
         current_history = super().load_memory_variables(inputs)
         template = SystemMessagePromptTemplate.from_template(
-            self.previous_history_template
+            self.previous_history_template,
         )
         messages = [
             template.format(
                 previous_history=previous_history,
                 current_time=datetime.now().astimezone().strftime(TIMESTAMP_FORMAT),
-            )
+            ),
         ]
         messages.extend(current_history[self.memory_key])
         return {self.memory_key: messages}

@@ -167,7 +167,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         self._pop_and_store_interaction(buffer)

     def _pop_and_store_interaction(self, buffer: list[BaseMessage]) -> None:
-        input = buffer.pop(0)
+        input_ = buffer.pop(0)
         output = buffer.pop(0)
         timestamp = self._timestamps.pop(0).strftime(TIMESTAMP_FORMAT)
         # Split AI output into smaller chunks to avoid creating documents

@@ -175,7 +175,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         ai_chunks = self._split_long_ai_text(str(output.content))
         for index, chunk in enumerate(ai_chunks):
             self.memory_retriever.save_context(
-                {"Human": f"<{timestamp}/00> {str(input.content)}"},
+                {"Human": f"<{timestamp}/00> {input_.content!s}"},
                 {"AI": f"<{timestamp}/{index:02}> {chunk}"},
             )
langchain/model_laboratory.py CHANGED

@@ -34,24 +34,26 @@ class ModelLaboratory:
         """
         for chain in chains:
             if not isinstance(chain, Chain):
-                raise ValueError(
+                msg = (
                     "ModelLaboratory should now be initialized with Chains. "
                     "If you want to initialize with LLMs, use the `from_llms` method "
                     "instead (`ModelLaboratory.from_llms(...)`)"
                 )
+                raise ValueError(msg)  # noqa: TRY004
             if len(chain.input_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Currently only support chains with one input variable, "
                     f"got {chain.input_keys}"
                 )
+                raise ValueError(msg)
             if len(chain.output_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Currently only support chains with one output variable, "
                     f"got {chain.output_keys}"
                 )
-        if names is not None:
-            if len(names) != len(chains):
-                raise ValueError("Length of chains does not match length of names.")
+        if names is not None and len(names) != len(chains):
+            msg = "Length of chains does not match length of names."
+            raise ValueError(msg)
         self.chains = chains
         chain_range = [str(i) for i in range(len(self.chains))]
         self.chain_colors = get_color_mapping(chain_range)

@@ -59,7 +61,9 @@ class ModelLaboratory:

     @classmethod
     def from_llms(
-        cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
+        cls,
+        llms: list[BaseLLM],
+        prompt: Optional[PromptTemplate] = None,
     ) -> ModelLaboratory:
         """Initialize the ModelLaboratory with LLMs and an optional prompt.

@@ -89,10 +93,7 @@ class ModelLaboratory:
         """
         print(f"\033[1mInput:\033[0m\n{text}\n")  # noqa: T201
         for i, chain in enumerate(self.chains):
-            if self.names is not None:
-                name = self.names[i]
-            else:
-                name = str(chain)
+            name = self.names[i] if self.names is not None else str(chain)
             print_text(name, end="\n")
             output = chain.run(text)
             print_text(output, color=self.chain_colors[str(i)], end="\n\n")
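A hedged usage sketch (not from the diff): the public ModelLaboratory API is unchanged, only the signature layout and error handling were restyled. FakeListLLM stands in for real models here.

```python
from langchain_core.language_models import FakeListLLM
from langchain.model_laboratory import ModelLaboratory

lab = ModelLaboratory.from_llms(
    [FakeListLLM(responses=["four"]), FakeListLLM(responses=["4"])],
)
# Prints each model followed by its answer, in per-model colors.
lab.compare("What is 2 + 2?")
```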
langchain/output_parsers/__init__.py CHANGED

@@ -49,7 +49,7 @@ if TYPE_CHECKING:
 # Used to consolidate logic for raising deprecation warnings and
 # handling optional imports.
 DEPRECATED_LOOKUP = {
-    "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser"
+    "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser",
 }

 _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)

@@ -67,12 +67,15 @@ __all__ = [
     "DatetimeOutputParser",
     "EnumOutputParser",
     "GuardrailsOutputParser",
+    "JsonOutputKeyToolsParser",
+    "JsonOutputToolsParser",
     "ListOutputParser",
     "MarkdownListOutputParser",
     "NumberedListOutputParser",
     "OutputFixingParser",
     "PandasDataFrameOutputParser",
     "PydanticOutputParser",
+    "PydanticToolsParser",
     "RegexDictParser",
     "RegexParser",
     "ResponseSchema",

@@ -80,8 +83,5 @@ __all__ = [
     "RetryWithErrorOutputParser",
     "StructuredOutputParser",
     "XMLOutputParser",
-    "JsonOutputToolsParser",
-    "PydanticToolsParser",
-    "JsonOutputKeyToolsParser",
     "YamlOutputParser",
 ]
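The __all__ edits are purely a re-sort; the tool-calling parsers remain importable from the same place. A minimal check (not from the diff):

```python
from langchain.output_parsers import (
    JsonOutputKeyToolsParser,
    JsonOutputToolsParser,
    PydanticToolsParser,
)

# The names are simply listed in sorted order now; imports are unaffected.
print(JsonOutputToolsParser, JsonOutputKeyToolsParser, PydanticToolsParser)
```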
langchain/output_parsers/boolean.py CHANGED

@@ -28,22 +28,25 @@ class BooleanOutputParser(BaseOutputParser[bool]):
         }
         if self.true_val.upper() in truthy:
             if self.false_val.upper() in truthy:
-                raise ValueError(
+                msg = (
                     f"Ambiguous response. Both {self.true_val} and {self.false_val} "
                     f"in received: {text}."
                 )
+                raise ValueError(msg)
             return True
-        elif self.false_val.upper() in truthy:
+        if self.false_val.upper() in truthy:
             if self.true_val.upper() in truthy:
-                raise ValueError(
+                msg = (
                     f"Ambiguous response. Both {self.true_val} and {self.false_val} "
                     f"in received: {text}."
                 )
+                raise ValueError(msg)
             return False
-        raise ValueError(
+        msg = (
             f"BooleanOutputParser expected output value to include either "
             f"{self.true_val} or {self.false_val}. Received {text}."
         )
+        raise ValueError(msg)

     @property
     def _type(self) -> str:
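Behavior of the parser is unchanged; only how the error message is built differs. A small sketch (not from the diff), using the default YES/NO values:

```python
from langchain.output_parsers import BooleanOutputParser

parser = BooleanOutputParser()  # true_val="YES", false_val="NO" by default
assert parser.parse("YES, that is correct.") is True
assert parser.parse("no") is False
# parser.parse("maybe")  # still raises ValueError, now built via the msg variable
```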
langchain/output_parsers/combining.py CHANGED

@@ -5,6 +5,8 @@ from typing import Any
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.utils import pre_init

+_MIN_PARSERS = 2
+

 class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     """Combine multiple output parsers into one."""

@@ -19,13 +21,16 @@ class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     def validate_parsers(cls, values: dict[str, Any]) -> dict[str, Any]:
         """Validate the parsers."""
         parsers = values["parsers"]
-        if len(parsers) < 2:
-            raise ValueError("Must have at least two parsers")
+        if len(parsers) < _MIN_PARSERS:
+            msg = "Must have at least two parsers"
+            raise ValueError(msg)
         for parser in parsers:
             if parser._type == "combining":
-                raise ValueError("Cannot nest combining parsers")
+                msg = "Cannot nest combining parsers"
+                raise ValueError(msg)
             if parser._type == "list":
-                raise ValueError("Cannot combine list parsers")
+                msg = "Cannot combine list parsers"
+                raise ValueError(msg)
         return values

     @property

@@ -46,7 +51,7 @@ class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     def parse(self, text: str) -> dict[str, Any]:
         """Parse the output of an LLM call."""
         texts = text.split("\n\n")
-        output = dict()
+        output = {}
         for txt, parser in zip(texts, self.parsers):
             output.update(parser.parse(txt.strip()))
         return output
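A short usage sketch (not from the diff) of what CombiningOutputParser does; the new _MIN_PARSERS constant only replaces the literal 2 in the validator:

```python
from langchain.output_parsers import CombiningOutputParser, RegexParser

parser = CombiningOutputParser(
    parsers=[
        RegexParser(regex=r"answer:\s*(\w+)", output_keys=["answer"]),
        RegexParser(regex=r"confidence:\s*(\w+)", output_keys=["confidence"]),
    ],
)
# Each blank-line-separated block is routed to the matching sub-parser.
print(parser.parse("answer: Paris\n\nconfidence: high"))
# -> {'answer': 'Paris', 'confidence': 'high'}
```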
langchain/output_parsers/datetime.py CHANGED

@@ -1,42 +1,43 @@
-import random
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone

 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.utils import comma_list


-def _generate_random_datetime_strings(
-    pattern: str,
-    n: int = 3,
-    start_date: datetime = datetime(1, 1, 1),
-    end_date: datetime = datetime.now() + timedelta(days=3650),
-) -> list[str]:
-    """Generates n random datetime strings conforming to the
-    given pattern within the specified date range.
-
-    Pattern should be a string containing the desired format codes.
-    start_date and end_date should be datetime objects representing
-    the start and end of the date range.
-    """
-    examples = []
-    delta = end_date - start_date
-    for i in range(n):
-        random_delta = random.uniform(0, delta.total_seconds())
-        dt = start_date + timedelta(seconds=random_delta)
-        date_string = dt.strftime(pattern)
-        examples.append(date_string)
-    return examples
-
-
 class DatetimeOutputParser(BaseOutputParser[datetime]):
     """Parse the output of an LLM call to a datetime."""

     format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
-    """The string value that used as the datetime format."""
+    """The string value that is used as the datetime format.
+
+    Update this to match the desired datetime format for your application.
+    """

     def get_format_instructions(self) -> str:
-        examples = comma_list(_generate_random_datetime_strings(self.format))
+        """Returns the format instructions for the given format."""
+        if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
+            examples = comma_list(
+                [
+                    "2023-07-04T14:30:00.000000Z",
+                    "1999-12-31T23:59:59.999999Z",
+                    "2025-01-01T00:00:00.000000Z",
+                ],
+            )
+        else:
+            try:
+                now = datetime.now(tz=timezone.utc)
+                examples = comma_list(
+                    [
+                        now.strftime(self.format),
+                        (now.replace(year=now.year - 1)).strftime(self.format),
+                        (now - timedelta(days=1)).strftime(self.format),
+                    ],
+                )
+            except ValueError:
+                # Fallback if the format is very unusual
+                examples = f"e.g., a valid string in the format {self.format}"
+
         return (
             f"Write a datetime string that matches the "
             f"following pattern: '{self.format}'.\n\n"

@@ -45,12 +46,12 @@ class DatetimeOutputParser(BaseOutputParser[datetime]):
         )

     def parse(self, response: str) -> datetime:
+        """Parse a string into a datetime object."""
         try:
-            return datetime.strptime(response.strip(), self.format)
+            return datetime.strptime(response.strip(), self.format)  # noqa: DTZ007
         except ValueError as e:
-            raise OutputParserException(
-                f"Could not parse datetime string: {response}"
-            ) from e
+            msg = f"Could not parse datetime string: {response}"
+            raise OutputParserException(msg) from e

     @property
     def _type(self) -> str:
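The parser's contract is unchanged; only the way example strings for the format instructions are produced changed (deterministic, current-date-based examples instead of random ones). A minimal sketch (not from the diff):

```python
from langchain.output_parsers import DatetimeOutputParser

parser = DatetimeOutputParser(format="%Y-%m-%d")
# Instructions now include recent, deterministic example dates for custom formats.
print(parser.get_format_instructions())
print(parser.parse("2024-06-01"))  # datetime.datetime(2024, 6, 1, 0, 0)
```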
langchain/output_parsers/enum.py CHANGED

@@ -15,7 +15,8 @@ class EnumOutputParser(BaseOutputParser[Enum]):
     def raise_deprecation(cls, values: dict) -> dict:
         enum = values["enum"]
         if not all(isinstance(e.value, str) for e in enum):
-            raise ValueError("Enum values must be strings")
+            msg = "Enum values must be strings"
+            raise ValueError(msg)
         return values

     @property

@@ -25,11 +26,12 @@ class EnumOutputParser(BaseOutputParser[Enum]):
     def parse(self, response: str) -> Enum:
         try:
             return self.enum(response.strip())
-        except ValueError:
-            raise OutputParserException(
+        except ValueError as e:
+            msg = (
                 f"Response '{response}' is not one of the "
                 f"expected values: {self._valid_values}"
             )
+            raise OutputParserException(msg) from e

     def get_format_instructions(self) -> str:
         return f"Select one of the following options: {', '.join(self._valid_values)}"
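A small sketch (not from the diff) of the unchanged public behavior; the only difference is that the failure is now raised from the underlying ValueError:

```python
from enum import Enum

from langchain.output_parsers import EnumOutputParser


class Color(Enum):
    RED = "red"
    BLUE = "blue"


parser = EnumOutputParser(enum=Color)
assert parser.parse(" red ") is Color.RED
# parser.parse("green")  # raises OutputParserException listing the valid values
```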
langchain/output_parsers/fix.py CHANGED

@@ -70,34 +70,34 @@ class OutputFixingParser(BaseOutputParser[T]):
                 return self.parser.parse(completion)
             except OutputParserException as e:
                 if retries == self.max_retries:
-                    raise
+                    raise
+                retries += 1
+                if self.legacy and hasattr(self.retry_chain, "run"):
+                    completion = self.retry_chain.run(
+                        instructions=self.parser.get_format_instructions(),
+                        completion=completion,
+                        error=repr(e),
+                    )
                 else:
-                    retries += 1
-                    if self.legacy and hasattr(self.retry_chain, "run"):
-                        completion = self.retry_chain.run(
-                            instructions=self.parser.get_format_instructions(),
-                            completion=completion,
-                            error=repr(e),
+                    try:
+                        completion = self.retry_chain.invoke(
+                            {
+                                "instructions": self.parser.get_format_instructions(),
+                                "completion": completion,
+                                "error": repr(e),
+                            },
                         )
-                    else:
-                        try:
-                            completion = self.retry_chain.invoke(
-                                dict(
-                                    instructions=self.parser.get_format_instructions(),
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-                        except (NotImplementedError, AttributeError):
-                            # Case: self.parser does not have get_format_instructions
-                            completion = self.retry_chain.invoke(
-                                dict(
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-
-        raise OutputParserException("Failed to parse")
+                    except (NotImplementedError, AttributeError):
+                        # Case: self.parser does not have get_format_instructions
+                        completion = self.retry_chain.invoke(
+                            {
+                                "completion": completion,
+                                "error": repr(e),
+                            },
+                        )
+
+        msg = "Failed to parse"
+        raise OutputParserException(msg)

     async def aparse(self, completion: str) -> T:
         retries = 0

@@ -107,34 +107,34 @@ class OutputFixingParser(BaseOutputParser[T]):
                 return await self.parser.aparse(completion)
             except OutputParserException as e:
                 if retries == self.max_retries:
-                    raise
+                    raise
+                retries += 1
+                if self.legacy and hasattr(self.retry_chain, "arun"):
+                    completion = await self.retry_chain.arun(
+                        instructions=self.parser.get_format_instructions(),
+                        completion=completion,
+                        error=repr(e),
+                    )
                 else:
-                    retries += 1
-                    if self.legacy and hasattr(self.retry_chain, "arun"):
-                        completion = await self.retry_chain.arun(
-                            instructions=self.parser.get_format_instructions(),
-                            completion=completion,
-                            error=repr(e),
+                    try:
+                        completion = await self.retry_chain.ainvoke(
+                            {
+                                "instructions": self.parser.get_format_instructions(),
+                                "completion": completion,
+                                "error": repr(e),
+                            },
                         )
-                    else:
-                        try:
-                            completion = await self.retry_chain.ainvoke(
-                                dict(
-                                    instructions=self.parser.get_format_instructions(),
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-                        except (NotImplementedError, AttributeError):
-                            # Case: self.parser does not have get_format_instructions
-                            completion = await self.retry_chain.ainvoke(
-                                dict(
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-
-        raise OutputParserException("Failed to parse")
+                    except (NotImplementedError, AttributeError):
+                        # Case: self.parser does not have get_format_instructions
+                        completion = await self.retry_chain.ainvoke(
+                            {
+                                "completion": completion,
+                                "error": repr(e),
+                            },
+                        )
+
+        msg = "Failed to parse"
+        raise OutputParserException(msg)

     def get_format_instructions(self) -> str:
         return self.parser.get_format_instructions()
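The retry flow itself is unchanged; the rewrite mainly swaps dict(...) literals for {...} and routes the final error message through a msg variable. A hedged end-to-end sketch (not from the diff), using FakeListLLM so the "fix" step is deterministic:

```python
from langchain_core.language_models import FakeListLLM
from langchain.output_parsers import DatetimeOutputParser, OutputFixingParser

inner = DatetimeOutputParser(format="%Y-%m-%d")
fixer = OutputFixingParser.from_llm(
    parser=inner,
    llm=FakeListLLM(responses=["2024-06-01"]),  # plays the role of the fixing model
)
# The first parse attempt fails, the retry chain asks the (fake) model for a
# corrected completion, and the second attempt succeeds.
print(fixer.parse("June 1st, 2024"))
```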
langchain/output_parsers/format_instructions.py CHANGED

@@ -1,12 +1,10 @@
-# flake8: noqa
-
 STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":

 ```json
 {{
 {format}
 }}
-```"""
+```"""  # noqa: E501

 STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
 ```json

@@ -24,7 +22,7 @@ the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema.
 Here is the output schema:
 ```
 {schema}
-```"""
+```"""  # noqa: E501

 YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance that conforms to the given JSON schema below.

@@ -49,14 +47,14 @@ YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance
 ```
 habit: Using disposable water bottles for daily hydration.
 sustainable_alternative: Switch to a reusable water bottle to reduce plastic waste and decrease your environmental footprint.
-```
+```

-Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema:
+Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema:
 ```
 {schema}
 ```

-Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!"""
+Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!"""  # noqa: E501

 PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS = """The output should be formatted as a string as the operation, followed by a colon, followed by the column or row to be queried on, followed by optional array parameters.

@@ -78,4 +76,4 @@ Here are the possible columns:
 ```
 {columns}
 ```
-"""
+"""  # noqa: E501
langchain/output_parsers/json.py CHANGED
langchain/output_parsers/list.py CHANGED