langchain 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl
This diff covers publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as published in their respective public registries.
Potentially problematic release.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +160 -133
- langchain/agents/agent_iterator.py +31 -14
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +7 -7
- langchain/agents/chat/base.py +6 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +9 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +7 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +18 -9
- langchain/agents/json_chat/base.py +18 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +9 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +80 -70
- langchain/agents/openai_functions_agent/base.py +47 -38
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +8 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +7 -7
- langchain/agents/output_parsers/openai_functions.py +15 -7
- langchain/agents/output_parsers/openai_tools.py +9 -4
- langchain/agents/output_parsers/react_json_single_input.py +10 -5
- langchain/agents/output_parsers/react_single_input.py +15 -11
- langchain/agents/output_parsers/self_ask.py +3 -2
- langchain/agents/output_parsers/tools.py +18 -13
- langchain/agents/output_parsers/xml.py +99 -28
- langchain/agents/react/agent.py +4 -4
- langchain/agents/react/base.py +22 -17
- langchain/agents/react/output_parser.py +5 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +3 -2
- langchain/agents/self_ask_with_search/base.py +19 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +14 -11
- langchain/agents/structured_chat/output_parser.py +16 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +7 -6
- langchain/agents/tools.py +2 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +5 -5
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +6 -6
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +4 -1
- langchain/callbacks/streaming_aiter_final_only.py +5 -3
- langchain/callbacks/streaming_stdout_final_only.py +5 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/base.py +2 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +1 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +36 -22
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +89 -55
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +24 -11
- langchain/chains/combine_documents/map_reduce.py +39 -31
- langchain/chains/combine_documents/map_rerank.py +34 -21
- langchain/chains/combine_documents/reduce.py +47 -26
- langchain/chains/combine_documents/refine.py +26 -17
- langchain/chains/combine_documents/stuff.py +19 -12
- langchain/chains/constitutional_ai/base.py +4 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +6 -7
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +41 -20
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +8 -9
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +26 -12
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +77 -61
- langchain/chains/llm_bash/__init__.py +2 -1
- langchain/chains/llm_checker/base.py +7 -5
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +16 -9
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +9 -6
- langchain/chains/llm_symbolic_math/__init__.py +2 -1
- langchain/chains/loading.py +170 -153
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +8 -9
- langchain/chains/natbot/base.py +8 -8
- langchain/chains/natbot/crawler.py +73 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +13 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +35 -35
- langchain/chains/openai_functions/qa_with_structure.py +19 -12
- langchain/chains/openai_functions/tagging.py +2 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +4 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +14 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +14 -5
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +17 -6
- langchain/chains/query_constructor/base.py +34 -33
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +37 -32
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +34 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +24 -20
- langchain/chains/router/embedding_router.py +12 -8
- langchain/chains/router/llm_router.py +17 -16
- langchain/chains/router/multi_prompt.py +2 -2
- langchain/chains/router/multi_retrieval_qa.py +10 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +6 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +75 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +5 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +160 -123
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +33 -24
- langchain/embeddings/cache.py +117 -26
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +24 -24
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -14
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +21 -12
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +24 -21
- langchain/evaluation/loading.py +15 -11
- langchain/evaluation/parsing/base.py +4 -1
- langchain/evaluation/parsing/json_distance.py +5 -2
- langchain/evaluation/parsing/json_schema.py +12 -8
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +13 -6
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +2 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -16
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +15 -10
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +11 -8
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +13 -12
- langchain/memory/entity.py +84 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +1 -3
- langchain/memory/summary.py +13 -11
- langchain/memory/summary_buffer.py +17 -8
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +13 -6
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +10 -5
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +5 -3
- langchain/output_parsers/fix.py +52 -52
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +43 -47
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +7 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +77 -78
- langchain/output_parsers/structured.py +11 -6
- langchain/output_parsers/yaml.py +15 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +10 -9
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +15 -13
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +7 -5
- langchain/retrievers/document_compressors/base.py +13 -7
- langchain/retrievers/document_compressors/chain_extract.py +4 -5
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +11 -12
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -19
- langchain/retrievers/document_compressors/embeddings_filter.py +23 -23
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +11 -6
- langchain/retrievers/ensemble.py +28 -25
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +32 -26
- langchain/retrievers/multi_vector.py +20 -8
- langchain/retrievers/parent_document_retriever.py +18 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +138 -119
- langchain/retrievers/time_weighted_retriever.py +18 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +2 -1
- langchain/runnables/openai_functions.py +6 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +3 -2
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +1 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +10 -7
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +11 -2
- langchain/smith/evaluation/runner_utils.py +181 -129
- langchain/smith/evaluation/string_run_evaluator.py +75 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +6 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +2 -1
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/METADATA +5 -5
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/RECORD +582 -582
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
--- a/langchain/chains/openai_functions/extraction.py
+++ b/langchain/chains/openai_functions/extraction.py
@@ -25,7 +25,7 @@ def _get_extraction_function(entity_schema: dict) -> dict:
         "parameters": {
             "type": "object",
             "properties": {
-                "info": {"type": "array", "items": _convert_schema(entity_schema)}
+                "info": {"type": "array", "items": _convert_schema(entity_schema)},
             },
             "required": ["info"],
         },
@@ -63,18 +63,18 @@ Passage:
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
             """
     ),
@@ -84,7 +84,7 @@ def create_extraction_chain(
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
     tags: Optional[list[str]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage.
 
@@ -103,7 +103,7 @@ def create_extraction_chain(
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = JsonKeyOutputFunctionsParser(key_name="info")
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
@@ -111,7 +111,6 @@ def create_extraction_chain(
         tags=tags,
         verbose=verbose,
     )
-    return chain
 
 
 @deprecated(
@@ -133,18 +132,18 @@ def create_extraction_chain(
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
             """
     ),
@@ -153,7 +152,7 @@ def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage using pydantic schema.
 
@@ -178,20 +177,21 @@ def create_extraction_chain_pydantic(
     openai_schema = pydantic_schema.schema()
 
     openai_schema = _resolve_schema_references(
-        openai_schema, openai_schema.get("definitions", {})
+        openai_schema,
+        openai_schema.get("definitions", {}),
     )
 
     function = _get_extraction_function(openai_schema)
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = PydanticAttrOutputFunctionsParser(
-        pydantic_schema=PydanticSchema, attr_name="info"
+        pydantic_schema=PydanticSchema,
+        attr_name="info",
     )
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         verbose=verbose,
     )
-    return chain

--- a/langchain/chains/openai_functions/openapi.py
+++ b/langchain/chains/openai_functions/openapi.py
@@ -23,14 +23,6 @@ if TYPE_CHECKING:
     from openapi_pydantic import Parameter
 
 
-def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
-    summary = getattr(o, "summary", None)
-    description = getattr(o, "description", None)
-    if prefer_short:
-        return summary or description
-    return description or summary
-
-
 def _format_url(url: str, path_params: dict) -> str:
     expected_path_param = re.findall(r"{(.*?)}", url)
     new_params = {}
@@ -59,13 +51,12 @@ def _format_url(url: str, path_params: dict) -> str:
                 sep = ","
                 new_val = ""
             new_val += sep.join(kv_strs)
+        elif param[0] == ".":
+            new_val = f".{val}"
+        elif param[0] == ";":
+            new_val = f";{clean_param}={val}"
         else:
-            if param[0] == ".":
-                new_val = f".{val}"
-            elif param[0] == ";":
-                new_val = f";{clean_param}={val}"
-            else:
-                new_val = val
+            new_val = val
         new_params[param] = new_val
     return url.format(**new_params)
 
@@ -77,7 +68,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema
+            media_type_schema = next(iter(p.content.values())).media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
@@ -102,11 +93,12 @@ def openapi_spec_to_openai_fn(
     """
     try:
         from langchain_community.tools import APIOperation
-    except ImportError:
-        raise ImportError(
+    except ImportError as e:
+        msg = (
             "Could not import langchain_community.tools. "
             "Please install it with `pip install langchain-community`."
         )
+        raise ImportError(msg) from e
 
     if not spec.paths:
         return [], lambda: None
@@ -134,7 +126,8 @@ def openapi_spec_to_openai_fn(
         for param_loc, arg_name in param_loc_to_arg_name.items():
             if params_by_type[param_loc]:
                 request_args[arg_name] = _openapi_params_to_json_schema(
-                    params_by_type[param_loc], spec
+                    params_by_type[param_loc],
+                    spec,
                 )
         request_body = spec.get_request_body_for_operation(op)
         # TODO: Support more MIME types.
@@ -144,10 +137,10 @@ def openapi_spec_to_openai_fn(
             if media_type_object.media_type_schema:
                 schema = spec.get_schema(media_type_object.media_type_schema)
                 media_types[media_type] = json.loads(
-                    schema.json(exclude_none=True)
+                    schema.json(exclude_none=True),
                 )
         if len(media_types) == 1:
-            media_type, schema_dict = list(media_types.items())[0]
+            media_type, schema_dict = next(iter(media_types.items()))
             key = "json" if media_type == "application/json" else "data"
             request_args[key] = schema_dict
         elif len(media_types) > 1:
@@ -173,6 +166,7 @@ def openapi_spec_to_openai_fn(
         fn_args: dict,
         headers: Optional[dict] = None,
         params: Optional[dict] = None,
+        timeout: Optional[int] = 30,
         **kwargs: Any,
     ) -> Any:
         method = _name_to_call_map[name]["method"]
@@ -192,7 +186,7 @@ def openapi_spec_to_openai_fn(
                 _kwargs["params"].update(params)
             else:
                 _kwargs["params"] = params
-        return requests.request(method, url, **_kwargs)
+        return requests.request(method, url, **_kwargs, timeout=timeout)
 
     return functions, default_call_api
 
@@ -229,11 +223,11 @@ class SimpleRequestChain(Chain):
         _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
         _run_manager.on_text(_text)
         api_response: Response = self.request_method(name, args)
-        if api_response.status_code != 200:
+        if api_response.status_code != requests.codes.ok:
             response = (
                 f"{api_response.status_code}: {api_response.reason}"
-                + f"\nFor {name} "
-                + f"Called with args: {args.get('params', '')}"
+                f"\nFor {name} "
+                f"Called with args: {args.get('params', '')}"
             )
         else:
             try:
@@ -248,7 +242,7 @@ class SimpleRequestChain(Chain):
     message=(
         "This function is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
     ),
     removal="1.0",
 )
@@ -258,7 +252,7 @@ def get_openapi_chain(
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
     llm_chain_kwargs: Optional[dict] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     headers: Optional[dict] = None,
     params: Optional[dict] = None,
     **kwargs: Any,
@@ -352,10 +346,11 @@ def get_openapi_chain(
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import langchain_community.utilities.openapi. "
             "Please install it with `pip install langchain-community`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     if isinstance(spec, str):
         for conversion in (
             OpenAPISpec.from_url,
@@ -365,21 +360,23 @@ def get_openapi_chain(
             try:
                 spec = conversion(spec)
                 break
-            except ImportError:
-                raise
-            except Exception:
+            except ImportError:
+                raise
+            except Exception:  # noqa: S110
                 pass
     if isinstance(spec, str):
-        raise ValueError(f"Unable to parse spec from source {spec}")
+        msg = f"Unable to parse spec from source {spec}"
+        raise ValueError(msg)  # noqa: TRY004
     openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
     if not llm:
-        raise ValueError(
+        msg = (
             "Must provide an LLM for this chain.For example,\n"
             "from langchain_openai import ChatOpenAI\n"
             "llm = ChatOpenAI()\n"
         )
+        raise ValueError(msg)
     prompt = prompt or ChatPromptTemplate.from_template(
-        "Use the provided API's to respond to this user query:\n\n{query}"
+        "Use the provided API's to respond to this user query:\n\n{query}",
     )
     llm_chain = LLMChain(
         llm=llm,
@@ -392,7 +389,10 @@ def get_openapi_chain(
     )
     request_chain = request_chain or SimpleRequestChain(
         request_method=lambda name, args: call_api_fn(
-            name, args, headers=headers, params=params
+            name,
+            args,
+            headers=headers,
+            params=params,
         ),
         verbose=verbose,
     )

--- a/langchain/chains/openai_functions/qa_with_structure.py
+++ b/langchain/chains/openai_functions/qa_with_structure.py
@@ -22,7 +22,8 @@ class AnswerWithSources(BaseModel):
 
     answer: str = Field(..., description="Answer to the question that was asked")
     sources: list[str] = Field(
-        ..., description="List of sources used to answer the question"
+        ...,
+        description="List of sources used to answer the question",
     )
 
 
@@ -32,7 +33,7 @@ class AnswerWithSources(BaseModel):
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with structured responses: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_structure_chain(
@@ -40,7 +41,7 @@ def create_qa_with_structure_chain(
     schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources
     based on schema.
@@ -57,20 +58,22 @@ def create_qa_with_structure_chain(
     """
     if output_parser == "pydantic":
         if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
-            raise ValueError(
+            msg = (
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
             )
+            raise ValueError(msg)
         _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
-            pydantic_schema=schema
+            pydantic_schema=schema,
         )
     elif output_parser == "base":
         _output_parser = OutputFunctionsParser()
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
+        raise ValueError(msg)
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         if hasattr(schema, "model_json_schema"):
             schema_dict = cast(dict, schema.model_json_schema())
@@ -89,7 +92,7 @@ def create_qa_with_structure_chain(
             content=(
                 "You are a world class algorithm to answer "
                 "questions in a specific format."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -98,14 +101,13 @@ def create_qa_with_structure_chain(
     ]
     prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
 
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=_output_parser,
         verbose=verbose,
     )
-    return chain
 
 
 @deprecated(
@@ -114,11 +116,13 @@ def create_qa_with_structure_chain(
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with sources: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_sources_chain(
-    llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
+    llm: BaseLanguageModel,
+    verbose: bool = False,  # noqa: FBT001,FBT002
+    **kwargs: Any,
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources.
 
@@ -131,5 +135,8 @@ def create_qa_with_sources_chain(
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     return create_qa_with_structure_chain(
-        llm, AnswerWithSources, verbose=verbose, **kwargs
+        llm,
+        AnswerWithSources,
+        verbose=verbose,
+        **kwargs,
     )

--- a/langchain/chains/openai_functions/tagging.py
+++ b/langchain/chains/openai_functions/tagging.py
@@ -91,14 +91,13 @@ def create_tagging_chain(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = JsonOutputFunctionsParser()
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain
 
 
 @deprecated(
@@ -164,11 +163,10 @@ def create_tagging_chain_pydantic(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain

--- a/langchain/chains/openai_tools/extraction.py
+++ b/langchain/chains/openai_tools/extraction.py
@@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
             """
     ),
@@ -71,10 +71,9 @@ def create_extraction_chain_pydantic(
         [
             ("system", system_message),
             ("user", "{input}"),
-        ]
+        ],
     )
     functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
     tools = [{"type": "function", "function": d} for d in functions]
     model = llm.bind(tools=tools)
-    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
-    return chain
+    return prompt | model | PydanticToolsParser(tools=pydantic_schemas)

--- a/langchain/chains/qa_generation/base.py
+++ b/langchain/chains/qa_generation/base.py
@@ -19,7 +19,7 @@ from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
     since="0.2.7",
     alternative=(
         "example in API reference with more detail: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
     ),
     removal="1.0",
 )
@@ -66,7 +66,7 @@ class QAGenerationChain(Chain):
     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
     text_splitter: TextSplitter = Field(
-        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
+        default=RecursiveCharacterTextSplitter(chunk_overlap=500),
     )
     """Text splitter that splits the input into chunks."""
     input_key: str = "text"
@@ -117,7 +117,8 @@ class QAGenerationChain(Chain):
     ) -> dict[str, list]:
         docs = self.text_splitter.create_documents([inputs[self.input_key]])
         results = self.llm_chain.generate(
-            [{"text": d.page_content} for d in docs], run_manager=run_manager
+            [{"text": d.page_content} for d in docs],
+            run_manager=run_manager,
         )
         qa = [json.loads(res[0].text) for res in results.generations]
         return {self.output_key: qa}

--- a/langchain/chains/qa_generation/prompt.py
+++ b/langchain/chains/qa_generation/prompt.py
@@ -1,5 +1,3 @@
-# flake8: noqa
-from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain_core.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
@@ -7,6 +5,8 @@ from langchain_core.prompts.chat import (
 )
 from langchain_core.prompts.prompt import PromptTemplate
 
+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
+
 templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
 Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
 When coming up with this question/answer pair, you must respond in the following format:
@@ -18,10 +18,10 @@ When coming up with this question/answer pair, you must respond in the following
 ```
 
 Everything between the ``` must be valid json.
-"""
+"""  # noqa: E501
 templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 CHAT_PROMPT = ChatPromptTemplate.from_messages(
     [
         SystemMessagePromptTemplate.from_template(templ1),
@@ -42,7 +42,7 @@ Everything between the ``` must be valid json.
 
 Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 PROMPT = PromptTemplate.from_template(templ)
 
 PROMPT_SELECTOR = ConditionalPromptSelector(

--- a/langchain/chains/qa_with_sources/base.py
+++ b/langchain/chains/qa_with_sources/base.py
@@ -70,7 +70,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_variable_name="summaries",
         )
         reduce_documents_chain = ReduceDocumentsChain(
-            combine_documents_chain=combine_results_chain
+            combine_documents_chain=combine_results_chain,
         )
         combine_documents_chain = MapReduceDocumentsChain(
             llm_chain=llm_question_chain,
@@ -93,7 +93,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Load chain from chain type."""
         _chain_kwargs = chain_type_kwargs or {}
         combine_documents_chain = load_qa_with_sources_chain(
-            llm, chain_type=chain_type, **_chain_kwargs
+            llm,
+            chain_type=chain_type,
+            **_chain_kwargs,
         )
         return cls(combine_documents_chain=combine_documents_chain, **kwargs)
 
@@ -118,7 +120,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """
         _output_keys = [self.answer_key, self.sources_answer_key]
         if self.return_source_documents:
-            _output_keys = _output_keys + ["source_documents"]
+            _output_keys = [*_output_keys, "source_documents"]
         return _output_keys
 
     @model_validator(mode="before")
@@ -133,7 +135,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Split sources from answer."""
         if re.search(r"SOURCES?:", answer, re.IGNORECASE):
             answer, sources = re.split(
-                r"SOURCES?:|QUESTION:\s", answer, flags=re.IGNORECASE
+                r"SOURCES?:|QUESTION:\s",
+                answer,
+                flags=re.IGNORECASE,
             )[:2]
             sources = re.split(r"\n", sources)[0].strip()
         else:
@@ -164,7 +168,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
             docs = self._get_docs(inputs)  # type: ignore[call-arg]
 
         answer = self.combine_documents_chain.run(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
        )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {
@@ -198,7 +204,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         else:
             docs = await self._aget_docs(inputs)  # type: ignore[call-arg]
         answer = await self.combine_documents_chain.arun(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
         )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {