langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
```diff
--- a/langchain/chains/openai_functions/citation_fuzzy_match.py
+++ b/langchain/chains/openai_functions/citation_fuzzy_match.py
@@ -45,6 +45,14 @@ class FactWithEvidence(BaseModel):
             yield from s.spans()
 
     def get_spans(self, context: str) -> Iterator[str]:
+        """Get spans of the substring quote in the context.
+
+        Args:
+            context: The context in which to find the spans of the substring quote.
+
+        Returns:
+            An iterator over the spans of the substring quote in the context.
+        """
         for quote in self.substring_quote:
             yield from self._get_span(quote, context)
 
@@ -86,25 +94,25 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:
 
     Returns:
         Runnable that can be used to answer questions with citations.
+
     """
     if llm.bind_tools is BaseChatModel.bind_tools:
-        raise ValueError(
-            "Language model must implement bind_tools to use this function."
-        )
+        msg = "Language model must implement bind_tools to use this function."
+        raise ValueError(msg)
     prompt = ChatPromptTemplate(
         [
             SystemMessage(
                 "You are a world class algorithm to answer "
-                "questions with correct and exact citations."
+                "questions with correct and exact citations.",
             ),
             HumanMessagePromptTemplate.from_template(
                 "Answer question using the following context."
                 "\n\n{context}"
                 "\n\nQuestion: {question}"
                 "\n\nTips: Make sure to cite your sources, "
-                "and use the exact words from the context."
+                "and use the exact words from the context.",
             ),
-        ]
+        ],
     )
     return prompt | llm.with_structured_output(QuestionAnswer)
 
@@ -124,7 +132,10 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
-    schema = QuestionAnswer.schema()
+    if hasattr(QuestionAnswer, "model_json_schema"):
+        schema = QuestionAnswer.model_json_schema()
+    else:
+        schema = QuestionAnswer.schema()
     function = {
         "name": schema["title"],
         "description": schema["description"],
@@ -136,7 +147,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             content=(
                 "You are a world class algorithm to answer "
                 "questions with correct and exact citations."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -145,15 +156,14 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             content=(
                 "Tips: Make sure to cite your sources, "
                 "and use the exact words from the context."
-            )
+            ),
         ),
     ]
     prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
 
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
     )
-    return chain
```
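The new `create_citation_fuzzy_match_chain` body picks between the Pydantic v2 and v1 schema APIs at runtime. A minimal standalone sketch of that check follows; the `Answer` model is a hypothetical stand-in, not the `QuestionAnswer` class from this module.

```python
# Sketch of the pydantic v2/v1 compatibility check introduced above.
# `model_json_schema()` is the pydantic v2 API; `schema()` is the v1 spelling.
from pydantic import BaseModel, Field


class Answer(BaseModel):
    """An answer with citations."""  # becomes schema["description"]

    answer: str = Field(description="The answer text")


if hasattr(Answer, "model_json_schema"):
    schema = Answer.model_json_schema()  # pydantic v2
else:
    schema = Answer.schema()  # pydantic v1 fallback

print(schema["title"], schema["description"])
```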
```diff
--- a/langchain/chains/openai_functions/extraction.py
+++ b/langchain/chains/openai_functions/extraction.py
@@ -25,7 +25,7 @@ def _get_extraction_function(entity_schema: dict) -> dict:
         "parameters": {
             "type": "object",
             "properties": {
-                "info": {"type": "array", "items": _convert_schema(entity_schema)}
+                "info": {"type": "array", "items": _convert_schema(entity_schema)},
             },
             "required": ["info"],
         },
@@ -63,18 +63,18 @@ Passage:
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
         """
     ),
@@ -84,7 +84,7 @@ def create_extraction_chain(
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
     tags: Optional[list[str]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage.
 
@@ -103,7 +103,7 @@ def create_extraction_chain(
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = JsonKeyOutputFunctionsParser(key_name="info")
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
@@ -111,7 +111,6 @@ def create_extraction_chain(
         tags=tags,
         verbose=verbose,
     )
-    return chain
 
 
 @deprecated(
@@ -133,18 +132,18 @@ def create_extraction_chain(
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
         """
     ),
@@ -153,7 +152,7 @@ def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage using pydantic schema.
 
@@ -178,20 +177,21 @@ def create_extraction_chain_pydantic(
     openai_schema = pydantic_schema.schema()
 
     openai_schema = _resolve_schema_references(
-        openai_schema, openai_schema.get("definitions", {})
+        openai_schema,
+        openai_schema.get("definitions", {}),
     )
 
     function = _get_extraction_function(openai_schema)
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = PydanticAttrOutputFunctionsParser(
-        pydantic_schema=PydanticSchema, attr_name="info"
+        pydantic_schema=PydanticSchema,
+        attr_name="info",
     )
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         verbose=verbose,
     )
-    return chain
```
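Several hunks in this file, and in the files below, apply the same lint-driven refactor: the exception message is bound to a `msg` variable before the `raise`, and functions now return the constructed `LLMChain` directly instead of assigning it to `chain` first. A small sketch of the exception pattern, using a hypothetical validation helper:

```python
# Hypothetical helper illustrating the `msg = ...; raise ...(msg)` pattern
# adopted throughout this diff (the message no longer lives inside the raise).
def check_entity_schema(entity_schema: dict) -> None:
    if "properties" not in entity_schema:
        msg = "entity_schema must define a 'properties' mapping."
        raise ValueError(msg)


check_entity_schema({"properties": {"name": {"type": "string"}}})  # no error raised
```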
```diff
--- a/langchain/chains/openai_functions/openapi.py
+++ b/langchain/chains/openai_functions/openapi.py
@@ -13,6 +13,7 @@ from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
 from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
 from langchain_core.utils.input import get_colored_text
 from requests import Response
+from typing_extensions import override
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -23,14 +24,6 @@ if TYPE_CHECKING:
     from openapi_pydantic import Parameter
 
 
-def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
-    summary = getattr(o, "summary", None)
-    description = getattr(o, "description", None)
-    if prefer_short:
-        return summary or description
-    return description or summary
-
-
 def _format_url(url: str, path_params: dict) -> str:
     expected_path_param = re.findall(r"{(.*?)}", url)
     new_params = {}
@@ -59,13 +52,12 @@ def _format_url(url: str, path_params: dict) -> str:
             sep = ","
             new_val = ""
             new_val += sep.join(kv_strs)
+        elif param[0] == ".":
+            new_val = f".{val}"
+        elif param[0] == ";":
+            new_val = f";{clean_param}={val}"
         else:
-            if param[0] == ".":
-                new_val = f".{val}"
-            elif param[0] == ";":
-                new_val = f";{clean_param}={val}"
-            else:
-                new_val = val
+            new_val = val
         new_params[param] = new_val
     return url.format(**new_params)
 
@@ -77,7 +69,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -> dict:
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema
+            media_type_schema = next(iter(p.content.values())).media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
@@ -102,11 +94,12 @@ def openapi_spec_to_openai_fn(
     """
     try:
         from langchain_community.tools import APIOperation
-    except ImportError:
-        raise ImportError(
+    except ImportError as e:
+        msg = (
             "Could not import langchain_community.tools. "
             "Please install it with `pip install langchain-community`."
         )
+        raise ImportError(msg) from e
 
     if not spec.paths:
         return [], lambda: None
@@ -134,7 +127,8 @@ def openapi_spec_to_openai_fn(
         for param_loc, arg_name in param_loc_to_arg_name.items():
             if params_by_type[param_loc]:
                 request_args[arg_name] = _openapi_params_to_json_schema(
-                    params_by_type[param_loc], spec
+                    params_by_type[param_loc],
+                    spec,
                 )
         request_body = spec.get_request_body_for_operation(op)
         # TODO: Support more MIME types.
@@ -144,10 +138,10 @@ def openapi_spec_to_openai_fn(
             if media_type_object.media_type_schema:
                 schema = spec.get_schema(media_type_object.media_type_schema)
                 media_types[media_type] = json.loads(
-                    schema.json(exclude_none=True)
+                    schema.json(exclude_none=True),
                 )
         if len(media_types) == 1:
-            media_type, schema_dict = list(media_types.items())[0]
+            media_type, schema_dict = next(iter(media_types.items()))
             key = "json" if media_type == "application/json" else "data"
             request_args[key] = schema_dict
         elif len(media_types) > 1:
@@ -173,6 +167,7 @@ def openapi_spec_to_openai_fn(
         fn_args: dict,
         headers: Optional[dict] = None,
         params: Optional[dict] = None,
+        timeout: Optional[int] = 30,
         **kwargs: Any,
     ) -> Any:
         method = _name_to_call_map[name]["method"]
@@ -192,7 +187,7 @@ def openapi_spec_to_openai_fn(
             _kwargs["params"].update(params)
         else:
             _kwargs["params"] = params
-        return requests.request(method, url, **_kwargs)
+        return requests.request(method, url, **_kwargs, timeout=timeout)
 
     return functions, default_call_api
 
@@ -208,10 +203,12 @@ class SimpleRequestChain(Chain):
     """Key to use for the input of the request."""
 
     @property
+    @override
     def input_keys(self) -> list[str]:
         return [self.input_key]
 
     @property
+    @override
     def output_keys(self) -> list[str]:
         return [self.output_key]
 
@@ -229,11 +226,11 @@ class SimpleRequestChain(Chain):
         _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
         _run_manager.on_text(_text)
         api_response: Response = self.request_method(name, args)
-        if api_response.status_code != 200:
+        if api_response.status_code != requests.codes.ok:
             response = (
                 f"{api_response.status_code}: {api_response.reason}"
-                + f"\nFor {name} "
-                + f"Called with args: {args.get('params', '')}"
+                f"\nFor {name} "
+                f"Called with args: {args.get('params', '')}"
             )
         else:
             try:
@@ -248,7 +245,7 @@ class SimpleRequestChain(Chain):
     message=(
         "This function is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
     ),
     removal="1.0",
 )
@@ -258,7 +255,7 @@ def get_openapi_chain(
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
     llm_chain_kwargs: Optional[dict] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     headers: Optional[dict] = None,
     params: Optional[dict] = None,
     **kwargs: Any,
@@ -348,14 +345,16 @@ def get_openapi_chain(
             `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
         prompt: Main prompt template to use.
         request_chain: Chain for taking the functions output and executing the request.
+
     """  # noqa: E501
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import langchain_community.utilities.openapi. "
             "Please install it with `pip install langchain-community`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     if isinstance(spec, str):
         for conversion in (
             OpenAPISpec.from_url,
@@ -365,21 +364,23 @@ def get_openapi_chain(
             try:
                 spec = conversion(spec)
                 break
-            except ImportError as e:
-                raise e
-            except Exception:
+            except ImportError:
+                raise
+            except Exception:  # noqa: S110
                 pass
     if isinstance(spec, str):
-        raise ValueError(f"Unable to parse spec from source {spec}")
+        msg = f"Unable to parse spec from source {spec}"
+        raise ValueError(msg)  # noqa: TRY004
     openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
     if not llm:
-        raise ValueError(
+        msg = (
             "Must provide an LLM for this chain.For example,\n"
             "from langchain_openai import ChatOpenAI\n"
             "llm = ChatOpenAI()\n"
         )
+        raise ValueError(msg)
     prompt = prompt or ChatPromptTemplate.from_template(
-        "Use the provided API's to respond to this user query:\n\n{query}"
+        "Use the provided API's to respond to this user query:\n\n{query}",
     )
     llm_chain = LLMChain(
         llm=llm,
@@ -392,7 +393,10 @@ def get_openapi_chain(
     )
     request_chain = request_chain or SimpleRequestChain(
         request_method=lambda name, args: call_api_fn(
-            name, args, headers=headers, params=params
+            name,
+            args,
+            headers=headers,
+            params=params,
         ),
         verbose=verbose,
     )
```
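Two behavioural notes from these hunks: `list(d.values())[0]` becomes `next(iter(d.values()))`, which reads the first entry without building an intermediate list, and the generated `default_call_api` helper now passes an explicit `timeout` to `requests.request`. A short sketch of both idioms on stand-in data (the dictionary and URL below are illustrative only):

```python
import requests

# First item of a mapping without materialising a list.
media_types = {"application/json": {"type": "object"}}
media_type, schema_dict = next(iter(media_types.items()))
assert media_type == "application/json"

# An explicit timeout keeps a hung endpoint from blocking the chain indefinitely.
response = requests.request("GET", "https://example.com", timeout=30)
print(response.status_code == requests.codes.ok)
```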
```diff
--- a/langchain/chains/openai_functions/qa_with_structure.py
+++ b/langchain/chains/openai_functions/qa_with_structure.py
@@ -22,7 +22,8 @@ class AnswerWithSources(BaseModel):
 
     answer: str = Field(..., description="Answer to the question that was asked")
     sources: list[str] = Field(
-        ..., description="List of sources used to answer the question"
+        ...,
+        description="List of sources used to answer the question",
     )
 
 
@@ -32,7 +33,7 @@ class AnswerWithSources(BaseModel):
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with structured responses: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_structure_chain(
@@ -40,7 +41,7 @@ def create_qa_with_structure_chain(
     schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources
     based on schema.
@@ -57,27 +58,29 @@ def create_qa_with_structure_chain(
     """
     if output_parser == "pydantic":
         if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
-            raise ValueError(
+            msg = (
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
             )
+            raise ValueError(msg)
         _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
-            pydantic_schema=schema
+            pydantic_schema=schema,
         )
     elif output_parser == "base":
         _output_parser = OutputFunctionsParser()
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
+        raise ValueError(msg)
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         if hasattr(schema, "model_json_schema"):
-            schema_dict = cast(dict, schema.model_json_schema())
+            schema_dict = cast("dict", schema.model_json_schema())
         else:
-            schema_dict = cast(dict, schema.schema())
+            schema_dict = cast("dict", schema.schema())
     else:
-        schema_dict = cast(dict, schema)
+        schema_dict = cast("dict", schema)
     function = {
         "name": schema_dict["title"],
         "description": schema_dict["description"],
@@ -89,7 +92,7 @@ def create_qa_with_structure_chain(
             content=(
                 "You are a world class algorithm to answer "
                 "questions in a specific format."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -98,14 +101,13 @@ def create_qa_with_structure_chain(
     ]
     prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]
 
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=_output_parser,
         verbose=verbose,
     )
-    return chain
 
 
 @deprecated(
@@ -114,11 +116,13 @@ def create_qa_with_structure_chain(
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with sources: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_sources_chain(
-    llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
+    llm: BaseLanguageModel,
+    verbose: bool = False,  # noqa: FBT001,FBT002
+    **kwargs: Any,
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources.
 
@@ -131,5 +135,8 @@ def create_qa_with_sources_chain(
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     return create_qa_with_structure_chain(
-        llm, AnswerWithSources, verbose=verbose, **kwargs
+        llm,
+        AnswerWithSources,
+        verbose=verbose,
+        **kwargs,
     )
```
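The `cast(dict, ...)` calls become `cast("dict", ...)`. `typing.cast` accepts the target type as a string, so the quoted form avoids evaluating the type expression at runtime; behaviour is otherwise unchanged. A tiny sketch:

```python
from typing import cast

payload: object = {"title": "AnswerWithSources", "description": "answer plus sources"}

# The value is returned unchanged at runtime; only the static type is asserted.
schema_dict = cast("dict", payload)
print(schema_dict["title"])
```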
```diff
--- a/langchain/chains/openai_functions/tagging.py
+++ b/langchain/chains/openai_functions/tagging.py
@@ -86,19 +86,19 @@ def create_tagging_chain(
 
     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+
     """
     function = _get_tagging_function(schema)
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = JsonOutputFunctionsParser()
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain
 
 
 @deprecated(
@@ -155,6 +155,7 @@ def create_tagging_chain_pydantic(
 
     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+
     """
     if hasattr(pydantic_schema, "model_json_schema"):
         openai_schema = pydantic_schema.model_json_schema()
@@ -164,11 +165,10 @@ def create_tagging_chain_pydantic(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain
```
```diff
--- a/langchain/chains/openai_tools/extraction.py
+++ b/langchain/chains/openai_tools/extraction.py
@@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do not include it in the output.
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
             class Joke(BaseModel):
                 setup: str = Field(description="The setup of the joke")
-                punchline: str = Field(description="The punchline to the joke")
-
+                punchline: str = Field(description="The punchline to the joke")
+
             # Or any other chat model that supports tools.
             # Please reference to to the documentation of structured_output
-            # to see an up to date list of which models support
+            # to see an up to date list of which models support
             # with_structured_output.
             model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
             structured_llm = model.with_structured_output(Joke)
-            structured_llm.invoke("Tell me a joke about cats.
+            structured_llm.invoke("Tell me a joke about cats.
                 Make sure to call the Joke function.")
         """
     ),
@@ -71,10 +71,9 @@ def create_extraction_chain_pydantic(
         [
             ("system", system_message),
             ("user", "{input}"),
-        ]
+        ],
     )
     functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
     tools = [{"type": "function", "function": d} for d in functions]
     model = llm.bind(tools=tools)
-    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
-    return chain
+    return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
```
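With this change the function returns the LCEL pipeline `prompt | model | PydanticToolsParser(...)` directly instead of binding it to an intermediate `chain` variable. Roughly how the returned runnable would be used, assuming `langchain-openai` (or any other tool-calling chat model) is installed; the model name and input text are illustrative, and the function itself is deprecated in favour of `with_structured_output`:

```python
from pydantic import BaseModel, Field
from langchain_openai import ChatOpenAI  # any tool-calling chat model works here

from langchain.chains.openai_tools.extraction import create_extraction_chain_pydantic


class Person(BaseModel):
    """A person mentioned in the passage."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")


llm = ChatOpenAI(model="gpt-4o-mini")  # illustrative model name
chain = create_extraction_chain_pydantic([Person], llm)

# The prompt template expects an "input" key; PydanticToolsParser returns Person objects.
people = chain.invoke({"input": "Alice is 31 and her brother Bob is 29."})
print(people)
```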
```diff
--- a/langchain/chains/qa_generation/base.py
+++ b/langchain/chains/qa_generation/base.py
@@ -9,6 +9,7 @@ from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
 from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
 from pydantic import Field
+from typing_extensions import override
 
 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -19,7 +20,7 @@ from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
     since="0.2.7",
     alternative=(
         "example in API reference with more detail: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
     ),
     removal="1.0",
 )
@@ -61,12 +62,13 @@ class QAGenerationChain(Chain):
                     split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
                 )
             )
+
     """
 
     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
     text_splitter: TextSplitter = Field(
-        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
+        default=RecursiveCharacterTextSplitter(chunk_overlap=500),
     )
     """Text splitter that splits the input into chunks."""
     input_key: str = "text"
@@ -103,10 +105,12 @@ class QAGenerationChain(Chain):
         raise NotImplementedError
 
     @property
+    @override
     def input_keys(self) -> list[str]:
         return [self.input_key]
 
     @property
+    @override
     def output_keys(self) -> list[str]:
         return [self.output_key]
 
@@ -117,7 +121,8 @@ class QAGenerationChain(Chain):
     ) -> dict[str, list]:
         docs = self.text_splitter.create_documents([inputs[self.input_key]])
         results = self.llm_chain.generate(
-            [{"text": d.page_content} for d in docs], run_manager=run_manager
+            [{"text": d.page_content} for d in docs],
+            run_manager=run_manager,
         )
         qa = [json.loads(res[0].text) for res in results.generations]
         return {self.output_key: qa}
```