langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
langchain/chat_models/base.py
CHANGED
@@ -3,15 +3,7 @@ from __future__ import annotations
 import warnings
 from collections.abc import AsyncIterator, Iterator, Sequence
 from importlib import util
-from typing import (
-    Any,
-    Callable,
-    Literal,
-    Optional,
-    Union,
-    cast,
-    overload,
-)
+from typing import Any, Callable, Literal, Optional, Union, cast, overload
 
 from langchain_core.language_models import (
     BaseChatModel,
@@ -27,16 +19,17 @@ from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
 from langchain_core.runnables.schema import StreamEvent
 from langchain_core.tools import BaseTool
 from langchain_core.tracers import RunLog, RunLogPatch
+from langchain_core.v1.chat_models import BaseChatModel as BaseChatModelV1
 from pydantic import BaseModel
-from typing_extensions import TypeAlias
+from typing_extensions import TypeAlias, override
 
 __all__ = [
-    "init_chat_model",
     # For backwards compatibility
     "BaseChatModel",
     "SimpleChatModel",
-    "generate_from_stream",
     "agenerate_from_stream",
+    "generate_from_stream",
+    "init_chat_model",
 ]
 
 
@@ -47,10 +40,23 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Literal[None] = None,
     config_prefix: Optional[str] = None,
+    message_version: Literal["v0"] = "v0",
     **kwargs: Any,
 ) -> BaseChatModel: ...
 
 
+@overload
+def init_chat_model(
+    model: str,
+    *,
+    model_provider: Optional[str] = None,
+    configurable_fields: Literal[None] = None,
+    config_prefix: Optional[str] = None,
+    message_version: Literal["v1"] = "v1",
+    **kwargs: Any,
+) -> BaseChatModelV1: ...
+
+
 @overload
 def init_chat_model(
     model: Literal[None] = None,
@@ -58,6 +64,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Literal[None] = None,
     config_prefix: Optional[str] = None,
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
 
@@ -69,6 +76,7 @@ def init_chat_model(
     model_provider: Optional[str] = None,
     configurable_fields: Union[Literal["any"], list[str], tuple[str, ...]] = ...,
     config_prefix: Optional[str] = None,
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
 ) -> _ConfigurableModel: ...
 
@@ -84,61 +92,77 @@ def init_chat_model(
         Union[Literal["any"], list[str], tuple[str, ...]]
     ] = None,
     config_prefix: Optional[str] = None,
+    message_version: Literal["v0", "v1"] = "v0",
     **kwargs: Any,
-) -> Union[BaseChatModel, _ConfigurableModel]:
-    """Initialize a ChatModel
+) -> Union[BaseChatModel, BaseChatModelV1, _ConfigurableModel]:
+    """Initialize a ChatModel in a single line using the model's name and provider.
 
-
-
+    .. note::
+        Must have the integration package corresponding to the model provider installed.
+        You should look at the `provider integration's API reference <https://python.langchain.com/api_reference/reference.html#integrations>`__
+        to see what parameters are supported by the model.
 
     Args:
-        model: The name of the model, e.g.
+        model: The name of the model, e.g. ``'o3-mini'``, ``'claude-3-5-sonnet-latest'``. You can
             also specify model and model provider in a single argument using
-            '{model_provider}:{model}' format, e.g.
+            ``'{model_provider}:{model}'`` format, e.g. ``'openai:o1'``.
         model_provider: The model provider if not specified as part of model arg (see
             above). Supported model_provider values and the corresponding integration
             package are:
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            - ``openai`` -> ``langchain-openai``
+            - ``anthropic`` -> ``langchain-anthropic``
+            - ``azure_openai`` -> ``langchain-openai``
+            - ``azure_ai`` -> ``langchain-azure-ai``
+            - ``google_vertexai`` -> ``langchain-google-vertexai``
+            - ``google_genai`` -> ``langchain-google-genai``
+            - ``bedrock`` -> ``langchain-aws``
+            - ``bedrock_converse`` -> ``langchain-aws``
+            - ``cohere`` -> ``langchain-cohere``
+            - ``fireworks`` -> ``langchain-fireworks``
+            - ``together`` -> ``langchain-together``
+            - ``mistralai`` -> ``langchain-mistralai``
+            - ``huggingface`` -> ``langchain-huggingface``
+            - ``groq`` -> ``langchain-groq``
+            - ``ollama`` -> ``langchain-ollama``
+            - ``google_anthropic_vertex`` -> ``langchain-google-vertexai``
+            - ``deepseek`` -> ``langchain-deepseek``
+            - ``ibm`` -> ``langchain-ibm``
+            - ``nvidia`` -> ``langchain-nvidia-ai-endpoints``
+            - ``xai`` -> ``langchain-xai``
+            - ``perplexity`` -> ``langchain-perplexity``
 
         Will attempt to infer model_provider from model if not specified. The
         following providers will be inferred based on these model prefixes:
 
-
-
-
-
-
-
-
-
-
-
-
-
+            - ``gpt-3...`` | ``gpt-4...`` | ``o1...`` -> ``openai``
+            - ``claude...`` -> ``anthropic``
+            - ``amazon...`` -> ``bedrock``
+            - ``gemini...`` -> ``google_vertexai``
+            - ``command...`` -> ``cohere``
+            - ``accounts/fireworks...`` -> ``fireworks``
+            - ``mistral...`` -> ``mistralai``
+            - ``deepseek...`` -> ``deepseek``
+            - ``grok...`` -> ``xai``
+            - ``sonar...`` -> ``perplexity``
+
+        message_version: The version of the BaseChatModel to return. Either ``"v0"`` for
+            a v0 :class:`~langchain_core.language_models.chat_models.BaseChatModel` or
+            ``"v1"`` for a v1 :class:`~langchain_core.v1.chat_models.BaseChatModel`. The
+            output version determines what type of message objects the model will
+            generate.
+
+            .. note::
+                Currently supported for these providers:
+
+                - ``openai``
+
+            .. versionadded:: 0.4.0
+
+        configurable_fields: Which model parameters are configurable:
 
             - None: No configurable fields.
-
+            - ``'any'``: All fields are configurable. **See Security Note below.**
             - Union[List[str], Tuple[str, ...]]: Specified fields are configurable.
 
             Fields are assumed to have config_prefix stripped if there is a
@@ -146,15 +170,15 @@ def init_chat_model(
             not specified, then defaults to ``("model", "model_provider")``.
 
             ***Security Note***: Setting ``configurable_fields="any"`` means fields like
-            api_key
+            ``api_key``, ``base_url``, etc. can be altered at runtime, potentially redirecting
             model requests to a different service/user. Make sure that if you're
             accepting untrusted configurations that you enumerate the
             ``configurable_fields=(...)`` explicitly.
 
-        config_prefix: If config_prefix is a non-empty string then model will be
+        config_prefix: If ``'config_prefix'`` is a non-empty string then model will be
             configurable at runtime via the
             ``config["configurable"]["{config_prefix}_{param}"]`` keys. If
-            config_prefix is an empty string then model will be configurable via
+            ``'config_prefix'`` is an empty string then model will be configurable via
             ``config["configurable"]["{param}"]``.
         temperature: Model temperature.
         max_tokens: Max output tokens.
@@ -187,7 +211,7 @@
 
         o3_mini = init_chat_model("openai:o3-mini", temperature=0)
         claude_sonnet = init_chat_model("anthropic:claude-3-5-sonnet-latest", temperature=0)
-        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.
+        gemini_2_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
 
         o3_mini.invoke("what's your name")
         claude_sonnet.invoke("what's your name")
@@ -315,70 +339,89 @@ def init_chat_model(
         warnings.warn(
             f"{config_prefix=} has been set but no fields are configurable. Set "
             f"`configurable_fields=(...)` to specify the model params that are "
-            f"configurable."
+            f"configurable.",
+            stacklevel=2,
         )
 
     if not configurable_fields:
         return _init_chat_model_helper(
-            cast(str, model),
-
-
-
-        kwargs["model"] = model
-        if model_provider:
-            kwargs["model_provider"] = model_provider
-        return _ConfigurableModel(
-            default_config=kwargs,
-            config_prefix=config_prefix,
-            configurable_fields=configurable_fields,
+            cast("str", model),
+            model_provider=model_provider,
+            message_version=message_version,
+            **kwargs,
         )
+    if model:
+        kwargs["model"] = model
+    if model_provider:
+        kwargs["model_provider"] = model_provider
+    return _ConfigurableModel(
+        default_config=kwargs,
+        config_prefix=config_prefix,
+        configurable_fields=configurable_fields,
+    )
 
 
 def _init_chat_model_helper(
-    model: str,
-
+    model: str,
+    *,
+    model_provider: Optional[str] = None,
+    message_version: Literal["v0", "v1"] = "v0",
+    **kwargs: Any,
+) -> Union[BaseChatModel, BaseChatModelV1]:
     model, model_provider = _parse_model(model, model_provider)
+    if message_version != "v0" and model_provider not in ("openai",):
+        warnings.warn(
+            f"Model provider {model_provider} does not support "
+            f"message_version={message_version}. Defaulting to v0.",
+            stacklevel=2,
+        )
     if model_provider == "openai":
         _check_pkg("langchain_openai")
-
+        if message_version == "v0":
+            from langchain_openai import ChatOpenAI
 
-
-
+            return ChatOpenAI(model=model, **kwargs)
+        # v1
+        from langchain_openai.v1 import ChatOpenAI as ChatOpenAIV1
+
+        return ChatOpenAIV1(model=model, **kwargs)
+
+    if model_provider == "anthropic":
         _check_pkg("langchain_anthropic")
         from langchain_anthropic import ChatAnthropic
 
         return ChatAnthropic(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
-
+    if model_provider == "azure_openai":
         _check_pkg("langchain_openai")
         from langchain_openai import AzureChatOpenAI
 
         return AzureChatOpenAI(model=model, **kwargs)
-
+    if model_provider == "azure_ai":
         _check_pkg("langchain_azure_ai")
         from langchain_azure_ai.chat_models import AzureAIChatCompletionsModel
 
         return AzureAIChatCompletionsModel(model=model, **kwargs)
-
+    if model_provider == "cohere":
         _check_pkg("langchain_cohere")
         from langchain_cohere import ChatCohere
 
         return ChatCohere(model=model, **kwargs)
-
+    if model_provider == "google_vertexai":
         _check_pkg("langchain_google_vertexai")
         from langchain_google_vertexai import ChatVertexAI
 
         return ChatVertexAI(model=model, **kwargs)
-
+    if model_provider == "google_genai":
         _check_pkg("langchain_google_genai")
         from langchain_google_genai import ChatGoogleGenerativeAI
 
         return ChatGoogleGenerativeAI(model=model, **kwargs)
-
+    if model_provider == "fireworks":
         _check_pkg("langchain_fireworks")
         from langchain_fireworks import ChatFireworks
 
         return ChatFireworks(model=model, **kwargs)
-
+    if model_provider == "ollama":
         try:
             _check_pkg("langchain_ollama")
             from langchain_ollama import ChatOllama
@@ -393,73 +436,72 @@ def _init_chat_model_helper(
                 _check_pkg("langchain_ollama")
 
         return ChatOllama(model=model, **kwargs)
-
+    if model_provider == "together":
         _check_pkg("langchain_together")
         from langchain_together import ChatTogether
 
         return ChatTogether(model=model, **kwargs)
-
+    if model_provider == "mistralai":
         _check_pkg("langchain_mistralai")
         from langchain_mistralai import ChatMistralAI
 
         return ChatMistralAI(model=model, **kwargs)  # type: ignore[call-arg,unused-ignore]
-
+    if model_provider == "huggingface":
         _check_pkg("langchain_huggingface")
         from langchain_huggingface import ChatHuggingFace
 
         return ChatHuggingFace(model_id=model, **kwargs)
-
+    if model_provider == "groq":
         _check_pkg("langchain_groq")
         from langchain_groq import ChatGroq
 
         return ChatGroq(model=model, **kwargs)
-
+    if model_provider == "bedrock":
         _check_pkg("langchain_aws")
         from langchain_aws import ChatBedrock
 
         # TODO: update to use model= once ChatBedrock supports
         return ChatBedrock(model_id=model, **kwargs)
-
+    if model_provider == "bedrock_converse":
         _check_pkg("langchain_aws")
         from langchain_aws import ChatBedrockConverse
 
         return ChatBedrockConverse(model=model, **kwargs)
-
+    if model_provider == "google_anthropic_vertex":
         _check_pkg("langchain_google_vertexai")
         from langchain_google_vertexai.model_garden import ChatAnthropicVertex
 
         return ChatAnthropicVertex(model=model, **kwargs)
-
+    if model_provider == "deepseek":
         _check_pkg("langchain_deepseek", pkg_kebab="langchain-deepseek")
         from langchain_deepseek import ChatDeepSeek
 
         return ChatDeepSeek(model=model, **kwargs)
-
+    if model_provider == "nvidia":
         _check_pkg("langchain_nvidia_ai_endpoints")
         from langchain_nvidia_ai_endpoints import ChatNVIDIA
 
         return ChatNVIDIA(model=model, **kwargs)
-
+    if model_provider == "ibm":
         _check_pkg("langchain_ibm")
         from langchain_ibm import ChatWatsonx
 
         return ChatWatsonx(model_id=model, **kwargs)
-
+    if model_provider == "xai":
         _check_pkg("langchain_xai")
         from langchain_xai import ChatXAI
 
         return ChatXAI(model=model, **kwargs)
-
+    if model_provider == "perplexity":
         _check_pkg("langchain_perplexity")
         from langchain_perplexity import ChatPerplexity
 
         return ChatPerplexity(model=model, **kwargs)
-
-
-
-
-
-        )
+    supported = ", ".join(_SUPPORTED_PROVIDERS)
+    msg = (
+        f"Unsupported {model_provider=}.\n\nSupported model providers are: {supported}"
+    )
+    raise ValueError(msg)
 
 
 _SUPPORTED_PROVIDERS = {
@@ -489,26 +531,25 @@ _SUPPORTED_PROVIDERS = {
 def _attempt_infer_model_provider(model_name: str) -> Optional[str]:
     if any(model_name.startswith(pre) for pre in ("gpt-3", "gpt-4", "o1", "o3")):
         return "openai"
-
+    if model_name.startswith("claude"):
         return "anthropic"
-
+    if model_name.startswith("command"):
         return "cohere"
-
+    if model_name.startswith("accounts/fireworks"):
         return "fireworks"
-
+    if model_name.startswith("gemini"):
         return "google_vertexai"
-
+    if model_name.startswith("amazon."):
         return "bedrock"
-
+    if model_name.startswith("mistral"):
         return "mistralai"
-
+    if model_name.startswith("deepseek"):
         return "deepseek"
-
+    if model_name.startswith("grok"):
         return "xai"
-
+    if model_name.startswith("sonar"):
         return "perplexity"
-
-    return None
+    return None
 
 
 def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
@@ -521,10 +562,11 @@ def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
         model = ":".join(model.split(":")[1:])
     model_provider = model_provider or _attempt_infer_model_provider(model)
     if not model_provider:
-
+        msg = (
             f"Unable to infer model provider for {model=}, please specify "
             f"model_provider directly."
         )
+        raise ValueError(msg)
     model_provider = model_provider.replace("-", "_").lower()
     return model, model_provider
 
@@ -532,9 +574,10 @@ def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
 def _check_pkg(pkg: str, *, pkg_kebab: Optional[str] = None) -> None:
     if not util.find_spec(pkg):
         pkg_kebab = pkg_kebab if pkg_kebab is not None else pkg.replace("_", "-")
-
+        msg = (
             f"Unable to import {pkg}. Please install with `pip install -U {pkg_kebab}`"
         )
+        raise ImportError(msg)
 
 
 def _remove_prefix(s: str, prefix: str) -> str:
@@ -567,7 +610,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             else config_prefix
         )
         self._queued_declarative_operations: list[tuple[str, tuple, dict]] = list(
-            queued_declarative_operations
+            queued_declarative_operations,
         )
 
     def __getattr__(self, name: str) -> Any:
@@ -579,7 +622,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             # self._model()).
             def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
                 queued_declarative_operations = list(
-                    self._queued_declarative_operations
+                    self._queued_declarative_operations,
                 )
                 queued_declarative_operations.append((name, args, kwargs))
                 return _ConfigurableModel(
@@ -592,14 +635,13 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
                 )
 
             return queue
-
+        if self._default_config and (model := self._model()) and hasattr(model, name):
             return getattr(model, name)
-
-
-
-
-
-            raise AttributeError(msg)
+        msg = f"{name} is not a BaseChatModel attribute"
+        if self._default_config:
+            msg += " and is not implemented on the default model"
+        msg += "."
+        raise AttributeError(msg)
 
     def _model(self, config: Optional[RunnableConfig] = None) -> Runnable:
         params = {**self._default_config, **self._model_params(config)}
@@ -627,7 +669,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         **kwargs: Any,
     ) -> _ConfigurableModel:
         """Bind config to a Runnable, returning a new Runnable."""
-        config = RunnableConfig(**(config or {}), **cast(RunnableConfig, kwargs))
+        config = RunnableConfig(**(config or {}), **cast("RunnableConfig", kwargs))
         model_params = self._model_params(config)
         remaining_config = {k: v for k, v in config.items() if k != "configurable"}
         remaining_config["configurable"] = {
@@ -642,7 +684,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
                     "with_config",
                     (),
                     {"config": remaining_config},
-                )
+                ),
             )
         return _ConfigurableModel(
             default_config={**self._default_config, **model_params},
@@ -670,6 +712,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             list[AnyMessage],
         ]
 
+    @override
     def invoke(
         self,
         input: LanguageModelInput,
@@ -678,6 +721,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
     ) -> Any:
         return self._model(config).invoke(input, config=config, **kwargs)
 
+    @override
     async def ainvoke(
         self,
         input: LanguageModelInput,
@@ -686,6 +730,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
     ) -> Any:
         return await self._model(config).ainvoke(input, config=config, **kwargs)
 
+    @override
     def stream(
         self,
         input: LanguageModelInput,
@@ -694,6 +739,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
     ) -> Iterator[Any]:
         yield from self._model(config).stream(input, config=config, **kwargs)
 
+    @override
     async def astream(
         self,
         input: LanguageModelInput,
@@ -717,14 +763,19 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             if isinstance(config, list):
                 config = config[0]
             return self._model(config).batch(
-                inputs,
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
             )
         # If multiple configs default to Runnable.batch which uses executor to invoke
         # in parallel.
-
-
-
-
+        return super().batch(
+            inputs,
+            config=config,
+            return_exceptions=return_exceptions,
+            **kwargs,
+        )
 
     async def abatch(
         self,
@@ -740,14 +791,19 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             if isinstance(config, list):
                 config = config[0]
             return await self._model(config).abatch(
-                inputs,
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
            )
         # If multiple configs default to Runnable.batch which uses executor to invoke
         # in parallel.
-
-
-
-
+        return await super().abatch(
+            inputs,
+            config=config,
+            return_exceptions=return_exceptions,
+            **kwargs,
+        )
 
     def batch_as_completed(
         self,
@@ -762,14 +818,20 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         if config is None or isinstance(config, dict) or len(config) <= 1:
             if isinstance(config, list):
                 config = config[0]
-            yield from self._model(cast(RunnableConfig, config)).batch_as_completed(  # type: ignore[call-overload]
-                inputs,
+            yield from self._model(cast("RunnableConfig", config)).batch_as_completed(  # type: ignore[call-overload]
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
             )
         # If multiple configs default to Runnable.batch which uses executor to invoke
         # in parallel.
         else:
             yield from super().batch_as_completed(  # type: ignore[call-overload]
-                inputs,
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
             )
 
     async def abatch_as_completed(
@@ -786,19 +848,26 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
             if isinstance(config, list):
                 config = config[0]
             async for x in self._model(
-                cast(RunnableConfig, config)
+                cast("RunnableConfig", config),
             ).abatch_as_completed(  # type: ignore[call-overload]
-                inputs,
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
             ):
                 yield x
         # If multiple configs default to Runnable.batch which uses executor to invoke
         # in parallel.
         else:
             async for x in super().abatch_as_completed(  # type: ignore[call-overload]
-                inputs,
+                inputs,
+                config=config,
+                return_exceptions=return_exceptions,
+                **kwargs,
             ):
                 yield x
 
+    @override
     def transform(
         self,
         input: Iterator[LanguageModelInput],
@@ -807,6 +876,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
     ) -> Iterator[Any]:
         yield from self._model(config).transform(input, config=config, **kwargs)
 
+    @override
     async def atransform(
         self,
         input: AsyncIterator[LanguageModelInput],
@@ -850,6 +920,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         **kwargs: Any,
     ) -> AsyncIterator[RunLog]: ...
 
+    @override
     async def astream_log(
         self,
         input: Any,
@@ -880,6 +951,7 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
         ):
             yield x
 
+    @override
     async def astream_events(
         self,
         input: Any,
@@ -918,6 +990,8 @@ class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
 
     # Explicitly added to satisfy downstream linters.
     def with_structured_output(
-        self,
+        self,
+        schema: Union[dict, type[BaseModel]],
+        **kwargs: Any,
     ) -> Runnable[LanguageModelInput, Union[dict, BaseModel]]:
         return self.__getattr__("with_structured_output")(schema, **kwargs)