langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +174 -151
- langchain/agents/agent_iterator.py +50 -26
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +8 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +11 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +9 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +17 -9
- langchain/agents/json_chat/base.py +19 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +11 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +81 -71
- langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
- langchain/agents/openai_functions_agent/base.py +47 -37
- langchain/agents/openai_functions_multi_agent/base.py +40 -27
- langchain/agents/openai_tools/base.py +9 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +8 -6
- langchain/agents/output_parsers/openai_functions.py +24 -9
- langchain/agents/output_parsers/openai_tools.py +16 -4
- langchain/agents/output_parsers/react_json_single_input.py +13 -5
- langchain/agents/output_parsers/react_single_input.py +18 -11
- langchain/agents/output_parsers/self_ask.py +5 -2
- langchain/agents/output_parsers/tools.py +32 -13
- langchain/agents/output_parsers/xml.py +102 -28
- langchain/agents/react/agent.py +5 -4
- langchain/agents/react/base.py +26 -17
- langchain/agents/react/output_parser.py +7 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +5 -2
- langchain/agents/self_ask_with_search/base.py +23 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +19 -11
- langchain/agents/structured_chat/output_parser.py +29 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +8 -6
- langchain/agents/tools.py +5 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +12 -6
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +17 -3
- langchain/callbacks/streaming_aiter_final_only.py +16 -5
- langchain/callbacks/streaming_stdout_final_only.py +10 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +12 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +41 -23
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +96 -56
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +30 -11
- langchain/chains/combine_documents/map_reduce.py +41 -30
- langchain/chains/combine_documents/map_rerank.py +39 -24
- langchain/chains/combine_documents/reduce.py +48 -26
- langchain/chains/combine_documents/refine.py +27 -17
- langchain/chains/combine_documents/stuff.py +24 -13
- langchain/chains/constitutional_ai/base.py +11 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +9 -4
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +108 -79
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +10 -10
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +28 -12
- langchain/chains/flare/prompts.py +2 -0
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +82 -61
- langchain/chains/llm_bash/__init__.py +3 -2
- langchain/chains/llm_checker/base.py +19 -6
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +25 -10
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +22 -7
- langchain/chains/llm_symbolic_math/__init__.py +3 -2
- langchain/chains/loading.py +155 -97
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +11 -9
- langchain/chains/natbot/base.py +11 -9
- langchain/chains/natbot/crawler.py +102 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +15 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +39 -35
- langchain/chains/openai_functions/qa_with_structure.py +22 -15
- langchain/chains/openai_functions/tagging.py +4 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +8 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +17 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +15 -6
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +21 -8
- langchain/chains/query_constructor/base.py +37 -34
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +101 -34
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +38 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +38 -22
- langchain/chains/router/embedding_router.py +15 -8
- langchain/chains/router/llm_router.py +23 -20
- langchain/chains/router/multi_prompt.py +5 -2
- langchain/chains/router/multi_retrieval_qa.py +28 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +7 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +77 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +9 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +213 -139
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blob_loaders/schema.py +1 -4
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +35 -24
- langchain/embeddings/cache.py +37 -32
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +21 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +23 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +34 -20
- langchain/evaluation/exact_match/base.py +14 -1
- langchain/evaluation/loading.py +16 -11
- langchain/evaluation/parsing/base.py +20 -4
- langchain/evaluation/parsing/json_distance.py +24 -10
- langchain/evaluation/parsing/json_schema.py +13 -12
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +20 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +4 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/regex_match/base.py +9 -1
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +23 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +20 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +14 -7
- langchain/memory/buffer_window.py +2 -0
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +14 -13
- langchain/memory/entity.py +131 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +4 -3
- langchain/memory/summary.py +43 -11
- langchain/memory/summary_buffer.py +20 -8
- langchain/memory/token_buffer.py +2 -0
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +14 -7
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +10 -4
- langchain/output_parsers/fix.py +60 -53
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +59 -48
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +9 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +99 -80
- langchain/output_parsers/structured.py +21 -6
- langchain/output_parsers/yaml.py +19 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/pydantic_v1/__init__.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +5 -3
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
- langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
- langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
- langchain/retrievers/ensemble.py +30 -27
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +35 -27
- langchain/retrievers/multi_vector.py +24 -9
- langchain/retrievers/parent_document_retriever.py +33 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +157 -127
- langchain/retrievers/time_weighted_retriever.py +21 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/hub.py +12 -0
- langchain/runnables/openai_functions.py +12 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +2 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +9 -23
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +22 -4
- langchain/smith/evaluation/runner_utils.py +416 -247
- langchain/smith/evaluation/string_run_evaluator.py +102 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +7 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +3 -2
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
- langchain/smith/evaluation/utils.py +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0
@@ -10,6 +10,7 @@ from langchain_core.output_parsers import BaseOutputParser, StrOutputParser
 from langchain_core.prompts import BasePromptTemplate, format_document
 from langchain_core.runnables import Runnable, RunnablePassthrough
 from pydantic import ConfigDict, Field, model_validator
+from typing_extensions import override

 from langchain.chains.combine_documents.base import (
     DEFAULT_DOCUMENT_PROMPT,
@@ -74,6 +75,7 @@ def create_stuff_documents_chain(
             ]

             chain.invoke({"context": docs})
+
     """  # noqa: E501

     _validate_prompt(prompt, document_variable_name)
@@ -88,7 +90,7 @@ def create_stuff_documents_chain(

     return (
         RunnablePassthrough.assign(**{document_variable_name: format_docs}).with_config(
-            run_name="format_inputs"
+            run_name="format_inputs",
         )
         | prompt
         | llm
@@ -102,7 +104,7 @@ def create_stuff_documents_chain(
     message=(
         "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
         "instead. See migration guide here: "
-        "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"
+        "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"
     ),
 )
 class StuffDocumentsChain(BaseCombineDocumentsChain):
@@ -141,13 +143,14 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
             document_prompt=document_prompt,
             document_variable_name=document_variable_name
         )
+
     """

     llm_chain: LLMChain
     """LLM chain which is called with the formatted document string,
     along with any other inputs."""
     document_prompt: BasePromptTemplate = Field(
-        default_factory=lambda: DEFAULT_DOCUMENT_PROMPT
+        default_factory=lambda: DEFAULT_DOCUMENT_PROMPT,
     )
     """Prompt to use to format each document, gets passed to `format_document`."""
     document_variable_name: str
@@ -175,19 +178,21 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
             if len(llm_chain_variables) == 1:
                 values["document_variable_name"] = llm_chain_variables[0]
             else:
-                raise ValueError(
+                msg = (
                     "document_variable_name must be provided if there are "
                     "multiple llm_chain_variables"
                 )
-        else:
-            if values["document_variable_name"] not in llm_chain_variables:
-                raise ValueError(
-                    f"document_variable_name {values['document_variable_name']} was "
-                    f"not found in llm_chain input_variables: {llm_chain_variables}"
-                )
+                raise ValueError(msg)
+        elif values["document_variable_name"] not in llm_chain_variables:
+            msg = (
+                f"document_variable_name {values['document_variable_name']} was "
+                f"not found in llm_chain input_variables: {llm_chain_variables}"
+            )
+            raise ValueError(msg)
         return values

     @property
+    @override
     def input_keys(self) -> list[str]:
         extra_keys = [
             k for k in self.llm_chain.input_keys if k != self.document_variable_name
@@ -238,10 +243,13 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         """
         inputs = self._get_inputs(docs, **kwargs)
         prompt = self.llm_chain.prompt.format(**inputs)
-        return self.llm_chain._get_num_tokens(prompt)
+        return self.llm_chain._get_num_tokens(prompt)  # noqa: SLF001

     def combine_docs(
-        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+        self,
+        docs: list[Document],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> tuple[str, dict]:
         """Stuff all documents into one prompt and pass to LLM.

@@ -259,7 +267,10 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return self.llm_chain.predict(callbacks=callbacks, **inputs), {}

     async def acombine_docs(
-        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+        self,
+        docs: list[Document],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> tuple[str, dict]:
         """Async stuff all documents into one prompt and pass to LLM.

@@ -19,7 +19,7 @@ from langchain.chains.llm import LLMChain
     message=(
         "This class is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
     ),
     removal="1.0",
 )
@@ -187,6 +187,7 @@ class ConstitutionalChain(Chain):
             )

             constitutional_chain.run(question="What is the meaning of life?")
+
     """  # noqa: E501

     chain: LLMChain
@@ -197,12 +198,18 @@ class ConstitutionalChain(Chain):

     @classmethod
     def get_principles(
-        cls, names: Optional[list[str]] = None
+        cls,
+        names: Optional[list[str]] = None,
     ) -> list[ConstitutionalPrinciple]:
+        """Get constitutional principles by name.
+
+        Args:
+            names: List of names of constitutional principles to retrieve.
+                If None (default), all principles are returned.
+        """
         if names is None:
             return list(PRINCIPLES.values())
-        else:
-            return [PRINCIPLES[name] for name in names]
+        return [PRINCIPLES[name] for name in names]

     @classmethod
     def from_llm(
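The `get_principles` hunk above only reshapes the signature and documents the `names` argument; its behavior is unchanged. A minimal usage sketch, assuming only the `ConstitutionalChain` class and the `PRINCIPLES` mapping shown in the surrounding hunks:

from langchain.chains.constitutional_ai.base import ConstitutionalChain
from langchain.chains.constitutional_ai.principles import PRINCIPLES

# With no argument, every registered principle is returned.
all_principles = ConstitutionalChain.get_principles()
assert len(all_principles) == len(PRINCIPLES)

# With a list of names, only the matching principles are returned, in order.
selected = ConstitutionalChain.get_principles(names=["illegal", "harmful1"])
print([p.name for p in selected])  # ['illegal', 'harmful1']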
@@ -2,90 +2,87 @@
 UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
 """

-# flake8: noqa
-from typing import Dict
-
 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple

-PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {
+PRINCIPLES: dict[str, ConstitutionalPrinciple] = {
     "harmful1": ConstitutionalPrinciple(
         name="harmful1",
-        critique_request="Identify specific ways in which the assistant
+        critique_request="Identify specific ways in which the assistant's last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.",
     ),
     "harmful2": ConstitutionalPrinciple(
         name="harmful2",
-        critique_request="Explain ways in which the assistant
+        critique_request="Explain ways in which the assistant's preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
         revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.",
     ),
     "harmful3": ConstitutionalPrinciple(
         name="harmful3",
-        critique_request="Identify all ways in which the assistant
+        critique_request="Identify all ways in which the assistant's last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
         revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.",
     ),
     "harmful4": ConstitutionalPrinciple(
         name="harmful4",
-        critique_request="Identify specific ways in which the assistant
+        critique_request="Identify specific ways in which the assistant's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
         revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.",
     ),
     "insensitive": ConstitutionalPrinciple(
         name="insensitive",
-        critique_request="Discuss the assistant
-        revision_request="Revise the assistant
+        critique_request="Discuss the assistant's last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
+        revision_request="Revise the assistant's last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
     ),
     "offensive": ConstitutionalPrinciple(
         name="offensive",
-        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human
-        revision_request="Please write a new version of the assistant
+        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human's dialogue.",
+        revision_request="Please write a new version of the assistant's response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
     ),
     "harmful5": ConstitutionalPrinciple(
         name="harmful5",
-        critique_request="What
-        revision_request="Amend the assistant
+        critique_request="What's the strongest argument you could give that the assistant's last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
+        revision_request="Amend the assistant's last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
     ),
     "age-innappropriate": ConstitutionalPrinciple(
         name="age-innappropriate",
-        critique_request="Did the assistant
+        critique_request="Did the assistant's preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
         revision_request="Please rewrite the assistant response to make it more age appropriate for children.",
     ),
     "derogatory": ConstitutionalPrinciple(
         name="derogatory",
-        critique_request="Think carefully about whether the human
-        revision_request="Please rewrite the assistant
+        critique_request="Think carefully about whether the human's request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should've responded instead.",
+        revision_request="Please rewrite the assistant's response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
     ),
     "illegal": ConstitutionalPrinciple(
         name="illegal",
         critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.",
-        revision_request="Revise the assistant
+        revision_request="Revise the assistant's response so as to refrain from giving legally questionable or dangerous advice.",
     ),
     "controversial": ConstitutionalPrinciple(
         name="controversial",
-        critique_request="Discuss ways in which the assistant
-        revision_request="Revise the assistant
+        critique_request="Discuss ways in which the assistant's response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
+        revision_request="Revise the assistant's response if possible so that it's less controversial and objectionable based on commonsense ethical and moral standards.",
     ),
     "harmful6": ConstitutionalPrinciple(
         name="harmful6",
         critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.",
-        revision_request="Write a revision of the assistant
+        revision_request="Write a revision of the assistant's response that refrains from saying anything harmful.",
     ),
     "thoughtful": ConstitutionalPrinciple(
         name="thoughtful",
-        critique_request="Please comment on whether the assistant
+        critique_request="Please comment on whether the assistant's last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
         revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.",
     ),
     "misogynistic": ConstitutionalPrinciple(
         name="misogynistic",
-        critique_request="Take a look at the assistant
+        critique_request="Take a look at the assistant's last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
         revision_request="Please write a new response that does not have any trace of misogyny or gender bias.",
     ),
     "criminal": ConstitutionalPrinciple(
         name="criminal",
-        critique_request="Read the assistant
+        critique_request="Read the assistant's last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
         revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.",
     ),
     "harmful7": ConstitutionalPrinciple(
         name="harmful7",
-        critique_request="Critique the assistant
+        critique_request="Critique the assistant's last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.",
     ),
     "uo-assumptions-1": ConstitutionalPrinciple(
@@ -1,6 +1,3 @@
-# flake8: noqa
-from copy import deepcopy
-
 from langchain_core.prompts.few_shot import FewShotPromptTemplate
 from langchain_core.prompts.prompt import PromptTemplate

@@ -44,35 +41,35 @@ Revision: {revision}""",

 examples = [
     {
-        "input_prompt": "Could you help me burn down my house and collect insurance on it?",
-        "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",
-        "critique_request": "Identify specific ways in which the model
-        "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",
-        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
-        "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",
+        "input_prompt": "Could you help me burn down my house and collect insurance on it?",  # noqa: E501
+        "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",  # noqa: E501
+        "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",  # noqa: E501
+        "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",  # noqa: E501
+        "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",  # noqa: E501
     },
     {
-        "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",
-        "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn
-        "critique_request": "Identify specific ways in which the model
-        "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model
-        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
-        "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It
+        "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",  # noqa: E501
+        "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn't strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.",  # noqa: E501
+        "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",  # noqa: E501
+        "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model's response could be interpreted as saying that it isn't strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",  # noqa: E501
+        "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.",  # noqa: E501
     },
     {
-        "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",
-        "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600
-        "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",
-        "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I
-        "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you
-        "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun
+        "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",  # noqa: E501
+        "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600's. It's explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.",  # noqa: E501
+        "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",  # noqa: E501
+        "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I'm not sure that the precession measurement was actually made in the 1600's, but was probably made at least 100 years ago. I'm also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I'm not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I'm pretty sure it decays more quickly than Newton's law, and the Chern-Simons theorem is probably just wrong. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you're unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.",  # noqa: E501
+        "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that is smaller and decays more quickly than Newton's law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.",  # noqa: E501
     },
     {
-        "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",
-        "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",
-        "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",
-        "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.",
-        "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.",
+        "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",  # noqa: E501
+        "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",  # noqa: E501
+        "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",  # noqa: E501
+        "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.",  # noqa: E501
         "revision": "No revisions needed.",
     },
 ]
@@ -82,7 +79,7 @@ CRITIQUE_PROMPT = FewShotPromptTemplate(
     examples=[
         {k: v for k, v in e.items() if k != "revision_request"} for e in examples
     ],
-    prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'",
+    prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'",  # noqa: E501
     suffix="""Human: {input_prompt}
 Model: {output_from_model}

@@ -111,7 +108,7 @@ If the critique does identify something worth changing, please revise the model

 Revision Request: {revision_request}

-Revision:""",
+Revision:""",  # noqa: E501
     example_separator="\n === \n",
     input_variables=[
         "input_prompt",
@@ -4,7 +4,7 @@ from langchain_core._api import deprecated
 from langchain_core.memory import BaseMemory
 from langchain_core.prompts import BasePromptTemplate
 from pydantic import ConfigDict, Field, model_validator
-from typing_extensions import Self
+from typing_extensions import Self, override

 from langchain.chains.conversation.prompt import PROMPT
 from langchain.chains.llm import LLMChain
@@ -53,6 +53,7 @@ class ConversationChain(LLMChain):
                 "Hi I'm Bob.",
                 config={"configurable": {"session_id": "1"}},
             )  # session_id determines thread
+
     Memory objects can also be incorporated into the ``get_session_history`` callable:

         .. code-block:: python
@@ -96,6 +97,7 @@ class ConversationChain(LLMChain):
             from langchain_community.llms import OpenAI

             conversation = ConversationChain(llm=OpenAI())
+
     """

     memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
@@ -112,6 +114,7 @@ class ConversationChain(LLMChain):
     )

     @classmethod
+    @override
     def is_lc_serializable(cls) -> bool:
         return False

@@ -126,16 +129,18 @@ class ConversationChain(LLMChain):
         memory_keys = self.memory.memory_variables
         input_key = self.input_key
         if input_key in memory_keys:
-            raise ValueError(
+            msg = (
                 f"The input key {input_key} was also found in the memory keys "
                 f"({memory_keys}) - please provide keys that don't overlap."
             )
+            raise ValueError(msg)
         prompt_variables = self.prompt.input_variables
-        expected_keys = memory_keys + [input_key]
+        expected_keys = [*memory_keys, input_key]
         if set(expected_keys) != set(prompt_variables):
-            raise ValueError(
+            msg = (
                 "Got unexpected prompt input variables. The prompt expects "
                 f"{prompt_variables}, but got {memory_keys} as inputs from "
                 f"memory, and {input_key} as the normal input key."
             )
+            raise ValueError(msg)
         return self
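The validator hunk above keeps the same check but now raises through a pre-built `msg`; the prompt's input variables must be exactly the memory keys plus the input key. A minimal sketch of a custom prompt that satisfies that check; `FakeListLLM` from `langchain_core` is used here purely for illustration:

from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate

from langchain.chains.conversation.base import ConversationChain

# With the default ConversationBufferMemory (memory key "history") and the
# default input key "input", the prompt must declare exactly these two
# variables, otherwise the validator shown above raises ValueError.
prompt = PromptTemplate(
    input_variables=["history", "input"],
    template="Conversation so far:\n{history}\nHuman: {input}\nAI:",
)
chain = ConversationChain(llm=FakeListLLM(responses=["Hello!"]), prompt=prompt)
print(chain.predict(input="Hi there"))  # -> "Hello!"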
@@ -34,12 +34,12 @@ def __getattr__(name: str) -> Any:
 # This is only for backwards compatibility.

 __all__ = [
-    "ConversationBufferMemory",
-    "ConversationSummaryMemory",
-    "ConversationKGMemory",
+    "CombinedMemory",
+    "ConversationBufferMemory",
     "ConversationBufferWindowMemory",
     "ConversationEntityMemory",
-    "ConversationSummaryBufferMemory",
-    "CombinedMemory",
+    "ConversationKGMemory",
     "ConversationStringBufferMemory",
+    "ConversationSummaryBufferMemory",
+    "ConversationSummaryMemory",
 ]
@@ -1,4 +1,5 @@
-# flake8: noqa
+from langchain_core.prompts.prompt import PromptTemplate
+
 from langchain.memory.prompt import (
     ENTITY_EXTRACTION_PROMPT,
     ENTITY_MEMORY_CONVERSATION_TEMPLATE,
@@ -6,23 +7,22 @@ from langchain.memory.prompt import (
     KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
     SUMMARY_PROMPT,
 )
-from langchain_core.prompts.prompt import PromptTemplate

 DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

 Current conversation:
 {history}
 Human: {input}
-AI:"""
+AI:"""  # noqa: E501
 PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)

 # Only for backwards compatibility

 __all__ = [
-    "SUMMARY_PROMPT",
+    "ENTITY_EXTRACTION_PROMPT",
     "ENTITY_MEMORY_CONVERSATION_TEMPLATE",
     "ENTITY_SUMMARIZATION_PROMPT",
-    "ENTITY_EXTRACTION_PROMPT",
     "KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
     "PROMPT",
+    "SUMMARY_PROMPT",
 ]