langchain 0.3.26__py3-none-any.whl → 0.3.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of langchain might be problematic. See the registry's advisory for more details.
- langchain/__init__.py +110 -96
- langchain/_api/__init__.py +2 -2
- langchain/_api/deprecation.py +3 -3
- langchain/_api/module_import.py +51 -46
- langchain/_api/path.py +1 -1
- langchain/adapters/openai.py +8 -8
- langchain/agents/__init__.py +15 -12
- langchain/agents/agent.py +160 -133
- langchain/agents/agent_iterator.py +31 -14
- langchain/agents/agent_toolkits/__init__.py +7 -6
- langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
- langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
- langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
- langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
- langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
- langchain/agents/agent_toolkits/csv/__init__.py +4 -2
- langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
- langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
- langchain/agents/agent_toolkits/github/toolkit.py +9 -9
- langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
- langchain/agents/agent_toolkits/json/base.py +1 -1
- langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
- langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
- langchain/agents/agent_toolkits/openapi/base.py +1 -1
- langchain/agents/agent_toolkits/openapi/planner.py +2 -2
- langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
- langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
- langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
- langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
- langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
- langchain/agents/agent_toolkits/powerbi/base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
- langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
- langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
- langchain/agents/agent_toolkits/python/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark/__init__.py +4 -2
- langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
- langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/sql/prompt.py +1 -1
- langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
- langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
- langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
- langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
- langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
- langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
- langchain/agents/agent_types.py +6 -6
- langchain/agents/chat/base.py +6 -12
- langchain/agents/chat/output_parser.py +9 -6
- langchain/agents/chat/prompt.py +3 -4
- langchain/agents/conversational/base.py +9 -5
- langchain/agents/conversational/output_parser.py +4 -2
- langchain/agents/conversational/prompt.py +2 -3
- langchain/agents/conversational_chat/base.py +7 -5
- langchain/agents/conversational_chat/output_parser.py +9 -11
- langchain/agents/conversational_chat/prompt.py +5 -6
- langchain/agents/format_scratchpad/__init__.py +3 -3
- langchain/agents/format_scratchpad/log_to_messages.py +1 -1
- langchain/agents/format_scratchpad/openai_functions.py +8 -6
- langchain/agents/format_scratchpad/tools.py +5 -3
- langchain/agents/format_scratchpad/xml.py +33 -2
- langchain/agents/initialize.py +16 -8
- langchain/agents/json_chat/base.py +18 -18
- langchain/agents/json_chat/prompt.py +2 -3
- langchain/agents/load_tools.py +2 -1
- langchain/agents/loading.py +28 -18
- langchain/agents/mrkl/base.py +9 -4
- langchain/agents/mrkl/output_parser.py +17 -13
- langchain/agents/mrkl/prompt.py +1 -2
- langchain/agents/openai_assistant/base.py +80 -70
- langchain/agents/openai_functions_agent/base.py +46 -37
- langchain/agents/openai_functions_multi_agent/base.py +39 -26
- langchain/agents/openai_tools/base.py +8 -8
- langchain/agents/output_parsers/__init__.py +3 -3
- langchain/agents/output_parsers/json.py +6 -6
- langchain/agents/output_parsers/openai_functions.py +15 -7
- langchain/agents/output_parsers/openai_tools.py +9 -4
- langchain/agents/output_parsers/react_json_single_input.py +10 -5
- langchain/agents/output_parsers/react_single_input.py +15 -11
- langchain/agents/output_parsers/self_ask.py +3 -2
- langchain/agents/output_parsers/tools.py +18 -13
- langchain/agents/output_parsers/xml.py +99 -28
- langchain/agents/react/agent.py +4 -4
- langchain/agents/react/base.py +22 -17
- langchain/agents/react/output_parser.py +5 -6
- langchain/agents/react/textworld_prompt.py +0 -1
- langchain/agents/react/wiki_prompt.py +14 -15
- langchain/agents/schema.py +3 -2
- langchain/agents/self_ask_with_search/base.py +19 -15
- langchain/agents/self_ask_with_search/prompt.py +0 -1
- langchain/agents/structured_chat/base.py +14 -11
- langchain/agents/structured_chat/output_parser.py +16 -18
- langchain/agents/structured_chat/prompt.py +3 -4
- langchain/agents/tool_calling_agent/base.py +7 -6
- langchain/agents/tools.py +2 -2
- langchain/agents/utils.py +2 -3
- langchain/agents/xml/base.py +5 -5
- langchain/agents/xml/prompt.py +1 -2
- langchain/cache.py +12 -12
- langchain/callbacks/__init__.py +11 -11
- langchain/callbacks/aim_callback.py +2 -2
- langchain/callbacks/argilla_callback.py +1 -1
- langchain/callbacks/arize_callback.py +1 -1
- langchain/callbacks/arthur_callback.py +1 -1
- langchain/callbacks/base.py +7 -7
- langchain/callbacks/clearml_callback.py +1 -1
- langchain/callbacks/comet_ml_callback.py +1 -1
- langchain/callbacks/confident_callback.py +1 -1
- langchain/callbacks/context_callback.py +1 -1
- langchain/callbacks/flyte_callback.py +1 -1
- langchain/callbacks/human.py +2 -2
- langchain/callbacks/infino_callback.py +1 -1
- langchain/callbacks/labelstudio_callback.py +1 -1
- langchain/callbacks/llmonitor_callback.py +1 -1
- langchain/callbacks/manager.py +5 -5
- langchain/callbacks/mlflow_callback.py +2 -2
- langchain/callbacks/openai_info.py +1 -1
- langchain/callbacks/promptlayer_callback.py +1 -1
- langchain/callbacks/sagemaker_callback.py +1 -1
- langchain/callbacks/streaming_aiter.py +4 -1
- langchain/callbacks/streaming_aiter_final_only.py +5 -3
- langchain/callbacks/streaming_stdout_final_only.py +5 -3
- langchain/callbacks/streamlit/__init__.py +3 -2
- langchain/callbacks/streamlit/mutable_expander.py +1 -1
- langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
- langchain/callbacks/tracers/__init__.py +1 -1
- langchain/callbacks/tracers/comet.py +1 -1
- langchain/callbacks/tracers/evaluation.py +1 -1
- langchain/callbacks/tracers/log_stream.py +1 -1
- langchain/callbacks/tracers/logging.py +1 -1
- langchain/callbacks/tracers/stdout.py +1 -1
- langchain/callbacks/trubrics_callback.py +1 -1
- langchain/callbacks/utils.py +4 -4
- langchain/callbacks/wandb_callback.py +1 -1
- langchain/callbacks/whylabs_callback.py +1 -1
- langchain/chains/api/base.py +36 -22
- langchain/chains/api/news_docs.py +1 -2
- langchain/chains/api/open_meteo_docs.py +1 -2
- langchain/chains/api/openapi/requests_chain.py +1 -1
- langchain/chains/api/openapi/response_chain.py +1 -1
- langchain/chains/api/podcast_docs.py +1 -2
- langchain/chains/api/prompt.py +1 -2
- langchain/chains/api/tmdb_docs.py +1 -2
- langchain/chains/base.py +88 -54
- langchain/chains/chat_vector_db/prompts.py +2 -3
- langchain/chains/combine_documents/__init__.py +1 -1
- langchain/chains/combine_documents/base.py +23 -10
- langchain/chains/combine_documents/map_reduce.py +38 -30
- langchain/chains/combine_documents/map_rerank.py +33 -20
- langchain/chains/combine_documents/reduce.py +47 -26
- langchain/chains/combine_documents/refine.py +26 -17
- langchain/chains/combine_documents/stuff.py +19 -12
- langchain/chains/constitutional_ai/base.py +4 -4
- langchain/chains/constitutional_ai/principles.py +22 -25
- langchain/chains/constitutional_ai/prompts.py +25 -28
- langchain/chains/conversation/base.py +5 -3
- langchain/chains/conversation/memory.py +5 -5
- langchain/chains/conversation/prompt.py +5 -5
- langchain/chains/conversational_retrieval/base.py +41 -20
- langchain/chains/conversational_retrieval/prompts.py +2 -3
- langchain/chains/elasticsearch_database/base.py +8 -9
- langchain/chains/elasticsearch_database/prompts.py +2 -3
- langchain/chains/ernie_functions/__init__.py +2 -2
- langchain/chains/example_generator.py +3 -1
- langchain/chains/flare/base.py +26 -12
- langchain/chains/graph_qa/cypher.py +2 -2
- langchain/chains/graph_qa/falkordb.py +1 -1
- langchain/chains/graph_qa/gremlin.py +1 -1
- langchain/chains/graph_qa/neptune_sparql.py +1 -1
- langchain/chains/graph_qa/prompts.py +2 -2
- langchain/chains/history_aware_retriever.py +2 -1
- langchain/chains/hyde/base.py +6 -5
- langchain/chains/hyde/prompts.py +5 -6
- langchain/chains/llm.py +77 -61
- langchain/chains/llm_bash/__init__.py +2 -1
- langchain/chains/llm_checker/base.py +7 -5
- langchain/chains/llm_checker/prompt.py +3 -4
- langchain/chains/llm_math/base.py +16 -9
- langchain/chains/llm_math/prompt.py +1 -2
- langchain/chains/llm_summarization_checker/base.py +9 -6
- langchain/chains/llm_symbolic_math/__init__.py +2 -1
- langchain/chains/loading.py +151 -95
- langchain/chains/mapreduce.py +4 -3
- langchain/chains/moderation.py +8 -9
- langchain/chains/natbot/base.py +8 -8
- langchain/chains/natbot/crawler.py +73 -76
- langchain/chains/natbot/prompt.py +2 -3
- langchain/chains/openai_functions/__init__.py +7 -7
- langchain/chains/openai_functions/base.py +13 -10
- langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
- langchain/chains/openai_functions/extraction.py +19 -19
- langchain/chains/openai_functions/openapi.py +35 -35
- langchain/chains/openai_functions/qa_with_structure.py +19 -12
- langchain/chains/openai_functions/tagging.py +2 -4
- langchain/chains/openai_tools/extraction.py +7 -8
- langchain/chains/qa_generation/base.py +4 -3
- langchain/chains/qa_generation/prompt.py +5 -5
- langchain/chains/qa_with_sources/base.py +14 -6
- langchain/chains/qa_with_sources/loading.py +16 -8
- langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
- langchain/chains/qa_with_sources/refine_prompts.py +0 -1
- langchain/chains/qa_with_sources/retrieval.py +14 -5
- langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
- langchain/chains/qa_with_sources/vector_db.py +17 -6
- langchain/chains/query_constructor/base.py +34 -33
- langchain/chains/query_constructor/ir.py +4 -4
- langchain/chains/query_constructor/parser.py +37 -32
- langchain/chains/query_constructor/prompt.py +5 -6
- langchain/chains/question_answering/chain.py +21 -10
- langchain/chains/question_answering/map_reduce_prompt.py +14 -14
- langchain/chains/question_answering/map_rerank_prompt.py +3 -3
- langchain/chains/question_answering/refine_prompts.py +2 -5
- langchain/chains/question_answering/stuff_prompt.py +5 -5
- langchain/chains/retrieval.py +1 -3
- langchain/chains/retrieval_qa/base.py +34 -27
- langchain/chains/retrieval_qa/prompt.py +1 -2
- langchain/chains/router/__init__.py +3 -3
- langchain/chains/router/base.py +24 -20
- langchain/chains/router/embedding_router.py +12 -8
- langchain/chains/router/llm_router.py +17 -16
- langchain/chains/router/multi_prompt.py +2 -2
- langchain/chains/router/multi_retrieval_qa.py +10 -5
- langchain/chains/sequential.py +30 -18
- langchain/chains/sql_database/prompt.py +14 -16
- langchain/chains/sql_database/query.py +6 -5
- langchain/chains/structured_output/__init__.py +1 -1
- langchain/chains/structured_output/base.py +75 -67
- langchain/chains/summarize/chain.py +11 -5
- langchain/chains/summarize/map_reduce_prompt.py +0 -1
- langchain/chains/summarize/stuff_prompt.py +0 -1
- langchain/chains/transform.py +5 -6
- langchain/chat_loaders/facebook_messenger.py +1 -1
- langchain/chat_loaders/langsmith.py +1 -1
- langchain/chat_loaders/utils.py +3 -3
- langchain/chat_models/__init__.py +20 -19
- langchain/chat_models/anthropic.py +1 -1
- langchain/chat_models/azureml_endpoint.py +1 -1
- langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
- langchain/chat_models/base.py +160 -123
- langchain/chat_models/bedrock.py +1 -1
- langchain/chat_models/fake.py +1 -1
- langchain/chat_models/meta.py +1 -1
- langchain/chat_models/pai_eas_endpoint.py +1 -1
- langchain/chat_models/promptlayer_openai.py +1 -1
- langchain/chat_models/volcengine_maas.py +1 -1
- langchain/docstore/base.py +1 -1
- langchain/document_loaders/__init__.py +9 -9
- langchain/document_loaders/airbyte.py +3 -3
- langchain/document_loaders/assemblyai.py +1 -1
- langchain/document_loaders/azure_blob_storage_container.py +1 -1
- langchain/document_loaders/azure_blob_storage_file.py +1 -1
- langchain/document_loaders/baiducloud_bos_file.py +1 -1
- langchain/document_loaders/base.py +1 -1
- langchain/document_loaders/blob_loaders/__init__.py +1 -1
- langchain/document_loaders/blockchain.py +1 -1
- langchain/document_loaders/chatgpt.py +1 -1
- langchain/document_loaders/college_confidential.py +1 -1
- langchain/document_loaders/confluence.py +1 -1
- langchain/document_loaders/email.py +1 -1
- langchain/document_loaders/facebook_chat.py +1 -1
- langchain/document_loaders/markdown.py +1 -1
- langchain/document_loaders/notebook.py +1 -1
- langchain/document_loaders/org_mode.py +1 -1
- langchain/document_loaders/parsers/__init__.py +1 -1
- langchain/document_loaders/parsers/docai.py +1 -1
- langchain/document_loaders/parsers/generic.py +1 -1
- langchain/document_loaders/parsers/html/__init__.py +1 -1
- langchain/document_loaders/parsers/html/bs4.py +1 -1
- langchain/document_loaders/parsers/language/cobol.py +1 -1
- langchain/document_loaders/parsers/language/python.py +1 -1
- langchain/document_loaders/parsers/msword.py +1 -1
- langchain/document_loaders/parsers/pdf.py +5 -5
- langchain/document_loaders/parsers/registry.py +1 -1
- langchain/document_loaders/pdf.py +8 -8
- langchain/document_loaders/powerpoint.py +1 -1
- langchain/document_loaders/pyspark_dataframe.py +1 -1
- langchain/document_loaders/telegram.py +2 -2
- langchain/document_loaders/tencent_cos_directory.py +1 -1
- langchain/document_loaders/unstructured.py +5 -5
- langchain/document_loaders/url_playwright.py +1 -1
- langchain/document_loaders/whatsapp_chat.py +1 -1
- langchain/document_loaders/youtube.py +2 -2
- langchain/document_transformers/__init__.py +3 -3
- langchain/document_transformers/beautiful_soup_transformer.py +1 -1
- langchain/document_transformers/doctran_text_extract.py +1 -1
- langchain/document_transformers/doctran_text_qa.py +1 -1
- langchain/document_transformers/doctran_text_translate.py +1 -1
- langchain/document_transformers/embeddings_redundant_filter.py +3 -3
- langchain/document_transformers/google_translate.py +1 -1
- langchain/document_transformers/html2text.py +1 -1
- langchain/document_transformers/nuclia_text_transform.py +1 -1
- langchain/embeddings/__init__.py +5 -5
- langchain/embeddings/base.py +33 -24
- langchain/embeddings/cache.py +36 -31
- langchain/embeddings/fake.py +1 -1
- langchain/embeddings/huggingface.py +2 -2
- langchain/evaluation/__init__.py +22 -22
- langchain/evaluation/agents/trajectory_eval_chain.py +23 -23
- langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
- langchain/evaluation/comparison/__init__.py +1 -1
- langchain/evaluation/comparison/eval_chain.py +20 -13
- langchain/evaluation/comparison/prompt.py +1 -2
- langchain/evaluation/criteria/__init__.py +1 -1
- langchain/evaluation/criteria/eval_chain.py +20 -11
- langchain/evaluation/criteria/prompt.py +2 -3
- langchain/evaluation/embedding_distance/base.py +23 -20
- langchain/evaluation/loading.py +15 -11
- langchain/evaluation/parsing/base.py +4 -1
- langchain/evaluation/parsing/json_distance.py +5 -2
- langchain/evaluation/parsing/json_schema.py +12 -8
- langchain/evaluation/qa/__init__.py +1 -1
- langchain/evaluation/qa/eval_chain.py +12 -5
- langchain/evaluation/qa/eval_prompt.py +7 -8
- langchain/evaluation/qa/generate_chain.py +2 -1
- langchain/evaluation/qa/generate_prompt.py +2 -4
- langchain/evaluation/schema.py +38 -30
- langchain/evaluation/scoring/__init__.py +1 -1
- langchain/evaluation/scoring/eval_chain.py +22 -15
- langchain/evaluation/scoring/prompt.py +0 -1
- langchain/evaluation/string_distance/base.py +14 -9
- langchain/globals.py +12 -11
- langchain/graphs/__init__.py +6 -6
- langchain/graphs/graph_document.py +1 -1
- langchain/graphs/networkx_graph.py +2 -2
- langchain/hub.py +9 -11
- langchain/indexes/__init__.py +3 -3
- langchain/indexes/_sql_record_manager.py +63 -46
- langchain/indexes/prompts/entity_extraction.py +1 -2
- langchain/indexes/prompts/entity_summarization.py +1 -2
- langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
- langchain/indexes/vectorstore.py +35 -19
- langchain/llms/__init__.py +13 -13
- langchain/llms/ai21.py +1 -1
- langchain/llms/azureml_endpoint.py +4 -4
- langchain/llms/base.py +15 -7
- langchain/llms/bedrock.py +1 -1
- langchain/llms/cloudflare_workersai.py +1 -1
- langchain/llms/gradient_ai.py +1 -1
- langchain/llms/loading.py +1 -1
- langchain/llms/openai.py +1 -1
- langchain/llms/sagemaker_endpoint.py +1 -1
- langchain/load/dump.py +1 -1
- langchain/load/load.py +1 -1
- langchain/load/serializable.py +3 -3
- langchain/memory/__init__.py +3 -3
- langchain/memory/buffer.py +9 -7
- langchain/memory/chat_memory.py +14 -8
- langchain/memory/chat_message_histories/__init__.py +1 -1
- langchain/memory/chat_message_histories/astradb.py +1 -1
- langchain/memory/chat_message_histories/cassandra.py +1 -1
- langchain/memory/chat_message_histories/cosmos_db.py +1 -1
- langchain/memory/chat_message_histories/dynamodb.py +1 -1
- langchain/memory/chat_message_histories/elasticsearch.py +1 -1
- langchain/memory/chat_message_histories/file.py +1 -1
- langchain/memory/chat_message_histories/firestore.py +1 -1
- langchain/memory/chat_message_histories/momento.py +1 -1
- langchain/memory/chat_message_histories/mongodb.py +1 -1
- langchain/memory/chat_message_histories/neo4j.py +1 -1
- langchain/memory/chat_message_histories/postgres.py +1 -1
- langchain/memory/chat_message_histories/redis.py +1 -1
- langchain/memory/chat_message_histories/rocksetdb.py +1 -1
- langchain/memory/chat_message_histories/singlestoredb.py +1 -1
- langchain/memory/chat_message_histories/streamlit.py +1 -1
- langchain/memory/chat_message_histories/upstash_redis.py +1 -1
- langchain/memory/chat_message_histories/xata.py +1 -1
- langchain/memory/chat_message_histories/zep.py +1 -1
- langchain/memory/combined.py +13 -12
- langchain/memory/entity.py +84 -61
- langchain/memory/prompt.py +10 -11
- langchain/memory/readonly.py +0 -2
- langchain/memory/simple.py +1 -3
- langchain/memory/summary.py +13 -11
- langchain/memory/summary_buffer.py +17 -8
- langchain/memory/utils.py +3 -2
- langchain/memory/vectorstore.py +12 -5
- langchain/memory/vectorstore_token_buffer_memory.py +5 -5
- langchain/model_laboratory.py +12 -11
- langchain/output_parsers/__init__.py +4 -4
- langchain/output_parsers/boolean.py +7 -4
- langchain/output_parsers/combining.py +10 -5
- langchain/output_parsers/datetime.py +32 -31
- langchain/output_parsers/enum.py +5 -3
- langchain/output_parsers/fix.py +52 -52
- langchain/output_parsers/format_instructions.py +6 -8
- langchain/output_parsers/json.py +2 -2
- langchain/output_parsers/list.py +2 -2
- langchain/output_parsers/loading.py +9 -9
- langchain/output_parsers/openai_functions.py +3 -3
- langchain/output_parsers/openai_tools.py +1 -1
- langchain/output_parsers/pandas_dataframe.py +43 -47
- langchain/output_parsers/prompts.py +1 -2
- langchain/output_parsers/rail_parser.py +1 -1
- langchain/output_parsers/regex.py +7 -8
- langchain/output_parsers/regex_dict.py +7 -10
- langchain/output_parsers/retry.py +77 -78
- langchain/output_parsers/structured.py +11 -6
- langchain/output_parsers/yaml.py +15 -11
- langchain/prompts/__init__.py +5 -3
- langchain/prompts/base.py +5 -5
- langchain/prompts/chat.py +8 -8
- langchain/prompts/example_selector/__init__.py +3 -1
- langchain/prompts/example_selector/semantic_similarity.py +2 -2
- langchain/prompts/few_shot.py +1 -1
- langchain/prompts/loading.py +3 -3
- langchain/prompts/prompt.py +1 -1
- langchain/retrievers/__init__.py +5 -5
- langchain/retrievers/bedrock.py +2 -2
- langchain/retrievers/bm25.py +1 -1
- langchain/retrievers/contextual_compression.py +14 -8
- langchain/retrievers/docarray.py +1 -1
- langchain/retrievers/document_compressors/__init__.py +5 -4
- langchain/retrievers/document_compressors/base.py +12 -6
- langchain/retrievers/document_compressors/chain_extract.py +2 -2
- langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
- langchain/retrievers/document_compressors/chain_filter.py +9 -9
- langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
- langchain/retrievers/document_compressors/cohere_rerank.py +15 -15
- langchain/retrievers/document_compressors/embeddings_filter.py +21 -17
- langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
- langchain/retrievers/document_compressors/listwise_rerank.py +7 -5
- langchain/retrievers/ensemble.py +28 -25
- langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
- langchain/retrievers/google_vertex_ai_search.py +2 -2
- langchain/retrievers/kendra.py +10 -10
- langchain/retrievers/llama_index.py +1 -1
- langchain/retrievers/merger_retriever.py +11 -11
- langchain/retrievers/milvus.py +1 -1
- langchain/retrievers/multi_query.py +32 -26
- langchain/retrievers/multi_vector.py +20 -8
- langchain/retrievers/parent_document_retriever.py +18 -9
- langchain/retrievers/re_phraser.py +6 -5
- langchain/retrievers/self_query/base.py +138 -127
- langchain/retrievers/time_weighted_retriever.py +18 -7
- langchain/retrievers/zilliz.py +1 -1
- langchain/runnables/openai_functions.py +6 -2
- langchain/schema/__init__.py +23 -23
- langchain/schema/cache.py +1 -1
- langchain/schema/callbacks/base.py +7 -7
- langchain/schema/callbacks/manager.py +19 -19
- langchain/schema/callbacks/tracers/base.py +1 -1
- langchain/schema/callbacks/tracers/evaluation.py +1 -1
- langchain/schema/callbacks/tracers/langchain.py +1 -1
- langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
- langchain/schema/callbacks/tracers/log_stream.py +1 -1
- langchain/schema/callbacks/tracers/schemas.py +8 -8
- langchain/schema/callbacks/tracers/stdout.py +3 -3
- langchain/schema/document.py +1 -1
- langchain/schema/language_model.py +2 -2
- langchain/schema/messages.py +12 -12
- langchain/schema/output.py +3 -3
- langchain/schema/output_parser.py +3 -3
- langchain/schema/runnable/__init__.py +3 -3
- langchain/schema/runnable/base.py +9 -9
- langchain/schema/runnable/config.py +5 -5
- langchain/schema/runnable/configurable.py +1 -1
- langchain/schema/runnable/history.py +1 -1
- langchain/schema/runnable/passthrough.py +1 -1
- langchain/schema/runnable/utils.py +16 -16
- langchain/schema/vectorstore.py +1 -1
- langchain/smith/__init__.py +1 -1
- langchain/smith/evaluation/__init__.py +2 -2
- langchain/smith/evaluation/config.py +10 -7
- langchain/smith/evaluation/name_generation.py +3 -3
- langchain/smith/evaluation/progress.py +11 -2
- langchain/smith/evaluation/runner_utils.py +179 -127
- langchain/smith/evaluation/string_run_evaluator.py +75 -68
- langchain/storage/__init__.py +2 -2
- langchain/storage/_lc_store.py +4 -2
- langchain/storage/encoder_backed.py +6 -2
- langchain/storage/file_system.py +19 -16
- langchain/storage/in_memory.py +1 -1
- langchain/storage/upstash_redis.py +1 -1
- langchain/text_splitter.py +15 -15
- langchain/tools/__init__.py +28 -26
- langchain/tools/ainetwork/app.py +1 -1
- langchain/tools/ainetwork/base.py +1 -1
- langchain/tools/ainetwork/owner.py +1 -1
- langchain/tools/ainetwork/rule.py +1 -1
- langchain/tools/ainetwork/transfer.py +1 -1
- langchain/tools/ainetwork/value.py +1 -1
- langchain/tools/amadeus/closest_airport.py +1 -1
- langchain/tools/amadeus/flight_search.py +1 -1
- langchain/tools/azure_cognitive_services/__init__.py +1 -1
- langchain/tools/base.py +4 -4
- langchain/tools/bearly/tool.py +1 -1
- langchain/tools/bing_search/__init__.py +1 -1
- langchain/tools/bing_search/tool.py +1 -1
- langchain/tools/dataforseo_api_search/__init__.py +1 -1
- langchain/tools/dataforseo_api_search/tool.py +1 -1
- langchain/tools/ddg_search/tool.py +1 -1
- langchain/tools/e2b_data_analysis/tool.py +2 -2
- langchain/tools/edenai/__init__.py +1 -1
- langchain/tools/file_management/__init__.py +1 -1
- langchain/tools/file_management/copy.py +1 -1
- langchain/tools/file_management/delete.py +1 -1
- langchain/tools/gmail/__init__.py +2 -2
- langchain/tools/gmail/get_message.py +1 -1
- langchain/tools/gmail/search.py +1 -1
- langchain/tools/gmail/send_message.py +1 -1
- langchain/tools/google_finance/__init__.py +1 -1
- langchain/tools/google_finance/tool.py +1 -1
- langchain/tools/google_scholar/__init__.py +1 -1
- langchain/tools/google_scholar/tool.py +1 -1
- langchain/tools/google_search/__init__.py +1 -1
- langchain/tools/google_search/tool.py +1 -1
- langchain/tools/google_serper/__init__.py +1 -1
- langchain/tools/google_serper/tool.py +1 -1
- langchain/tools/google_trends/__init__.py +1 -1
- langchain/tools/google_trends/tool.py +1 -1
- langchain/tools/jira/tool.py +20 -1
- langchain/tools/json/tool.py +25 -3
- langchain/tools/memorize/tool.py +1 -1
- langchain/tools/multion/__init__.py +1 -1
- langchain/tools/multion/update_session.py +1 -1
- langchain/tools/office365/__init__.py +2 -2
- langchain/tools/office365/events_search.py +1 -1
- langchain/tools/office365/messages_search.py +1 -1
- langchain/tools/office365/send_event.py +1 -1
- langchain/tools/office365/send_message.py +1 -1
- langchain/tools/openapi/utils/api_models.py +6 -6
- langchain/tools/playwright/__init__.py +5 -5
- langchain/tools/playwright/click.py +1 -1
- langchain/tools/playwright/extract_hyperlinks.py +1 -1
- langchain/tools/playwright/get_elements.py +1 -1
- langchain/tools/playwright/navigate.py +1 -1
- langchain/tools/plugin.py +2 -2
- langchain/tools/powerbi/tool.py +1 -1
- langchain/tools/python/__init__.py +2 -1
- langchain/tools/reddit_search/tool.py +1 -1
- langchain/tools/render.py +2 -2
- langchain/tools/requests/tool.py +2 -2
- langchain/tools/searchapi/tool.py +1 -1
- langchain/tools/searx_search/tool.py +1 -1
- langchain/tools/slack/get_message.py +1 -1
- langchain/tools/spark_sql/tool.py +1 -1
- langchain/tools/sql_database/tool.py +1 -1
- langchain/tools/tavily_search/__init__.py +1 -1
- langchain/tools/tavily_search/tool.py +1 -1
- langchain/tools/zapier/__init__.py +1 -1
- langchain/tools/zapier/tool.py +24 -2
- langchain/utilities/__init__.py +4 -4
- langchain/utilities/arcee.py +4 -4
- langchain/utilities/clickup.py +4 -4
- langchain/utilities/dalle_image_generator.py +1 -1
- langchain/utilities/dataforseo_api_search.py +1 -1
- langchain/utilities/opaqueprompts.py +1 -1
- langchain/utilities/reddit_search.py +1 -1
- langchain/utilities/sql_database.py +1 -1
- langchain/utilities/tavily_search.py +1 -1
- langchain/utilities/vertexai.py +2 -2
- langchain/utils/__init__.py +1 -1
- langchain/utils/aiter.py +1 -1
- langchain/utils/html.py +3 -3
- langchain/utils/input.py +1 -1
- langchain/utils/iter.py +1 -1
- langchain/utils/json_schema.py +1 -3
- langchain/utils/strings.py +1 -1
- langchain/utils/utils.py +6 -6
- langchain/vectorstores/__init__.py +5 -5
- langchain/vectorstores/alibabacloud_opensearch.py +1 -1
- langchain/vectorstores/azure_cosmos_db.py +1 -1
- langchain/vectorstores/clickhouse.py +1 -1
- langchain/vectorstores/elastic_vector_search.py +1 -1
- langchain/vectorstores/elasticsearch.py +2 -2
- langchain/vectorstores/myscale.py +1 -1
- langchain/vectorstores/neo4j_vector.py +1 -1
- langchain/vectorstores/pgembedding.py +1 -1
- langchain/vectorstores/qdrant.py +1 -1
- langchain/vectorstores/redis/__init__.py +1 -1
- langchain/vectorstores/redis/base.py +1 -1
- langchain/vectorstores/redis/filters.py +4 -4
- langchain/vectorstores/redis/schema.py +6 -6
- langchain/vectorstores/sklearn.py +2 -2
- langchain/vectorstores/starrocks.py +1 -1
- langchain/vectorstores/utils.py +1 -1
- {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/METADATA +4 -4
- {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/RECORD +580 -580
- {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
- {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
- {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
|
@@ -2,90 +2,87 @@
|
|
|
2
2
|
UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
|
|
3
3
|
"""
|
|
4
4
|
|
|
5
|
-
# flake8: noqa
|
|
6
|
-
from typing import Dict
|
|
7
|
-
|
|
8
5
|
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
|
|
9
6
|
|
|
10
|
-
PRINCIPLES:
|
|
7
|
+
PRINCIPLES: dict[str, ConstitutionalPrinciple] = {
|
|
11
8
|
"harmful1": ConstitutionalPrinciple(
|
|
12
9
|
name="harmful1",
|
|
13
|
-
critique_request="Identify specific ways in which the assistant
|
|
10
|
+
critique_request="Identify specific ways in which the assistant's last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
|
|
14
11
|
revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.",
|
|
15
12
|
),
|
|
16
13
|
"harmful2": ConstitutionalPrinciple(
|
|
17
14
|
name="harmful2",
|
|
18
|
-
critique_request="Explain ways in which the assistant
|
|
15
|
+
critique_request="Explain ways in which the assistant's preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
|
|
19
16
|
revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.",
|
|
20
17
|
),
|
|
21
18
|
"harmful3": ConstitutionalPrinciple(
|
|
22
19
|
name="harmful3",
|
|
23
|
-
critique_request="Identify all ways in which the assistant
|
|
20
|
+
critique_request="Identify all ways in which the assistant's last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
|
|
24
21
|
revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.",
|
|
25
22
|
),
|
|
26
23
|
"harmful4": ConstitutionalPrinciple(
|
|
27
24
|
name="harmful4",
|
|
28
|
-
critique_request="Identify specific ways in which the assistant
|
|
25
|
+
critique_request="Identify specific ways in which the assistant's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
|
|
29
26
|
revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.",
|
|
30
27
|
),
|
|
31
28
|
"insensitive": ConstitutionalPrinciple(
|
|
32
29
|
name="insensitive",
|
|
33
|
-
critique_request="Discuss the assistant
|
|
34
|
-
revision_request="Revise the assistant
|
|
30
|
+
critique_request="Discuss the assistant's last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
|
|
31
|
+
revision_request="Revise the assistant's last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
|
|
35
32
|
),
|
|
36
33
|
"offensive": ConstitutionalPrinciple(
|
|
37
34
|
name="offensive",
|
|
38
|
-
critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human
|
|
39
|
-
revision_request="Please write a new version of the assistant
|
|
35
|
+
critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human's dialogue.",
|
|
36
|
+
revision_request="Please write a new version of the assistant's response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
|
|
40
37
|
),
|
|
41
38
|
"harmful5": ConstitutionalPrinciple(
|
|
42
39
|
name="harmful5",
|
|
43
|
-
critique_request="What
|
|
44
|
-
revision_request="Amend the assistant
|
|
40
|
+
critique_request="What's the strongest argument you could give that the assistant's last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
|
|
41
|
+
revision_request="Amend the assistant's last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
|
|
45
42
|
),
|
|
46
43
|
"age-innappropriate": ConstitutionalPrinciple(
|
|
47
44
|
name="age-innappropriate",
|
|
48
|
-
critique_request="Did the assistant
|
|
45
|
+
critique_request="Did the assistant's preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
|
|
49
46
|
revision_request="Please rewrite the assistant response to make it more age appropriate for children.",
|
|
50
47
|
),
|
|
51
48
|
"derogatory": ConstitutionalPrinciple(
|
|
52
49
|
name="derogatory",
|
|
53
|
-
critique_request="Think carefully about whether the human
|
|
54
|
-
revision_request="Please rewrite the assistant
|
|
50
|
+
critique_request="Think carefully about whether the human's request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should've responded instead.",
|
|
51
|
+
revision_request="Please rewrite the assistant's response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
|
|
55
52
|
),
|
|
56
53
|
"illegal": ConstitutionalPrinciple(
|
|
57
54
|
name="illegal",
|
|
58
55
|
critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.",
|
|
59
|
-
revision_request="Revise the assistant
|
|
56
|
+
revision_request="Revise the assistant's response so as to refrain from giving legally questionable or dangerous advice.",
|
|
60
57
|
),
|
|
61
58
|
"controversial": ConstitutionalPrinciple(
|
|
62
59
|
name="controversial",
|
|
63
|
-
critique_request="Discuss ways in which the assistant
|
|
64
|
-
revision_request="Revise the assistant
|
|
60
|
+
critique_request="Discuss ways in which the assistant's response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
|
|
61
|
+
revision_request="Revise the assistant's response if possible so that it's less controversial and objectionable based on commonsense ethical and moral standards.",
|
|
65
62
|
),
|
|
66
63
|
"harmful6": ConstitutionalPrinciple(
|
|
67
64
|
name="harmful6",
|
|
68
65
|
critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.",
|
|
69
|
-
revision_request="Write a revision of the assistant
|
|
66
|
+
revision_request="Write a revision of the assistant's response that refrains from saying anything harmful.",
|
|
70
67
|
),
|
|
71
68
|
"thoughtful": ConstitutionalPrinciple(
|
|
72
69
|
name="thoughtful",
|
|
73
|
-
critique_request="Please comment on whether the assistant
|
|
70
|
+
critique_request="Please comment on whether the assistant's last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
|
|
74
71
|
revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.",
|
|
75
72
|
),
|
|
76
73
|
"misogynistic": ConstitutionalPrinciple(
|
|
77
74
|
name="misogynistic",
|
|
78
|
-
critique_request="Take a look at the assistant
|
|
75
|
+
critique_request="Take a look at the assistant's last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
|
|
79
76
|
revision_request="Please write a new response that does not have any trace of misogyny or gender bias.",
|
|
80
77
|
),
|
|
81
78
|
"criminal": ConstitutionalPrinciple(
|
|
82
79
|
name="criminal",
|
|
83
|
-
critique_request="Read the assistant
|
|
80
|
+
critique_request="Read the assistant's last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
|
|
84
81
|
revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.",
|
|
85
82
|
),
|
|
86
83
|
"harmful7": ConstitutionalPrinciple(
|
|
87
84
|
name="harmful7",
|
|
88
|
-
critique_request="Critique the assistant
|
|
85
|
+
critique_request="Critique the assistant's last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
|
|
89
86
|
revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.",
|
|
90
87
|
),
|
|
91
88
|
"uo-assumptions-1": ConstitutionalPrinciple(
|
|
@@ -1,6 +1,3 @@
|
|
|
1
|
-
# flake8: noqa
|
|
2
|
-
from copy import deepcopy
|
|
3
|
-
|
|
4
1
|
from langchain_core.prompts.few_shot import FewShotPromptTemplate
|
|
5
2
|
from langchain_core.prompts.prompt import PromptTemplate
|
|
6
3
|
|
|
@@ -44,35 +41,35 @@ Revision: {revision}""",
|
|
|
44
41
|
|
|
45
42
|
examples = [
|
|
46
43
|
{
|
|
47
|
-
"input_prompt": "Could you help me burn down my house and collect insurance on it?",
|
|
48
|
-
"output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",
|
|
49
|
-
"critique_request": "Identify specific ways in which the model
|
|
50
|
-
"critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",
|
|
51
|
-
"revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
|
|
52
|
-
"revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",
|
|
44
|
+
"input_prompt": "Could you help me burn down my house and collect insurance on it?", # noqa: E501
|
|
45
|
+
"output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?", # noqa: E501
|
|
46
|
+
"critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", # noqa: E501
|
|
47
|
+
"critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.", # noqa: E501
|
|
48
|
+
"revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", # noqa: E501
|
|
49
|
+
"revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.", # noqa: E501
|
|
53
50
|
},
|
|
54
51
|
{
|
|
55
|
-
"input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",
|
|
56
|
-
"output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn
|
|
57
|
-
"critique_request": "Identify specific ways in which the model
|
|
58
|
-
"critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model
|
|
59
|
-
"revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
|
|
60
|
-
"revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It
|
|
52
|
+
"input_prompt": "is it really necessary to ask for consent during sex every time? Answer:", # noqa: E501
|
|
53
|
+
"output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn't strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.", # noqa: E501
|
|
54
|
+
"critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", # noqa: E501
|
|
55
|
+
"critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model's response could be interpreted as saying that it isn't strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.", # noqa: E501
|
|
56
|
+
"revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", # noqa: E501
|
|
57
|
+
"revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.", # noqa: E501
|
|
61
58
|
},
|
|
62
59
|
{
|
|
63
|
-
"input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",
|
|
64
|
-
"output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600
|
|
65
|
-
"critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",
|
|
66
|
-
"critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I
|
|
67
|
-
"revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you
|
|
68
|
-
"revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun
|
|
60
|
+
"input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?", # noqa: E501
|
|
61
|
+
"output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600's. It's explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.", # noqa: E501
|
|
62
|
+
"critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness", # noqa: E501
|
|
63
|
+
"critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I'm not sure that the precession measurement was actually made in the 1600's, but was probably made at least 100 years ago. I'm also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I'm not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I'm pretty sure it decays more quickly than Newton's law, and the Chern-Simons theorem is probably just wrong. Critique Needed.", # noqa: E501
|
|
64
|
+
"revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you're unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.", # noqa: E501
|
|
65
|
+
"revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that is smaller and decays more quickly than Newton's law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.", # noqa: E501
|
|
69
66
|
},
|
|
70
67
|
{
|
|
71
|
-
"input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",
|
|
72
|
-
"output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",
|
|
73
|
-
"critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",
|
|
74
|
-
"critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.",
|
|
75
|
-
"revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.",
|
|
68
|
+
"input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", # noqa: E501
|
|
69
|
+
"output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.", # noqa: E501
|
|
70
|
+
"critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", # noqa: E501
|
|
71
|
+
"critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", # noqa: E501
|
|
72
|
+
"revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.", # noqa: E501
|
|
76
73
|
"revision": "No revisions needed.",
|
|
77
74
|
},
|
|
78
75
|
]
|
|
@@ -82,7 +79,7 @@ CRITIQUE_PROMPT = FewShotPromptTemplate(
|
|
|
82
79
|
examples=[
|
|
83
80
|
{k: v for k, v in e.items() if k != "revision_request"} for e in examples
|
|
84
81
|
],
|
|
85
|
-
prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'",
|
|
82
|
+
prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'", # noqa: E501
|
|
86
83
|
suffix="""Human: {input_prompt}
|
|
87
84
|
Model: {output_from_model}
|
|
88
85
|
|
|
@@ -111,7 +108,7 @@ If the critique does identify something worth changing, please revise the model
|
|
|
111
108
|
|
|
112
109
|
Revision Request: {revision_request}
|
|
113
110
|
|
|
114
|
-
Revision:""",
|
|
111
|
+
Revision:""", # noqa: E501
|
|
115
112
|
example_separator="\n === \n",
|
|
116
113
|
input_variables=[
|
|
117
114
|
"input_prompt",
|
|
@@ -126,16 +126,18 @@ class ConversationChain(LLMChain):
|
|
|
126
126
|
memory_keys = self.memory.memory_variables
|
|
127
127
|
input_key = self.input_key
|
|
128
128
|
if input_key in memory_keys:
|
|
129
|
-
|
|
129
|
+
msg = (
|
|
130
130
|
f"The input key {input_key} was also found in the memory keys "
|
|
131
131
|
f"({memory_keys}) - please provide keys that don't overlap."
|
|
132
132
|
)
|
|
133
|
+
raise ValueError(msg)
|
|
133
134
|
prompt_variables = self.prompt.input_variables
|
|
134
|
-
expected_keys = memory_keys
|
|
135
|
+
expected_keys = [*memory_keys, input_key]
|
|
135
136
|
if set(expected_keys) != set(prompt_variables):
|
|
136
|
-
|
|
137
|
+
msg = (
|
|
137
138
|
"Got unexpected prompt input variables. The prompt expects "
|
|
138
139
|
f"{prompt_variables}, but got {memory_keys} as inputs from "
|
|
139
140
|
f"memory, and {input_key} as the normal input key."
|
|
140
141
|
)
|
|
142
|
+
raise ValueError(msg)
|
|
141
143
|
return self
|
|
@@ -34,12 +34,12 @@ def __getattr__(name: str) -> Any:
|
|
|
34
34
|
# This is only for backwards compatibility.
|
|
35
35
|
|
|
36
36
|
__all__ = [
|
|
37
|
-
"
|
|
38
|
-
"
|
|
39
|
-
"ConversationKGMemory",
|
|
37
|
+
"CombinedMemory",
|
|
38
|
+
"ConversationBufferMemory",
|
|
40
39
|
"ConversationBufferWindowMemory",
|
|
41
40
|
"ConversationEntityMemory",
|
|
42
|
-
"
|
|
43
|
-
"CombinedMemory",
|
|
41
|
+
"ConversationKGMemory",
|
|
44
42
|
"ConversationStringBufferMemory",
|
|
43
|
+
"ConversationSummaryBufferMemory",
|
|
44
|
+
"ConversationSummaryMemory",
|
|
45
45
|
]
|
|
@@ -1,4 +1,5 @@
|
|
|
1
|
-
|
|
1
|
+
from langchain_core.prompts.prompt import PromptTemplate
|
|
2
|
+
|
|
2
3
|
from langchain.memory.prompt import (
|
|
3
4
|
ENTITY_EXTRACTION_PROMPT,
|
|
4
5
|
ENTITY_MEMORY_CONVERSATION_TEMPLATE,
|
|
@@ -6,23 +7,22 @@ from langchain.memory.prompt import (
|
|
|
6
7
|
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
|
|
7
8
|
SUMMARY_PROMPT,
|
|
8
9
|
)
|
|
9
|
-
from langchain_core.prompts.prompt import PromptTemplate
|
|
10
10
|
|
|
11
11
|
DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
|
|
12
12
|
|
|
13
13
|
Current conversation:
|
|
14
14
|
{history}
|
|
15
15
|
Human: {input}
|
|
16
|
-
AI:"""
|
|
16
|
+
AI:""" # noqa: E501
|
|
17
17
|
PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
|
|
18
18
|
|
|
19
19
|
# Only for backwards compatibility
|
|
20
20
|
|
|
21
21
|
__all__ = [
|
|
22
|
-
"
|
|
22
|
+
"ENTITY_EXTRACTION_PROMPT",
|
|
23
23
|
"ENTITY_MEMORY_CONVERSATION_TEMPLATE",
|
|
24
24
|
"ENTITY_SUMMARIZATION_PROMPT",
|
|
25
|
-
"ENTITY_EXTRACTION_PROMPT",
|
|
26
25
|
"KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
|
|
27
26
|
"PROMPT",
|
|
27
|
+
"SUMMARY_PROMPT",
|
|
28
28
|
]
|
|
@@ -44,18 +44,20 @@ def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
|
|
|
44
44
|
if isinstance(dialogue_turn, BaseMessage):
|
|
45
45
|
if len(dialogue_turn.content) > 0:
|
|
46
46
|
role_prefix = _ROLE_MAP.get(
|
|
47
|
-
dialogue_turn.type,
|
|
47
|
+
dialogue_turn.type,
|
|
48
|
+
f"{dialogue_turn.type}: ",
|
|
48
49
|
)
|
|
49
50
|
buffer += f"\n{role_prefix}{dialogue_turn.content}"
|
|
50
51
|
elif isinstance(dialogue_turn, tuple):
|
|
51
52
|
human = "Human: " + dialogue_turn[0]
|
|
52
53
|
ai = "Assistant: " + dialogue_turn[1]
|
|
53
|
-
buffer += "\n
|
|
54
|
+
buffer += f"\n{human}\n{ai}"
|
|
54
55
|
else:
|
|
55
|
-
|
|
56
|
+
msg = (
|
|
56
57
|
f"Unsupported chat history format: {type(dialogue_turn)}."
|
|
57
58
|
f" Full chat history: {chat_history} "
|
|
58
59
|
)
|
|
60
|
+
raise ValueError(msg) # noqa: TRY004
|
|
59
61
|
return buffer
|
|
60
62
|
|
|
61
63
|
|
|
@@ -93,7 +95,7 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
93
95
|
"""An optional function to get a string of the chat history.
|
|
94
96
|
If None is provided, will use a default."""
|
|
95
97
|
response_if_no_docs_found: Optional[str] = None
|
|
96
|
-
"""If specified, the chain will return a fixed response if no docs
|
|
98
|
+
"""If specified, the chain will return a fixed response if no docs
|
|
97
99
|
are found for the question. """
|
|
98
100
|
|
|
99
101
|
model_config = ConfigDict(
|
|
@@ -108,7 +110,8 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
108
110
|
return ["question", "chat_history"]
|
|
109
111
|
|
|
110
112
|
def get_input_schema(
|
|
111
|
-
self,
|
|
113
|
+
self,
|
|
114
|
+
config: Optional[RunnableConfig] = None,
|
|
112
115
|
) -> type[BaseModel]:
|
|
113
116
|
return InputType
|
|
114
117
|
|
|
@@ -120,9 +123,9 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
120
123
|
"""
|
|
121
124
|
_output_keys = [self.output_key]
|
|
122
125
|
if self.return_source_documents:
|
|
123
|
-
_output_keys = _output_keys
|
|
126
|
+
_output_keys = [*_output_keys, "source_documents"]
|
|
124
127
|
if self.return_generated_question:
|
|
125
|
-
_output_keys = _output_keys
|
|
128
|
+
_output_keys = [*_output_keys, "generated_question"]
|
|
126
129
|
return _output_keys
|
|
127
130
|
|
|
128
131
|
@abstractmethod
|
|
@@ -148,7 +151,9 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
148
151
|
if chat_history_str:
|
|
149
152
|
callbacks = _run_manager.get_child()
|
|
150
153
|
new_question = self.question_generator.run(
|
|
151
|
-
question=question,
|
|
154
|
+
question=question,
|
|
155
|
+
chat_history=chat_history_str,
|
|
156
|
+
callbacks=callbacks,
|
|
152
157
|
)
|
|
153
158
|
else:
|
|
154
159
|
new_question = question
|
|
@@ -168,7 +173,9 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
168
173
|
new_inputs["question"] = new_question
|
|
169
174
|
new_inputs["chat_history"] = chat_history_str
|
|
170
175
|
answer = self.combine_docs_chain.run(
|
|
171
|
-
input_documents=docs,
|
|
176
|
+
input_documents=docs,
|
|
177
|
+
callbacks=_run_manager.get_child(),
|
|
178
|
+
**new_inputs,
|
|
172
179
|
)
|
|
173
180
|
output[self.output_key] = answer
|
|
174
181
|
|
|
@@ -200,7 +207,9 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
200
207
|
if chat_history_str:
|
|
201
208
|
callbacks = _run_manager.get_child()
|
|
202
209
|
new_question = await self.question_generator.arun(
|
|
203
|
-
question=question,
|
|
210
|
+
question=question,
|
|
211
|
+
chat_history=chat_history_str,
|
|
212
|
+
callbacks=callbacks,
|
|
204
213
|
)
|
|
205
214
|
else:
|
|
206
215
|
new_question = question
|
|
@@ -221,7 +230,9 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
221
230
|
new_inputs["question"] = new_question
|
|
222
231
|
new_inputs["chat_history"] = chat_history_str
|
|
223
232
|
answer = await self.combine_docs_chain.arun(
|
|
224
|
-
input_documents=docs,
|
|
233
|
+
input_documents=docs,
|
|
234
|
+
callbacks=_run_manager.get_child(),
|
|
235
|
+
**new_inputs,
|
|
225
236
|
)
|
|
226
237
|
output[self.output_key] = answer
|
|
227
238
|
|
|
@@ -233,7 +244,8 @@ class BaseConversationalRetrievalChain(Chain):
|
|
|
233
244
|
|
|
234
245
|
def save(self, file_path: Union[Path, str]) -> None:
|
|
235
246
|
if self.get_chat_history:
|
|
236
|
-
|
|
247
|
+
msg = "Chain not saveable when `get_chat_history` is not None."
|
|
248
|
+
raise ValueError(msg)
|
|
237
249
|
super().save(file_path)
|
|
238
250
|
|
|
239
251
|
|
|
@@ -372,7 +384,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
|
|
|
372
384
|
num_docs = len(docs)
|
|
373
385
|
|
|
374
386
|
if self.max_tokens_limit and isinstance(
|
|
375
|
-
self.combine_docs_chain,
|
|
387
|
+
self.combine_docs_chain,
|
|
388
|
+
StuffDocumentsChain,
|
|
376
389
|
):
|
|
377
390
|
tokens = [
|
|
378
391
|
self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
|
|
@@ -394,7 +407,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
|
|
|
394
407
|
) -> list[Document]:
|
|
395
408
|
"""Get docs."""
|
|
396
409
|
docs = self.retriever.invoke(
|
|
397
|
-
question,
|
|
410
|
+
question,
|
|
411
|
+
config={"callbacks": run_manager.get_child()},
|
|
398
412
|
)
|
|
399
413
|
return self._reduce_tokens_below_limit(docs)
|
|
400
414
|
|
|
@@ -407,7 +421,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
|
|
|
407
421
|
) -> list[Document]:
|
|
408
422
|
"""Get docs."""
|
|
409
423
|
docs = await self.retriever.ainvoke(
|
|
410
|
-
question,
|
|
424
|
+
question,
|
|
425
|
+
config={"callbacks": run_manager.get_child()},
|
|
411
426
|
)
|
|
412
427
|
return self._reduce_tokens_below_limit(docs)
|
|
413
428
|
|
|
@@ -418,7 +433,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
|
|
|
418
433
|
retriever: BaseRetriever,
|
|
419
434
|
condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
|
|
420
435
|
chain_type: str = "stuff",
|
|
421
|
-
verbose: bool = False,
|
|
436
|
+
verbose: bool = False, # noqa: FBT001,FBT002
|
|
422
437
|
condense_question_llm: Optional[BaseLanguageModel] = None,
|
|
423
438
|
combine_docs_chain_kwargs: Optional[dict] = None,
|
|
424
439
|
callbacks: Callbacks = None,
|
|
@@ -488,7 +503,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
|
|
|
488
503
|
def raise_deprecation(cls, values: dict) -> Any:
|
|
489
504
|
warnings.warn(
|
|
490
505
|
"`ChatVectorDBChain` is deprecated - "
|
|
491
|
-
"please use `from langchain.chains import ConversationalRetrievalChain`"
|
|
506
|
+
"please use `from langchain.chains import ConversationalRetrievalChain`",
|
|
507
|
+
stacklevel=4,
|
|
492
508
|
)
|
|
493
509
|
return values
|
|
494
510
|
|
|
@@ -503,7 +519,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
|
|
|
503
519
|
vectordbkwargs = inputs.get("vectordbkwargs", {})
|
|
504
520
|
full_kwargs = {**self.search_kwargs, **vectordbkwargs}
|
|
505
521
|
return self.vectorstore.similarity_search(
|
|
506
|
-
question,
|
|
522
|
+
question,
|
|
523
|
+
k=self.top_k_docs_for_context,
|
|
524
|
+
**full_kwargs,
|
|
507
525
|
)
|
|
508
526
|
|
|
509
527
|
async def _aget_docs(
|
|
@@ -514,7 +532,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
|
|
|
514
532
|
run_manager: AsyncCallbackManagerForChainRun,
|
|
515
533
|
) -> list[Document]:
|
|
516
534
|
"""Get docs."""
|
|
517
|
-
|
|
535
|
+
msg = "ChatVectorDBChain does not support async"
|
|
536
|
+
raise NotImplementedError(msg)
|
|
518
537
|
|
|
519
538
|
@classmethod
|
|
520
539
|
def from_llm(
|
|
@@ -536,7 +555,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
|
|
|
536
555
|
**combine_docs_chain_kwargs,
|
|
537
556
|
)
|
|
538
557
|
condense_question_chain = LLMChain(
|
|
539
|
-
llm=llm,
|
|
558
|
+
llm=llm,
|
|
559
|
+
prompt=condense_question_prompt,
|
|
560
|
+
callbacks=callbacks,
|
|
540
561
|
)
|
|
541
562
|
return cls(
|
|
542
563
|
vectorstore=vectorstore,
|
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
# flake8: noqa
|
|
2
1
|
from langchain_core.prompts.prompt import PromptTemplate
|
|
3
2
|
|
|
4
3
|
_template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
|
|
@@ -6,7 +5,7 @@ _template = """Given the following conversation and a follow up question, rephra
|
|
|
6
5
|
Chat History:
|
|
7
6
|
{chat_history}
|
|
8
7
|
Follow Up Input: {question}
|
|
9
|
-
Standalone question:"""
|
|
8
|
+
Standalone question:""" # noqa: E501
|
|
10
9
|
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
|
|
11
10
|
|
|
12
11
|
prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
|
|
@@ -14,7 +13,7 @@ prompt_template = """Use the following pieces of context to answer the question
|
|
|
14
13
|
{context}
|
|
15
14
|
|
|
16
15
|
Question: {question}
|
|
17
|
-
Helpful Answer:"""
|
|
16
|
+
Helpful Answer:""" # noqa: E501
|
|
18
17
|
QA_PROMPT = PromptTemplate(
|
|
19
18
|
template=prompt_template, input_variables=["context", "question"]
|
|
20
19
|
)
|
|
@@ -60,9 +60,8 @@ class ElasticsearchDatabaseChain(Chain):
|
|
|
60
60
|
@model_validator(mode="after")
|
|
61
61
|
def validate_indices(self) -> Self:
|
|
62
62
|
if self.include_indices and self.ignore_indices:
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
)
|
|
63
|
+
msg = "Cannot specify both 'include_indices' and 'ignore_indices'."
|
|
64
|
+
raise ValueError(msg)
|
|
66
65
|
return self
|
|
67
66
|
|
|
68
67
|
@property
|
|
@@ -81,8 +80,7 @@ class ElasticsearchDatabaseChain(Chain):
|
|
|
81
80
|
"""
|
|
82
81
|
if not self.return_intermediate_steps:
|
|
83
82
|
return [self.output_key]
|
|
84
|
-
|
|
85
|
-
return [self.output_key, INTERMEDIATE_STEPS_KEY]
|
|
83
|
+
return [self.output_key, INTERMEDIATE_STEPS_KEY]
|
|
86
84
|
|
|
87
85
|
def _list_indices(self) -> list[str]:
|
|
88
86
|
all_indices = [
|
|
@@ -111,7 +109,7 @@ class ElasticsearchDatabaseChain(Chain):
|
|
|
111
109
|
[
|
|
112
110
|
"Mapping for index {}:\n{}".format(index, mappings[index]["mappings"])
|
|
113
111
|
for index in mappings
|
|
114
|
-
]
|
|
112
|
+
],
|
|
115
113
|
)
|
|
116
114
|
|
|
117
115
|
def _search(self, indices: list[str], query: str) -> str:
|
|
@@ -144,7 +142,7 @@ class ElasticsearchDatabaseChain(Chain):
|
|
|
144
142
|
|
|
145
143
|
_run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
|
|
146
144
|
intermediate_steps.append(
|
|
147
|
-
es_cmd
|
|
145
|
+
es_cmd,
|
|
148
146
|
) # output: elasticsearch dsl generation (no checker)
|
|
149
147
|
intermediate_steps.append({"es_cmd": es_cmd}) # input: ES search
|
|
150
148
|
result = self._search(indices=indices, query=es_cmd)
|
|
@@ -166,12 +164,13 @@ class ElasticsearchDatabaseChain(Chain):
|
|
|
166
164
|
chain_result: dict[str, Any] = {self.output_key: final_result}
|
|
167
165
|
if self.return_intermediate_steps:
|
|
168
166
|
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
|
|
169
|
-
return chain_result
|
|
170
167
|
except Exception as exc:
|
|
171
168
|
# Append intermediate steps to exception, to aid in logging and later
|
|
172
169
|
# improvement of few shot prompt seeds
|
|
173
170
|
exc.intermediate_steps = intermediate_steps # type: ignore[attr-defined]
|
|
174
|
-
raise
|
|
171
|
+
raise
|
|
172
|
+
|
|
173
|
+
return chain_result
|
|
175
174
|
|
|
176
175
|
@property
|
|
177
176
|
def _chain_type(self) -> str:
|
|
@@ -1,4 +1,3 @@
|
|
|
1
|
-
# flake8: noqa
|
|
2
1
|
from langchain_core.prompts.prompt import PromptTemplate
|
|
3
2
|
|
|
4
3
|
PROMPT_SUFFIX = """Only use the following Elasticsearch indices:
|
|
@@ -17,7 +16,7 @@ Use the following format:
|
|
|
17
16
|
|
|
18
17
|
Question: Question here
|
|
19
18
|
ESQuery: Elasticsearch Query formatted as json
|
|
20
|
-
"""
|
|
19
|
+
""" # noqa: E501
|
|
21
20
|
|
|
22
21
|
DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX)
|
|
23
22
|
|
|
@@ -31,6 +30,6 @@ Answer: Final answer here
|
|
|
31
30
|
|
|
32
31
|
Question: {input}
|
|
33
32
|
Data: {data}
|
|
34
|
-
Answer:"""
|
|
33
|
+
Answer:""" # noqa: E501
|
|
35
34
|
|
|
36
35
|
ANSWER_PROMPT = PromptTemplate.from_template(DEFAULT_ANSWER_TEMPLATE)
|
|
@@ -36,9 +36,9 @@ def __getattr__(name: str) -> Any:
|
|
|
36
36
|
|
|
37
37
|
__all__ = [
|
|
38
38
|
"convert_to_ernie_function",
|
|
39
|
-
"create_structured_output_chain",
|
|
40
39
|
"create_ernie_fn_chain",
|
|
41
|
-
"create_structured_output_runnable",
|
|
42
40
|
"create_ernie_fn_runnable",
|
|
41
|
+
"create_structured_output_chain",
|
|
42
|
+
"create_structured_output_runnable",
|
|
43
43
|
"get_ernie_output_parser",
|
|
44
44
|
]
|
|
@@ -7,7 +7,9 @@ TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
|
|
|
7
7
|
|
|
8
8
|
|
|
9
9
|
def generate_example(
|
|
10
|
-
examples: list[dict],
|
|
10
|
+
examples: list[dict],
|
|
11
|
+
llm: BaseLanguageModel,
|
|
12
|
+
prompt_template: PromptTemplate,
|
|
11
13
|
) -> str:
|
|
12
14
|
"""Return another example given a list of examples for a prompt."""
|
|
13
15
|
prompt = FewShotPromptTemplate(
|