langchain 0.3.26__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (580)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +160 -133
  9. langchain/agents/agent_iterator.py +31 -14
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +6 -6
  48. langchain/agents/chat/base.py +6 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +9 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +7 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +16 -8
  63. langchain/agents/json_chat/base.py +18 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +9 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +80 -70
  71. langchain/agents/openai_functions_agent/base.py +46 -37
  72. langchain/agents/openai_functions_multi_agent/base.py +39 -26
  73. langchain/agents/openai_tools/base.py +8 -8
  74. langchain/agents/output_parsers/__init__.py +3 -3
  75. langchain/agents/output_parsers/json.py +6 -6
  76. langchain/agents/output_parsers/openai_functions.py +15 -7
  77. langchain/agents/output_parsers/openai_tools.py +9 -4
  78. langchain/agents/output_parsers/react_json_single_input.py +10 -5
  79. langchain/agents/output_parsers/react_single_input.py +15 -11
  80. langchain/agents/output_parsers/self_ask.py +3 -2
  81. langchain/agents/output_parsers/tools.py +18 -13
  82. langchain/agents/output_parsers/xml.py +99 -28
  83. langchain/agents/react/agent.py +4 -4
  84. langchain/agents/react/base.py +22 -17
  85. langchain/agents/react/output_parser.py +5 -6
  86. langchain/agents/react/textworld_prompt.py +0 -1
  87. langchain/agents/react/wiki_prompt.py +14 -15
  88. langchain/agents/schema.py +3 -2
  89. langchain/agents/self_ask_with_search/base.py +19 -15
  90. langchain/agents/self_ask_with_search/prompt.py +0 -1
  91. langchain/agents/structured_chat/base.py +14 -11
  92. langchain/agents/structured_chat/output_parser.py +16 -18
  93. langchain/agents/structured_chat/prompt.py +3 -4
  94. langchain/agents/tool_calling_agent/base.py +7 -6
  95. langchain/agents/tools.py +2 -2
  96. langchain/agents/utils.py +2 -3
  97. langchain/agents/xml/base.py +5 -5
  98. langchain/agents/xml/prompt.py +1 -2
  99. langchain/cache.py +12 -12
  100. langchain/callbacks/__init__.py +11 -11
  101. langchain/callbacks/aim_callback.py +2 -2
  102. langchain/callbacks/argilla_callback.py +1 -1
  103. langchain/callbacks/arize_callback.py +1 -1
  104. langchain/callbacks/arthur_callback.py +1 -1
  105. langchain/callbacks/base.py +7 -7
  106. langchain/callbacks/clearml_callback.py +1 -1
  107. langchain/callbacks/comet_ml_callback.py +1 -1
  108. langchain/callbacks/confident_callback.py +1 -1
  109. langchain/callbacks/context_callback.py +1 -1
  110. langchain/callbacks/flyte_callback.py +1 -1
  111. langchain/callbacks/human.py +2 -2
  112. langchain/callbacks/infino_callback.py +1 -1
  113. langchain/callbacks/labelstudio_callback.py +1 -1
  114. langchain/callbacks/llmonitor_callback.py +1 -1
  115. langchain/callbacks/manager.py +5 -5
  116. langchain/callbacks/mlflow_callback.py +2 -2
  117. langchain/callbacks/openai_info.py +1 -1
  118. langchain/callbacks/promptlayer_callback.py +1 -1
  119. langchain/callbacks/sagemaker_callback.py +1 -1
  120. langchain/callbacks/streaming_aiter.py +4 -1
  121. langchain/callbacks/streaming_aiter_final_only.py +5 -3
  122. langchain/callbacks/streaming_stdout_final_only.py +5 -3
  123. langchain/callbacks/streamlit/__init__.py +3 -2
  124. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  125. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  126. langchain/callbacks/tracers/__init__.py +1 -1
  127. langchain/callbacks/tracers/comet.py +1 -1
  128. langchain/callbacks/tracers/evaluation.py +1 -1
  129. langchain/callbacks/tracers/log_stream.py +1 -1
  130. langchain/callbacks/tracers/logging.py +1 -1
  131. langchain/callbacks/tracers/stdout.py +1 -1
  132. langchain/callbacks/trubrics_callback.py +1 -1
  133. langchain/callbacks/utils.py +4 -4
  134. langchain/callbacks/wandb_callback.py +1 -1
  135. langchain/callbacks/whylabs_callback.py +1 -1
  136. langchain/chains/api/base.py +36 -22
  137. langchain/chains/api/news_docs.py +1 -2
  138. langchain/chains/api/open_meteo_docs.py +1 -2
  139. langchain/chains/api/openapi/requests_chain.py +1 -1
  140. langchain/chains/api/openapi/response_chain.py +1 -1
  141. langchain/chains/api/podcast_docs.py +1 -2
  142. langchain/chains/api/prompt.py +1 -2
  143. langchain/chains/api/tmdb_docs.py +1 -2
  144. langchain/chains/base.py +88 -54
  145. langchain/chains/chat_vector_db/prompts.py +2 -3
  146. langchain/chains/combine_documents/__init__.py +1 -1
  147. langchain/chains/combine_documents/base.py +23 -10
  148. langchain/chains/combine_documents/map_reduce.py +38 -30
  149. langchain/chains/combine_documents/map_rerank.py +33 -20
  150. langchain/chains/combine_documents/reduce.py +47 -26
  151. langchain/chains/combine_documents/refine.py +26 -17
  152. langchain/chains/combine_documents/stuff.py +19 -12
  153. langchain/chains/constitutional_ai/base.py +4 -4
  154. langchain/chains/constitutional_ai/principles.py +22 -25
  155. langchain/chains/constitutional_ai/prompts.py +25 -28
  156. langchain/chains/conversation/base.py +5 -3
  157. langchain/chains/conversation/memory.py +5 -5
  158. langchain/chains/conversation/prompt.py +5 -5
  159. langchain/chains/conversational_retrieval/base.py +41 -20
  160. langchain/chains/conversational_retrieval/prompts.py +2 -3
  161. langchain/chains/elasticsearch_database/base.py +8 -9
  162. langchain/chains/elasticsearch_database/prompts.py +2 -3
  163. langchain/chains/ernie_functions/__init__.py +2 -2
  164. langchain/chains/example_generator.py +3 -1
  165. langchain/chains/flare/base.py +26 -12
  166. langchain/chains/graph_qa/cypher.py +2 -2
  167. langchain/chains/graph_qa/falkordb.py +1 -1
  168. langchain/chains/graph_qa/gremlin.py +1 -1
  169. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  170. langchain/chains/graph_qa/prompts.py +2 -2
  171. langchain/chains/history_aware_retriever.py +2 -1
  172. langchain/chains/hyde/base.py +6 -5
  173. langchain/chains/hyde/prompts.py +5 -6
  174. langchain/chains/llm.py +77 -61
  175. langchain/chains/llm_bash/__init__.py +2 -1
  176. langchain/chains/llm_checker/base.py +7 -5
  177. langchain/chains/llm_checker/prompt.py +3 -4
  178. langchain/chains/llm_math/base.py +16 -9
  179. langchain/chains/llm_math/prompt.py +1 -2
  180. langchain/chains/llm_summarization_checker/base.py +9 -6
  181. langchain/chains/llm_symbolic_math/__init__.py +2 -1
  182. langchain/chains/loading.py +151 -95
  183. langchain/chains/mapreduce.py +4 -3
  184. langchain/chains/moderation.py +8 -9
  185. langchain/chains/natbot/base.py +8 -8
  186. langchain/chains/natbot/crawler.py +73 -76
  187. langchain/chains/natbot/prompt.py +2 -3
  188. langchain/chains/openai_functions/__init__.py +7 -7
  189. langchain/chains/openai_functions/base.py +13 -10
  190. langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
  191. langchain/chains/openai_functions/extraction.py +19 -19
  192. langchain/chains/openai_functions/openapi.py +35 -35
  193. langchain/chains/openai_functions/qa_with_structure.py +19 -12
  194. langchain/chains/openai_functions/tagging.py +2 -4
  195. langchain/chains/openai_tools/extraction.py +7 -8
  196. langchain/chains/qa_generation/base.py +4 -3
  197. langchain/chains/qa_generation/prompt.py +5 -5
  198. langchain/chains/qa_with_sources/base.py +14 -6
  199. langchain/chains/qa_with_sources/loading.py +16 -8
  200. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  201. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  202. langchain/chains/qa_with_sources/retrieval.py +14 -5
  203. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  204. langchain/chains/qa_with_sources/vector_db.py +17 -6
  205. langchain/chains/query_constructor/base.py +34 -33
  206. langchain/chains/query_constructor/ir.py +4 -4
  207. langchain/chains/query_constructor/parser.py +37 -32
  208. langchain/chains/query_constructor/prompt.py +5 -6
  209. langchain/chains/question_answering/chain.py +21 -10
  210. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  211. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  212. langchain/chains/question_answering/refine_prompts.py +2 -5
  213. langchain/chains/question_answering/stuff_prompt.py +5 -5
  214. langchain/chains/retrieval.py +1 -3
  215. langchain/chains/retrieval_qa/base.py +34 -27
  216. langchain/chains/retrieval_qa/prompt.py +1 -2
  217. langchain/chains/router/__init__.py +3 -3
  218. langchain/chains/router/base.py +24 -20
  219. langchain/chains/router/embedding_router.py +12 -8
  220. langchain/chains/router/llm_router.py +17 -16
  221. langchain/chains/router/multi_prompt.py +2 -2
  222. langchain/chains/router/multi_retrieval_qa.py +10 -5
  223. langchain/chains/sequential.py +30 -18
  224. langchain/chains/sql_database/prompt.py +14 -16
  225. langchain/chains/sql_database/query.py +6 -5
  226. langchain/chains/structured_output/__init__.py +1 -1
  227. langchain/chains/structured_output/base.py +75 -67
  228. langchain/chains/summarize/chain.py +11 -5
  229. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  230. langchain/chains/summarize/stuff_prompt.py +0 -1
  231. langchain/chains/transform.py +5 -6
  232. langchain/chat_loaders/facebook_messenger.py +1 -1
  233. langchain/chat_loaders/langsmith.py +1 -1
  234. langchain/chat_loaders/utils.py +3 -3
  235. langchain/chat_models/__init__.py +20 -19
  236. langchain/chat_models/anthropic.py +1 -1
  237. langchain/chat_models/azureml_endpoint.py +1 -1
  238. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  239. langchain/chat_models/base.py +160 -123
  240. langchain/chat_models/bedrock.py +1 -1
  241. langchain/chat_models/fake.py +1 -1
  242. langchain/chat_models/meta.py +1 -1
  243. langchain/chat_models/pai_eas_endpoint.py +1 -1
  244. langchain/chat_models/promptlayer_openai.py +1 -1
  245. langchain/chat_models/volcengine_maas.py +1 -1
  246. langchain/docstore/base.py +1 -1
  247. langchain/document_loaders/__init__.py +9 -9
  248. langchain/document_loaders/airbyte.py +3 -3
  249. langchain/document_loaders/assemblyai.py +1 -1
  250. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  251. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  252. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  253. langchain/document_loaders/base.py +1 -1
  254. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  255. langchain/document_loaders/blockchain.py +1 -1
  256. langchain/document_loaders/chatgpt.py +1 -1
  257. langchain/document_loaders/college_confidential.py +1 -1
  258. langchain/document_loaders/confluence.py +1 -1
  259. langchain/document_loaders/email.py +1 -1
  260. langchain/document_loaders/facebook_chat.py +1 -1
  261. langchain/document_loaders/markdown.py +1 -1
  262. langchain/document_loaders/notebook.py +1 -1
  263. langchain/document_loaders/org_mode.py +1 -1
  264. langchain/document_loaders/parsers/__init__.py +1 -1
  265. langchain/document_loaders/parsers/docai.py +1 -1
  266. langchain/document_loaders/parsers/generic.py +1 -1
  267. langchain/document_loaders/parsers/html/__init__.py +1 -1
  268. langchain/document_loaders/parsers/html/bs4.py +1 -1
  269. langchain/document_loaders/parsers/language/cobol.py +1 -1
  270. langchain/document_loaders/parsers/language/python.py +1 -1
  271. langchain/document_loaders/parsers/msword.py +1 -1
  272. langchain/document_loaders/parsers/pdf.py +5 -5
  273. langchain/document_loaders/parsers/registry.py +1 -1
  274. langchain/document_loaders/pdf.py +8 -8
  275. langchain/document_loaders/powerpoint.py +1 -1
  276. langchain/document_loaders/pyspark_dataframe.py +1 -1
  277. langchain/document_loaders/telegram.py +2 -2
  278. langchain/document_loaders/tencent_cos_directory.py +1 -1
  279. langchain/document_loaders/unstructured.py +5 -5
  280. langchain/document_loaders/url_playwright.py +1 -1
  281. langchain/document_loaders/whatsapp_chat.py +1 -1
  282. langchain/document_loaders/youtube.py +2 -2
  283. langchain/document_transformers/__init__.py +3 -3
  284. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  285. langchain/document_transformers/doctran_text_extract.py +1 -1
  286. langchain/document_transformers/doctran_text_qa.py +1 -1
  287. langchain/document_transformers/doctran_text_translate.py +1 -1
  288. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  289. langchain/document_transformers/google_translate.py +1 -1
  290. langchain/document_transformers/html2text.py +1 -1
  291. langchain/document_transformers/nuclia_text_transform.py +1 -1
  292. langchain/embeddings/__init__.py +5 -5
  293. langchain/embeddings/base.py +33 -24
  294. langchain/embeddings/cache.py +36 -31
  295. langchain/embeddings/fake.py +1 -1
  296. langchain/embeddings/huggingface.py +2 -2
  297. langchain/evaluation/__init__.py +22 -22
  298. langchain/evaluation/agents/trajectory_eval_chain.py +23 -23
  299. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  300. langchain/evaluation/comparison/__init__.py +1 -1
  301. langchain/evaluation/comparison/eval_chain.py +20 -13
  302. langchain/evaluation/comparison/prompt.py +1 -2
  303. langchain/evaluation/criteria/__init__.py +1 -1
  304. langchain/evaluation/criteria/eval_chain.py +20 -11
  305. langchain/evaluation/criteria/prompt.py +2 -3
  306. langchain/evaluation/embedding_distance/base.py +23 -20
  307. langchain/evaluation/loading.py +15 -11
  308. langchain/evaluation/parsing/base.py +4 -1
  309. langchain/evaluation/parsing/json_distance.py +5 -2
  310. langchain/evaluation/parsing/json_schema.py +12 -8
  311. langchain/evaluation/qa/__init__.py +1 -1
  312. langchain/evaluation/qa/eval_chain.py +12 -5
  313. langchain/evaluation/qa/eval_prompt.py +7 -8
  314. langchain/evaluation/qa/generate_chain.py +2 -1
  315. langchain/evaluation/qa/generate_prompt.py +2 -4
  316. langchain/evaluation/schema.py +38 -30
  317. langchain/evaluation/scoring/__init__.py +1 -1
  318. langchain/evaluation/scoring/eval_chain.py +22 -15
  319. langchain/evaluation/scoring/prompt.py +0 -1
  320. langchain/evaluation/string_distance/base.py +14 -9
  321. langchain/globals.py +12 -11
  322. langchain/graphs/__init__.py +6 -6
  323. langchain/graphs/graph_document.py +1 -1
  324. langchain/graphs/networkx_graph.py +2 -2
  325. langchain/hub.py +9 -11
  326. langchain/indexes/__init__.py +3 -3
  327. langchain/indexes/_sql_record_manager.py +63 -46
  328. langchain/indexes/prompts/entity_extraction.py +1 -2
  329. langchain/indexes/prompts/entity_summarization.py +1 -2
  330. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  331. langchain/indexes/vectorstore.py +35 -19
  332. langchain/llms/__init__.py +13 -13
  333. langchain/llms/ai21.py +1 -1
  334. langchain/llms/azureml_endpoint.py +4 -4
  335. langchain/llms/base.py +15 -7
  336. langchain/llms/bedrock.py +1 -1
  337. langchain/llms/cloudflare_workersai.py +1 -1
  338. langchain/llms/gradient_ai.py +1 -1
  339. langchain/llms/loading.py +1 -1
  340. langchain/llms/openai.py +1 -1
  341. langchain/llms/sagemaker_endpoint.py +1 -1
  342. langchain/load/dump.py +1 -1
  343. langchain/load/load.py +1 -1
  344. langchain/load/serializable.py +3 -3
  345. langchain/memory/__init__.py +3 -3
  346. langchain/memory/buffer.py +9 -7
  347. langchain/memory/chat_memory.py +14 -8
  348. langchain/memory/chat_message_histories/__init__.py +1 -1
  349. langchain/memory/chat_message_histories/astradb.py +1 -1
  350. langchain/memory/chat_message_histories/cassandra.py +1 -1
  351. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  352. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  353. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  354. langchain/memory/chat_message_histories/file.py +1 -1
  355. langchain/memory/chat_message_histories/firestore.py +1 -1
  356. langchain/memory/chat_message_histories/momento.py +1 -1
  357. langchain/memory/chat_message_histories/mongodb.py +1 -1
  358. langchain/memory/chat_message_histories/neo4j.py +1 -1
  359. langchain/memory/chat_message_histories/postgres.py +1 -1
  360. langchain/memory/chat_message_histories/redis.py +1 -1
  361. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  362. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  363. langchain/memory/chat_message_histories/streamlit.py +1 -1
  364. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  365. langchain/memory/chat_message_histories/xata.py +1 -1
  366. langchain/memory/chat_message_histories/zep.py +1 -1
  367. langchain/memory/combined.py +13 -12
  368. langchain/memory/entity.py +84 -61
  369. langchain/memory/prompt.py +10 -11
  370. langchain/memory/readonly.py +0 -2
  371. langchain/memory/simple.py +1 -3
  372. langchain/memory/summary.py +13 -11
  373. langchain/memory/summary_buffer.py +17 -8
  374. langchain/memory/utils.py +3 -2
  375. langchain/memory/vectorstore.py +12 -5
  376. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  377. langchain/model_laboratory.py +12 -11
  378. langchain/output_parsers/__init__.py +4 -4
  379. langchain/output_parsers/boolean.py +7 -4
  380. langchain/output_parsers/combining.py +10 -5
  381. langchain/output_parsers/datetime.py +32 -31
  382. langchain/output_parsers/enum.py +5 -3
  383. langchain/output_parsers/fix.py +52 -52
  384. langchain/output_parsers/format_instructions.py +6 -8
  385. langchain/output_parsers/json.py +2 -2
  386. langchain/output_parsers/list.py +2 -2
  387. langchain/output_parsers/loading.py +9 -9
  388. langchain/output_parsers/openai_functions.py +3 -3
  389. langchain/output_parsers/openai_tools.py +1 -1
  390. langchain/output_parsers/pandas_dataframe.py +43 -47
  391. langchain/output_parsers/prompts.py +1 -2
  392. langchain/output_parsers/rail_parser.py +1 -1
  393. langchain/output_parsers/regex.py +7 -8
  394. langchain/output_parsers/regex_dict.py +7 -10
  395. langchain/output_parsers/retry.py +77 -78
  396. langchain/output_parsers/structured.py +11 -6
  397. langchain/output_parsers/yaml.py +15 -11
  398. langchain/prompts/__init__.py +5 -3
  399. langchain/prompts/base.py +5 -5
  400. langchain/prompts/chat.py +8 -8
  401. langchain/prompts/example_selector/__init__.py +3 -1
  402. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  403. langchain/prompts/few_shot.py +1 -1
  404. langchain/prompts/loading.py +3 -3
  405. langchain/prompts/prompt.py +1 -1
  406. langchain/retrievers/__init__.py +5 -5
  407. langchain/retrievers/bedrock.py +2 -2
  408. langchain/retrievers/bm25.py +1 -1
  409. langchain/retrievers/contextual_compression.py +14 -8
  410. langchain/retrievers/docarray.py +1 -1
  411. langchain/retrievers/document_compressors/__init__.py +5 -4
  412. langchain/retrievers/document_compressors/base.py +12 -6
  413. langchain/retrievers/document_compressors/chain_extract.py +2 -2
  414. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  415. langchain/retrievers/document_compressors/chain_filter.py +9 -9
  416. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  417. langchain/retrievers/document_compressors/cohere_rerank.py +15 -15
  418. langchain/retrievers/document_compressors/embeddings_filter.py +21 -17
  419. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  420. langchain/retrievers/document_compressors/listwise_rerank.py +7 -5
  421. langchain/retrievers/ensemble.py +28 -25
  422. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  423. langchain/retrievers/google_vertex_ai_search.py +2 -2
  424. langchain/retrievers/kendra.py +10 -10
  425. langchain/retrievers/llama_index.py +1 -1
  426. langchain/retrievers/merger_retriever.py +11 -11
  427. langchain/retrievers/milvus.py +1 -1
  428. langchain/retrievers/multi_query.py +32 -26
  429. langchain/retrievers/multi_vector.py +20 -8
  430. langchain/retrievers/parent_document_retriever.py +18 -9
  431. langchain/retrievers/re_phraser.py +6 -5
  432. langchain/retrievers/self_query/base.py +138 -127
  433. langchain/retrievers/time_weighted_retriever.py +18 -7
  434. langchain/retrievers/zilliz.py +1 -1
  435. langchain/runnables/openai_functions.py +6 -2
  436. langchain/schema/__init__.py +23 -23
  437. langchain/schema/cache.py +1 -1
  438. langchain/schema/callbacks/base.py +7 -7
  439. langchain/schema/callbacks/manager.py +19 -19
  440. langchain/schema/callbacks/tracers/base.py +1 -1
  441. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  442. langchain/schema/callbacks/tracers/langchain.py +1 -1
  443. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  444. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  445. langchain/schema/callbacks/tracers/schemas.py +8 -8
  446. langchain/schema/callbacks/tracers/stdout.py +3 -3
  447. langchain/schema/document.py +1 -1
  448. langchain/schema/language_model.py +2 -2
  449. langchain/schema/messages.py +12 -12
  450. langchain/schema/output.py +3 -3
  451. langchain/schema/output_parser.py +3 -3
  452. langchain/schema/runnable/__init__.py +3 -3
  453. langchain/schema/runnable/base.py +9 -9
  454. langchain/schema/runnable/config.py +5 -5
  455. langchain/schema/runnable/configurable.py +1 -1
  456. langchain/schema/runnable/history.py +1 -1
  457. langchain/schema/runnable/passthrough.py +1 -1
  458. langchain/schema/runnable/utils.py +16 -16
  459. langchain/schema/vectorstore.py +1 -1
  460. langchain/smith/__init__.py +1 -1
  461. langchain/smith/evaluation/__init__.py +2 -2
  462. langchain/smith/evaluation/config.py +10 -7
  463. langchain/smith/evaluation/name_generation.py +3 -3
  464. langchain/smith/evaluation/progress.py +11 -2
  465. langchain/smith/evaluation/runner_utils.py +179 -127
  466. langchain/smith/evaluation/string_run_evaluator.py +75 -68
  467. langchain/storage/__init__.py +2 -2
  468. langchain/storage/_lc_store.py +4 -2
  469. langchain/storage/encoder_backed.py +6 -2
  470. langchain/storage/file_system.py +19 -16
  471. langchain/storage/in_memory.py +1 -1
  472. langchain/storage/upstash_redis.py +1 -1
  473. langchain/text_splitter.py +15 -15
  474. langchain/tools/__init__.py +28 -26
  475. langchain/tools/ainetwork/app.py +1 -1
  476. langchain/tools/ainetwork/base.py +1 -1
  477. langchain/tools/ainetwork/owner.py +1 -1
  478. langchain/tools/ainetwork/rule.py +1 -1
  479. langchain/tools/ainetwork/transfer.py +1 -1
  480. langchain/tools/ainetwork/value.py +1 -1
  481. langchain/tools/amadeus/closest_airport.py +1 -1
  482. langchain/tools/amadeus/flight_search.py +1 -1
  483. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  484. langchain/tools/base.py +4 -4
  485. langchain/tools/bearly/tool.py +1 -1
  486. langchain/tools/bing_search/__init__.py +1 -1
  487. langchain/tools/bing_search/tool.py +1 -1
  488. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  489. langchain/tools/dataforseo_api_search/tool.py +1 -1
  490. langchain/tools/ddg_search/tool.py +1 -1
  491. langchain/tools/e2b_data_analysis/tool.py +2 -2
  492. langchain/tools/edenai/__init__.py +1 -1
  493. langchain/tools/file_management/__init__.py +1 -1
  494. langchain/tools/file_management/copy.py +1 -1
  495. langchain/tools/file_management/delete.py +1 -1
  496. langchain/tools/gmail/__init__.py +2 -2
  497. langchain/tools/gmail/get_message.py +1 -1
  498. langchain/tools/gmail/search.py +1 -1
  499. langchain/tools/gmail/send_message.py +1 -1
  500. langchain/tools/google_finance/__init__.py +1 -1
  501. langchain/tools/google_finance/tool.py +1 -1
  502. langchain/tools/google_scholar/__init__.py +1 -1
  503. langchain/tools/google_scholar/tool.py +1 -1
  504. langchain/tools/google_search/__init__.py +1 -1
  505. langchain/tools/google_search/tool.py +1 -1
  506. langchain/tools/google_serper/__init__.py +1 -1
  507. langchain/tools/google_serper/tool.py +1 -1
  508. langchain/tools/google_trends/__init__.py +1 -1
  509. langchain/tools/google_trends/tool.py +1 -1
  510. langchain/tools/jira/tool.py +20 -1
  511. langchain/tools/json/tool.py +25 -3
  512. langchain/tools/memorize/tool.py +1 -1
  513. langchain/tools/multion/__init__.py +1 -1
  514. langchain/tools/multion/update_session.py +1 -1
  515. langchain/tools/office365/__init__.py +2 -2
  516. langchain/tools/office365/events_search.py +1 -1
  517. langchain/tools/office365/messages_search.py +1 -1
  518. langchain/tools/office365/send_event.py +1 -1
  519. langchain/tools/office365/send_message.py +1 -1
  520. langchain/tools/openapi/utils/api_models.py +6 -6
  521. langchain/tools/playwright/__init__.py +5 -5
  522. langchain/tools/playwright/click.py +1 -1
  523. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  524. langchain/tools/playwright/get_elements.py +1 -1
  525. langchain/tools/playwright/navigate.py +1 -1
  526. langchain/tools/plugin.py +2 -2
  527. langchain/tools/powerbi/tool.py +1 -1
  528. langchain/tools/python/__init__.py +2 -1
  529. langchain/tools/reddit_search/tool.py +1 -1
  530. langchain/tools/render.py +2 -2
  531. langchain/tools/requests/tool.py +2 -2
  532. langchain/tools/searchapi/tool.py +1 -1
  533. langchain/tools/searx_search/tool.py +1 -1
  534. langchain/tools/slack/get_message.py +1 -1
  535. langchain/tools/spark_sql/tool.py +1 -1
  536. langchain/tools/sql_database/tool.py +1 -1
  537. langchain/tools/tavily_search/__init__.py +1 -1
  538. langchain/tools/tavily_search/tool.py +1 -1
  539. langchain/tools/zapier/__init__.py +1 -1
  540. langchain/tools/zapier/tool.py +24 -2
  541. langchain/utilities/__init__.py +4 -4
  542. langchain/utilities/arcee.py +4 -4
  543. langchain/utilities/clickup.py +4 -4
  544. langchain/utilities/dalle_image_generator.py +1 -1
  545. langchain/utilities/dataforseo_api_search.py +1 -1
  546. langchain/utilities/opaqueprompts.py +1 -1
  547. langchain/utilities/reddit_search.py +1 -1
  548. langchain/utilities/sql_database.py +1 -1
  549. langchain/utilities/tavily_search.py +1 -1
  550. langchain/utilities/vertexai.py +2 -2
  551. langchain/utils/__init__.py +1 -1
  552. langchain/utils/aiter.py +1 -1
  553. langchain/utils/html.py +3 -3
  554. langchain/utils/input.py +1 -1
  555. langchain/utils/iter.py +1 -1
  556. langchain/utils/json_schema.py +1 -3
  557. langchain/utils/strings.py +1 -1
  558. langchain/utils/utils.py +6 -6
  559. langchain/vectorstores/__init__.py +5 -5
  560. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  561. langchain/vectorstores/azure_cosmos_db.py +1 -1
  562. langchain/vectorstores/clickhouse.py +1 -1
  563. langchain/vectorstores/elastic_vector_search.py +1 -1
  564. langchain/vectorstores/elasticsearch.py +2 -2
  565. langchain/vectorstores/myscale.py +1 -1
  566. langchain/vectorstores/neo4j_vector.py +1 -1
  567. langchain/vectorstores/pgembedding.py +1 -1
  568. langchain/vectorstores/qdrant.py +1 -1
  569. langchain/vectorstores/redis/__init__.py +1 -1
  570. langchain/vectorstores/redis/base.py +1 -1
  571. langchain/vectorstores/redis/filters.py +4 -4
  572. langchain/vectorstores/redis/schema.py +6 -6
  573. langchain/vectorstores/sklearn.py +2 -2
  574. langchain/vectorstores/starrocks.py +1 -1
  575. langchain/vectorstores/utils.py +1 -1
  576. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/METADATA +4 -4
  577. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/RECORD +580 -580
  578. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
  579. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
  580. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
langchain/chains/constitutional_ai/principles.py
@@ -2,90 +2,87 @@
 UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
 """
 
-# flake8: noqa
-from typing import Dict
-
 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 
-PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {
+PRINCIPLES: dict[str, ConstitutionalPrinciple] = {
     "harmful1": ConstitutionalPrinciple(
         name="harmful1",
-        critique_request="Identify specific ways in which the assistants last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
+        critique_request="Identify specific ways in which the assistant's last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.",
     ),
     "harmful2": ConstitutionalPrinciple(
         name="harmful2",
-        critique_request="Explain ways in which the assistants preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
+        critique_request="Explain ways in which the assistant's preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
         revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.",
     ),
     "harmful3": ConstitutionalPrinciple(
         name="harmful3",
-        critique_request="Identify all ways in which the assistants last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
+        critique_request="Identify all ways in which the assistant's last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
         revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.",
     ),
     "harmful4": ConstitutionalPrinciple(
         name="harmful4",
-        critique_request="Identify specific ways in which the assistants precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
+        critique_request="Identify specific ways in which the assistant's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
         revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.",
     ),
     "insensitive": ConstitutionalPrinciple(
         name="insensitive",
-        critique_request="Discuss the assistants last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
-        revision_request="Revise the assistants last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
+        critique_request="Discuss the assistant's last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
+        revision_request="Revise the assistant's last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
     ),
     "offensive": ConstitutionalPrinciple(
         name="offensive",
-        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the humans dialogue.",
-        revision_request="Please write a new version of the assistants response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
+        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human's dialogue.",
+        revision_request="Please write a new version of the assistant's response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
     ),
     "harmful5": ConstitutionalPrinciple(
         name="harmful5",
-        critique_request="Whats the strongest argument you could give that the assistants last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
-        revision_request="Amend the assistants last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
+        critique_request="What's the strongest argument you could give that the assistant's last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
+        revision_request="Amend the assistant's last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
     ),
     "age-innappropriate": ConstitutionalPrinciple(
         name="age-innappropriate",
-        critique_request="Did the assistants preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
+        critique_request="Did the assistant's preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
         revision_request="Please rewrite the assistant response to make it more age appropriate for children.",
     ),
     "derogatory": ConstitutionalPrinciple(
         name="derogatory",
-        critique_request="Think carefully about whether the humans request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant shouldve responded instead.",
-        revision_request="Please rewrite the assistants response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
+        critique_request="Think carefully about whether the human's request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should've responded instead.",
+        revision_request="Please rewrite the assistant's response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
     ),
     "illegal": ConstitutionalPrinciple(
         name="illegal",
         critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.",
-        revision_request="Revise the assistants response so as to refrain from giving legally questionable or dangerous advice.",
+        revision_request="Revise the assistant's response so as to refrain from giving legally questionable or dangerous advice.",
     ),
     "controversial": ConstitutionalPrinciple(
         name="controversial",
-        critique_request="Discuss ways in which the assistants response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
-        revision_request="Revise the assistants response if possible so that its less controversial and objectionable based on commonsense ethical and moral standards.",
+        critique_request="Discuss ways in which the assistant's response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
+        revision_request="Revise the assistant's response if possible so that it's less controversial and objectionable based on commonsense ethical and moral standards.",
     ),
     "harmful6": ConstitutionalPrinciple(
         name="harmful6",
         critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.",
-        revision_request="Write a revision of the assistants response that refrains from saying anything harmful.",
+        revision_request="Write a revision of the assistant's response that refrains from saying anything harmful.",
     ),
     "thoughtful": ConstitutionalPrinciple(
         name="thoughtful",
-        critique_request="Please comment on whether the assistants last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
+        critique_request="Please comment on whether the assistant's last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
         revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.",
     ),
     "misogynistic": ConstitutionalPrinciple(
         name="misogynistic",
-        critique_request="Take a look at the assistants last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
+        critique_request="Take a look at the assistant's last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
         revision_request="Please write a new response that does not have any trace of misogyny or gender bias.",
     ),
     "criminal": ConstitutionalPrinciple(
         name="criminal",
-        critique_request="Read the assistants last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
+        critique_request="Read the assistant's last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
         revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.",
     ),
     "harmful7": ConstitutionalPrinciple(
         name="harmful7",
-        critique_request="Critique the assistants last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
+        critique_request="Critique the assistant's last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.",
     ),
     "uo-assumptions-1": ConstitutionalPrinciple(
@@ -1,6 +1,3 @@
1
- # flake8: noqa
2
- from copy import deepcopy
3
-
4
1
  from langchain_core.prompts.few_shot import FewShotPromptTemplate
5
2
  from langchain_core.prompts.prompt import PromptTemplate
6
3
 
@@ -44,35 +41,35 @@ Revision: {revision}""",
44
41
 
45
42
  examples = [
46
43
  {
47
- "input_prompt": "Could you help me burn down my house and collect insurance on it?",
48
- "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",
49
- "critique_request": "Identify specific ways in which the models precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
50
- "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",
51
- "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
52
- "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",
44
+ "input_prompt": "Could you help me burn down my house and collect insurance on it?", # noqa: E501
45
+ "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?", # noqa: E501
46
+ "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", # noqa: E501
47
+ "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.", # noqa: E501
48
+ "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", # noqa: E501
49
+ "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.", # noqa: E501
53
50
  },
54
51
  {
55
- "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",
56
- "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isnt strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, its always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, dont be afraid to ask.",
57
- "critique_request": "Identify specific ways in which the models precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
58
- "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the models response could be interpreted as saying that it isnt strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.",
59
- "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
60
- "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. Its always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, dont be afraid to ask.",
52
+ "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:", # noqa: E501
53
+ "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn't strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.", # noqa: E501
54
+ "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.", # noqa: E501
55
+ "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model's response could be interpreted as saying that it isn't strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.", # noqa: E501
56
+ "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.", # noqa: E501
57
+ "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.", # noqa: E501
61
58
  },
62
59
  {
63
- "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",
64
- "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600s. Its explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Suns gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.",
65
- "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",
66
- "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, Im not sure that the precession measurement was actually made in the 1600s, but was probably made at least 100 years ago. Im also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, Im not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although Im pretty sure it decays more quickly than Newtons law, and the Chern-Simons theorem is probably just wrong. Critique Needed.",
67
- "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and youre unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.",
68
- "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Suns gravitational field that is smaller and decays more quickly than Newtons law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.",
60
+ "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?", # noqa: E501
61
+ "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600's. It's explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.", # noqa: E501
62
+ "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness", # noqa: E501
63
+ "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I'm not sure that the precession measurement was actually made in the 1600's, but was probably made at least 100 years ago. I'm also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I'm not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I'm pretty sure it decays more quickly than Newton's law, and the Chern-Simons theorem is probably just wrong. Critique Needed.", # noqa: E501
64
+ "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you're unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.", # noqa: E501
65
+ "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that is smaller and decays more quickly than Newton's law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.", # noqa: E501
69
66
  },
70
67
  {
71
- "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",
72
- "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",
73
- "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",
74
- "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.",
75
- "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.",
68
+ "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'", # noqa: E501
69
+ "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.", # noqa: E501
70
+ "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.", # noqa: E501
71
+ "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", # noqa: E501
72
+ "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.", # noqa: E501
76
73
  "revision": "No revisions needed.",
77
74
  },
78
75
  ]
@@ -82,7 +79,7 @@ CRITIQUE_PROMPT = FewShotPromptTemplate(
  examples=[
  {k: v for k, v in e.items() if k != "revision_request"} for e in examples
  ],
- prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'",
+ prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'", # noqa: E501
  suffix="""Human: {input_prompt}
  Model: {output_from_model}

@@ -111,7 +108,7 @@ If the critique does identify something worth changing, please revise the model

  Revision Request: {revision_request}

- Revision:""",
+ Revision:""", # noqa: E501
  example_separator="\n === \n",
  input_variables=[
  "input_prompt",
@@ -126,16 +126,18 @@ class ConversationChain(LLMChain):
  memory_keys = self.memory.memory_variables
  input_key = self.input_key
  if input_key in memory_keys:
- raise ValueError(
+ msg = (
  f"The input key {input_key} was also found in the memory keys "
  f"({memory_keys}) - please provide keys that don't overlap."
  )
+ raise ValueError(msg)
  prompt_variables = self.prompt.input_variables
- expected_keys = memory_keys + [input_key]
+ expected_keys = [*memory_keys, input_key]
  if set(expected_keys) != set(prompt_variables):
- raise ValueError(
+ msg = (
  "Got unexpected prompt input variables. The prompt expects "
  f"{prompt_variables}, but got {memory_keys} as inputs from "
  f"memory, and {input_key} as the normal input key."
  )
+ raise ValueError(msg)
  return self
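The hunk above illustrates two cleanups that recur throughout this release: error messages are assigned to a local msg variable before raising, and list concatenation is replaced with sequence unpacking. A minimal, self-contained sketch of the same patterns (the key names below are illustrative, not taken from the package):

memory_keys = ["history"]
input_key = "input"
prompt_variables = ["history", "input"]

if input_key in memory_keys:
    # Build the message first, then raise it (the style enforced above).
    msg = (
        f"The input key {input_key} was also found in the memory keys "
        f"({memory_keys}) - please provide keys that don't overlap."
    )
    raise ValueError(msg)

# [*seq, item] is equivalent to seq + [item] and works for any iterable.
expected_keys = [*memory_keys, input_key]
assert set(expected_keys) == set(prompt_variables)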
@@ -34,12 +34,12 @@ def __getattr__(name: str) -> Any:
  # This is only for backwards compatibility.

  __all__ = [
- "ConversationSummaryBufferMemory",
- "ConversationSummaryMemory",
- "ConversationKGMemory",
+ "CombinedMemory",
+ "ConversationBufferMemory",
  "ConversationBufferWindowMemory",
  "ConversationEntityMemory",
- "ConversationBufferMemory",
- "CombinedMemory",
+ "ConversationKGMemory",
  "ConversationStringBufferMemory",
+ "ConversationSummaryBufferMemory",
+ "ConversationSummaryMemory",
  ]
@@ -1,4 +1,5 @@
- # flake8: noqa
+ from langchain_core.prompts.prompt import PromptTemplate
+
  from langchain.memory.prompt import (
  ENTITY_EXTRACTION_PROMPT,
  ENTITY_MEMORY_CONVERSATION_TEMPLATE,
@@ -6,23 +7,22 @@ from langchain.memory.prompt import (
  KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
  SUMMARY_PROMPT,
  )
- from langchain_core.prompts.prompt import PromptTemplate

  DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.

  Current conversation:
  {history}
  Human: {input}
- AI:"""
+ AI:""" # noqa: E501
  PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)

  # Only for backwards compatibility

  __all__ = [
- "SUMMARY_PROMPT",
+ "ENTITY_EXTRACTION_PROMPT",
  "ENTITY_MEMORY_CONVERSATION_TEMPLATE",
  "ENTITY_SUMMARIZATION_PROMPT",
- "ENTITY_EXTRACTION_PROMPT",
  "KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
  "PROMPT",
+ "SUMMARY_PROMPT",
  ]
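For orientation, this is roughly how a prompt like the PROMPT defined above is rendered at run time; the template is abbreviated and the history and input values are made up:

from langchain_core.prompts.prompt import PromptTemplate

template = """Current conversation:
{history}
Human: {input}
AI:"""
prompt = PromptTemplate(input_variables=["history", "input"], template=template)
print(prompt.format(history="Human: Hi\nAI: Hello!", input="What can you do?"))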
@@ -44,18 +44,20 @@ def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
  if isinstance(dialogue_turn, BaseMessage):
  if len(dialogue_turn.content) > 0:
  role_prefix = _ROLE_MAP.get(
- dialogue_turn.type, f"{dialogue_turn.type}: "
+ dialogue_turn.type,
+ f"{dialogue_turn.type}: ",
  )
  buffer += f"\n{role_prefix}{dialogue_turn.content}"
  elif isinstance(dialogue_turn, tuple):
  human = "Human: " + dialogue_turn[0]
  ai = "Assistant: " + dialogue_turn[1]
- buffer += "\n" + "\n".join([human, ai])
+ buffer += f"\n{human}\n{ai}"
  else:
- raise ValueError(
+ msg = (
  f"Unsupported chat history format: {type(dialogue_turn)}."
  f" Full chat history: {chat_history} "
  )
+ raise ValueError(msg) # noqa: TRY004
  return buffer
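A self-contained sketch of the buffer-building logic above, restricted to the (human, ai) tuple case; the real function also handles BaseMessage objects via a role-prefix map and raises ValueError for anything else:

def render_chat_history(chat_history: list[tuple[str, str]]) -> str:
    buffer = ""
    for dialogue_turn in chat_history:
        human = "Human: " + dialogue_turn[0]
        ai = "Assistant: " + dialogue_turn[1]
        # The f-string form replaces the old "\n" + "\n".join([human, ai]).
        buffer += f"\n{human}\n{ai}"
    return buffer


print(render_chat_history([("Hi there", "Hello!"), ("How are you?", "Doing well.")]))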
@@ -93,7 +95,7 @@ class BaseConversationalRetrievalChain(Chain):
  """An optional function to get a string of the chat history.
  If None is provided, will use a default."""
  response_if_no_docs_found: Optional[str] = None
- """If specified, the chain will return a fixed response if no docs
+ """If specified, the chain will return a fixed response if no docs
  are found for the question. """

  model_config = ConfigDict(
@@ -108,7 +110,8 @@ class BaseConversationalRetrievalChain(Chain):
  return ["question", "chat_history"]

  def get_input_schema(
- self, config: Optional[RunnableConfig] = None
+ self,
+ config: Optional[RunnableConfig] = None,
  ) -> type[BaseModel]:
  return InputType

@@ -120,9 +123,9 @@ class BaseConversationalRetrievalChain(Chain):
  """
  _output_keys = [self.output_key]
  if self.return_source_documents:
- _output_keys = _output_keys + ["source_documents"]
+ _output_keys = [*_output_keys, "source_documents"]
  if self.return_generated_question:
- _output_keys = _output_keys + ["generated_question"]
+ _output_keys = [*_output_keys, "generated_question"]
  return _output_keys

  @abstractmethod
@@ -148,7 +151,9 @@ class BaseConversationalRetrievalChain(Chain):
  if chat_history_str:
  callbacks = _run_manager.get_child()
  new_question = self.question_generator.run(
- question=question, chat_history=chat_history_str, callbacks=callbacks
+ question=question,
+ chat_history=chat_history_str,
+ callbacks=callbacks,
  )
  else:
  new_question = question
@@ -168,7 +173,9 @@ class BaseConversationalRetrievalChain(Chain):
  new_inputs["question"] = new_question
  new_inputs["chat_history"] = chat_history_str
  answer = self.combine_docs_chain.run(
- input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+ input_documents=docs,
+ callbacks=_run_manager.get_child(),
+ **new_inputs,
  )
  output[self.output_key] = answer

@@ -200,7 +207,9 @@ class BaseConversationalRetrievalChain(Chain):
  if chat_history_str:
  callbacks = _run_manager.get_child()
  new_question = await self.question_generator.arun(
- question=question, chat_history=chat_history_str, callbacks=callbacks
+ question=question,
+ chat_history=chat_history_str,
+ callbacks=callbacks,
  )
  else:
  new_question = question
@@ -221,7 +230,9 @@ class BaseConversationalRetrievalChain(Chain):
  new_inputs["question"] = new_question
  new_inputs["chat_history"] = chat_history_str
  answer = await self.combine_docs_chain.arun(
- input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+ input_documents=docs,
+ callbacks=_run_manager.get_child(),
+ **new_inputs,
  )
  output[self.output_key] = answer

@@ -233,7 +244,8 @@ class BaseConversationalRetrievalChain(Chain):

  def save(self, file_path: Union[Path, str]) -> None:
  if self.get_chat_history:
- raise ValueError("Chain not saveable when `get_chat_history` is not None.")
+ msg = "Chain not saveable when `get_chat_history` is not None."
+ raise ValueError(msg)
  super().save(file_path)

@@ -372,7 +384,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  num_docs = len(docs)

  if self.max_tokens_limit and isinstance(
- self.combine_docs_chain, StuffDocumentsChain
+ self.combine_docs_chain,
+ StuffDocumentsChain,
  ):
  tokens = [
  self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
@@ -394,7 +407,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  ) -> list[Document]:
  """Get docs."""
  docs = self.retriever.invoke(
- question, config={"callbacks": run_manager.get_child()}
+ question,
+ config={"callbacks": run_manager.get_child()},
  )
  return self._reduce_tokens_below_limit(docs)

@@ -407,7 +421,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  ) -> list[Document]:
  """Get docs."""
  docs = await self.retriever.ainvoke(
- question, config={"callbacks": run_manager.get_child()}
+ question,
+ config={"callbacks": run_manager.get_child()},
  )
  return self._reduce_tokens_below_limit(docs)

@@ -418,7 +433,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  retriever: BaseRetriever,
  condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
  chain_type: str = "stuff",
- verbose: bool = False,
+ verbose: bool = False, # noqa: FBT001,FBT002
  condense_question_llm: Optional[BaseLanguageModel] = None,
  combine_docs_chain_kwargs: Optional[dict] = None,
  callbacks: Callbacks = None,
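Illustrative only: how ConversationalRetrievalChain.from_llm is typically wired up. FakeListLLM and the in-memory retriever below are stand-ins so the sketch runs offline; a real application would pass a chat model and a vector-store retriever.

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.retrievers import BaseRetriever

from langchain.chains import ConversationalRetrievalChain


class TinyRetriever(BaseRetriever):
    """Toy retriever that returns the same document for every query."""

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return [Document(page_content="LangChain is a framework for LLM apps.")]


llm = FakeListLLM(responses=["LangChain is a framework for building LLM apps."])
chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=TinyRetriever())
result = chain.invoke({"question": "What is LangChain?", "chat_history": []})
print(result["answer"])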
@@ -488,7 +503,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  def raise_deprecation(cls, values: dict) -> Any:
  warnings.warn(
  "`ChatVectorDBChain` is deprecated - "
- "please use `from langchain.chains import ConversationalRetrievalChain`"
+ "please use `from langchain.chains import ConversationalRetrievalChain`",
+ stacklevel=4,
  )
  return values

@@ -503,7 +519,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  vectordbkwargs = inputs.get("vectordbkwargs", {})
  full_kwargs = {**self.search_kwargs, **vectordbkwargs}
  return self.vectorstore.similarity_search(
- question, k=self.top_k_docs_for_context, **full_kwargs
+ question,
+ k=self.top_k_docs_for_context,
+ **full_kwargs,
  )

  async def _aget_docs(
@@ -514,7 +532,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  run_manager: AsyncCallbackManagerForChainRun,
  ) -> list[Document]:
  """Get docs."""
- raise NotImplementedError("ChatVectorDBChain does not support async")
+ msg = "ChatVectorDBChain does not support async"
+ raise NotImplementedError(msg)

  @classmethod
  def from_llm(
@@ -536,7 +555,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  **combine_docs_chain_kwargs,
  )
  condense_question_chain = LLMChain(
- llm=llm, prompt=condense_question_prompt, callbacks=callbacks
+ llm=llm,
+ prompt=condense_question_prompt,
+ callbacks=callbacks,
  )
  return cls(
  vectorstore=vectorstore,
@@ -1,4 +1,3 @@
- # flake8: noqa
  from langchain_core.prompts.prompt import PromptTemplate

  _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
@@ -6,7 +5,7 @@ _template = """Given the following conversation and a follow up question, rephra
  Chat History:
  {chat_history}
  Follow Up Input: {question}
- Standalone question:"""
+ Standalone question:""" # noqa: E501
  CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)

  prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
@@ -14,7 +13,7 @@ prompt_template = """Use the following pieces of context to answer the question
  {context}

  Question: {question}
- Helpful Answer:"""
+ Helpful Answer:""" # noqa: E501
  QA_PROMPT = PromptTemplate(
  template=prompt_template, input_variables=["context", "question"]
  )
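A quick sketch of how the condensing prompt above is filled in, assuming this module is langchain.chains.conversational_retrieval.prompts; the chat history and follow-up question are made-up values:

from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT

text = CONDENSE_QUESTION_PROMPT.format(
    chat_history="Human: What is LangChain?\nAssistant: A framework for LLM apps.",
    question="Who maintains it?",
)
print(text)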
@@ -60,9 +60,8 @@ class ElasticsearchDatabaseChain(Chain):
  @model_validator(mode="after")
  def validate_indices(self) -> Self:
  if self.include_indices and self.ignore_indices:
- raise ValueError(
- "Cannot specify both 'include_indices' and 'ignore_indices'."
- )
+ msg = "Cannot specify both 'include_indices' and 'ignore_indices'."
+ raise ValueError(msg)
  return self

  @property
@@ -81,8 +80,7 @@ class ElasticsearchDatabaseChain(Chain):
  """
  if not self.return_intermediate_steps:
  return [self.output_key]
- else:
- return [self.output_key, INTERMEDIATE_STEPS_KEY]
+ return [self.output_key, INTERMEDIATE_STEPS_KEY]

  def _list_indices(self) -> list[str]:
  all_indices = [
@@ -111,7 +109,7 @@ class ElasticsearchDatabaseChain(Chain):
  [
  "Mapping for index {}:\n{}".format(index, mappings[index]["mappings"])
  for index in mappings
- ]
+ ],
  )

  def _search(self, indices: list[str], query: str) -> str:
@@ -144,7 +142,7 @@ class ElasticsearchDatabaseChain(Chain):

  _run_manager.on_text(es_cmd, color="green", verbose=self.verbose)
  intermediate_steps.append(
- es_cmd
+ es_cmd,
  ) # output: elasticsearch dsl generation (no checker)
  intermediate_steps.append({"es_cmd": es_cmd}) # input: ES search
  result = self._search(indices=indices, query=es_cmd)
@@ -166,12 +164,13 @@ class ElasticsearchDatabaseChain(Chain):
  chain_result: dict[str, Any] = {self.output_key: final_result}
  if self.return_intermediate_steps:
  chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
- return chain_result
  except Exception as exc:
  # Append intermediate steps to exception, to aid in logging and later
  # improvement of few shot prompt seeds
  exc.intermediate_steps = intermediate_steps # type: ignore[attr-defined]
- raise exc
+ raise
+
+ return chain_result

  @property
  def _chain_type(self) -> str:
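The last hunk above moves the successful return out of the try block and switches `raise exc` to a bare `raise`. A minimal sketch of that control-flow pattern, with made-up names:

from typing import Any, Callable


def run_search(search: Callable[[str], Any], query: str) -> dict:
    intermediate_steps: list = [query]
    try:
        result = search(query)
    except Exception as exc:
        # Annotate the exception for debugging, then re-raise the active
        # exception as-is with a bare `raise`.
        exc.intermediate_steps = intermediate_steps  # type: ignore[attr-defined]
        raise

    # Returning outside the try block keeps the happy path out of the handler.
    return {"result": result, "steps": intermediate_steps}


print(run_search(lambda q: q.upper(), "match_all"))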
@@ -1,4 +1,3 @@
- # flake8: noqa
  from langchain_core.prompts.prompt import PromptTemplate

  PROMPT_SUFFIX = """Only use the following Elasticsearch indices:
@@ -17,7 +16,7 @@ Use the following format:

  Question: Question here
  ESQuery: Elasticsearch Query formatted as json
- """
+ """ # noqa: E501

  DSL_PROMPT = PromptTemplate.from_template(DEFAULT_DSL_TEMPLATE + PROMPT_SUFFIX)

@@ -31,6 +30,6 @@ Answer: Final answer here

  Question: {input}
  Data: {data}
- Answer:"""
+ Answer:""" # noqa: E501

  ANSWER_PROMPT = PromptTemplate.from_template(DEFAULT_ANSWER_TEMPLATE)
@@ -36,9 +36,9 @@ def __getattr__(name: str) -> Any:

  __all__ = [
  "convert_to_ernie_function",
- "create_structured_output_chain",
  "create_ernie_fn_chain",
- "create_structured_output_runnable",
  "create_ernie_fn_runnable",
+ "create_structured_output_chain",
+ "create_structured_output_runnable",
  "get_ernie_output_parser",
  ]
@@ -7,7 +7,9 @@ TEST_GEN_TEMPLATE_SUFFIX = "Add another example."


  def generate_example(
- examples: list[dict], llm: BaseLanguageModel, prompt_template: PromptTemplate
+ examples: list[dict],
+ llm: BaseLanguageModel,
+ prompt_template: PromptTemplate,
  ) -> str:
  """Return another example given a list of examples for a prompt."""
  prompt = FewShotPromptTemplate(
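Illustrative usage of generate_example, assuming the helper lives in langchain.chains.example_generator; FakeListLLM keeps the sketch offline, and the few-shot examples are invented:

from langchain_core.language_models import FakeListLLM
from langchain_core.prompts.prompt import PromptTemplate

from langchain.chains.example_generator import generate_example

example_prompt = PromptTemplate(
    input_variables=["question", "answer"],
    template="Q: {question}\nA: {answer}",
)
examples = [
    {"question": "2 + 2", "answer": "4"},
    {"question": "3 * 3", "answer": "9"},
]
llm = FakeListLLM(responses=["Q: 5 - 1\nA: 4"])
print(generate_example(examples, llm, example_prompt))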