langchain 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

Files changed (582)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +160 -133
  9. langchain/agents/agent_iterator.py +31 -14
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +7 -7
  48. langchain/agents/chat/base.py +6 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +9 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +7 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +18 -9
  63. langchain/agents/json_chat/base.py +18 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +9 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +80 -70
  71. langchain/agents/openai_functions_agent/base.py +47 -38
  72. langchain/agents/openai_functions_multi_agent/base.py +40 -27
  73. langchain/agents/openai_tools/base.py +8 -8
  74. langchain/agents/output_parsers/__init__.py +3 -3
  75. langchain/agents/output_parsers/json.py +7 -7
  76. langchain/agents/output_parsers/openai_functions.py +15 -7
  77. langchain/agents/output_parsers/openai_tools.py +9 -4
  78. langchain/agents/output_parsers/react_json_single_input.py +10 -5
  79. langchain/agents/output_parsers/react_single_input.py +15 -11
  80. langchain/agents/output_parsers/self_ask.py +3 -2
  81. langchain/agents/output_parsers/tools.py +18 -13
  82. langchain/agents/output_parsers/xml.py +99 -28
  83. langchain/agents/react/agent.py +4 -4
  84. langchain/agents/react/base.py +22 -17
  85. langchain/agents/react/output_parser.py +5 -6
  86. langchain/agents/react/textworld_prompt.py +0 -1
  87. langchain/agents/react/wiki_prompt.py +14 -15
  88. langchain/agents/schema.py +3 -2
  89. langchain/agents/self_ask_with_search/base.py +19 -15
  90. langchain/agents/self_ask_with_search/prompt.py +0 -1
  91. langchain/agents/structured_chat/base.py +14 -11
  92. langchain/agents/structured_chat/output_parser.py +16 -18
  93. langchain/agents/structured_chat/prompt.py +3 -4
  94. langchain/agents/tool_calling_agent/base.py +7 -6
  95. langchain/agents/tools.py +2 -2
  96. langchain/agents/utils.py +2 -3
  97. langchain/agents/xml/base.py +5 -5
  98. langchain/agents/xml/prompt.py +1 -2
  99. langchain/cache.py +12 -12
  100. langchain/callbacks/__init__.py +11 -11
  101. langchain/callbacks/aim_callback.py +2 -2
  102. langchain/callbacks/argilla_callback.py +1 -1
  103. langchain/callbacks/arize_callback.py +1 -1
  104. langchain/callbacks/arthur_callback.py +1 -1
  105. langchain/callbacks/base.py +7 -7
  106. langchain/callbacks/clearml_callback.py +1 -1
  107. langchain/callbacks/comet_ml_callback.py +1 -1
  108. langchain/callbacks/confident_callback.py +1 -1
  109. langchain/callbacks/context_callback.py +1 -1
  110. langchain/callbacks/flyte_callback.py +1 -1
  111. langchain/callbacks/human.py +2 -2
  112. langchain/callbacks/infino_callback.py +1 -1
  113. langchain/callbacks/labelstudio_callback.py +1 -1
  114. langchain/callbacks/llmonitor_callback.py +1 -1
  115. langchain/callbacks/manager.py +6 -6
  116. langchain/callbacks/mlflow_callback.py +2 -2
  117. langchain/callbacks/openai_info.py +1 -1
  118. langchain/callbacks/promptlayer_callback.py +1 -1
  119. langchain/callbacks/sagemaker_callback.py +1 -1
  120. langchain/callbacks/streaming_aiter.py +4 -1
  121. langchain/callbacks/streaming_aiter_final_only.py +5 -3
  122. langchain/callbacks/streaming_stdout_final_only.py +5 -3
  123. langchain/callbacks/streamlit/__init__.py +3 -2
  124. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  125. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  126. langchain/callbacks/tracers/__init__.py +1 -1
  127. langchain/callbacks/tracers/base.py +2 -1
  128. langchain/callbacks/tracers/comet.py +1 -1
  129. langchain/callbacks/tracers/evaluation.py +1 -1
  130. langchain/callbacks/tracers/log_stream.py +1 -1
  131. langchain/callbacks/tracers/logging.py +1 -1
  132. langchain/callbacks/tracers/stdout.py +1 -1
  133. langchain/callbacks/trubrics_callback.py +1 -1
  134. langchain/callbacks/utils.py +4 -4
  135. langchain/callbacks/wandb_callback.py +1 -1
  136. langchain/callbacks/whylabs_callback.py +1 -1
  137. langchain/chains/api/base.py +36 -22
  138. langchain/chains/api/news_docs.py +1 -2
  139. langchain/chains/api/open_meteo_docs.py +1 -2
  140. langchain/chains/api/openapi/requests_chain.py +1 -1
  141. langchain/chains/api/openapi/response_chain.py +1 -1
  142. langchain/chains/api/podcast_docs.py +1 -2
  143. langchain/chains/api/prompt.py +1 -2
  144. langchain/chains/api/tmdb_docs.py +1 -2
  145. langchain/chains/base.py +89 -55
  146. langchain/chains/chat_vector_db/prompts.py +2 -3
  147. langchain/chains/combine_documents/__init__.py +1 -1
  148. langchain/chains/combine_documents/base.py +24 -11
  149. langchain/chains/combine_documents/map_reduce.py +39 -31
  150. langchain/chains/combine_documents/map_rerank.py +34 -21
  151. langchain/chains/combine_documents/reduce.py +47 -26
  152. langchain/chains/combine_documents/refine.py +26 -17
  153. langchain/chains/combine_documents/stuff.py +19 -12
  154. langchain/chains/constitutional_ai/base.py +4 -4
  155. langchain/chains/constitutional_ai/principles.py +22 -25
  156. langchain/chains/constitutional_ai/prompts.py +25 -28
  157. langchain/chains/conversation/base.py +6 -7
  158. langchain/chains/conversation/memory.py +5 -5
  159. langchain/chains/conversation/prompt.py +5 -5
  160. langchain/chains/conversational_retrieval/base.py +41 -20
  161. langchain/chains/conversational_retrieval/prompts.py +2 -3
  162. langchain/chains/elasticsearch_database/base.py +8 -9
  163. langchain/chains/elasticsearch_database/prompts.py +2 -3
  164. langchain/chains/ernie_functions/__init__.py +2 -2
  165. langchain/chains/example_generator.py +3 -1
  166. langchain/chains/flare/base.py +26 -12
  167. langchain/chains/graph_qa/cypher.py +2 -2
  168. langchain/chains/graph_qa/falkordb.py +1 -1
  169. langchain/chains/graph_qa/gremlin.py +1 -1
  170. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  171. langchain/chains/graph_qa/prompts.py +2 -2
  172. langchain/chains/history_aware_retriever.py +2 -1
  173. langchain/chains/hyde/base.py +6 -5
  174. langchain/chains/hyde/prompts.py +5 -6
  175. langchain/chains/llm.py +77 -61
  176. langchain/chains/llm_bash/__init__.py +2 -1
  177. langchain/chains/llm_checker/base.py +7 -5
  178. langchain/chains/llm_checker/prompt.py +3 -4
  179. langchain/chains/llm_math/base.py +16 -9
  180. langchain/chains/llm_math/prompt.py +1 -2
  181. langchain/chains/llm_summarization_checker/base.py +9 -6
  182. langchain/chains/llm_symbolic_math/__init__.py +2 -1
  183. langchain/chains/loading.py +170 -153
  184. langchain/chains/mapreduce.py +4 -3
  185. langchain/chains/moderation.py +8 -9
  186. langchain/chains/natbot/base.py +8 -8
  187. langchain/chains/natbot/crawler.py +73 -76
  188. langchain/chains/natbot/prompt.py +2 -3
  189. langchain/chains/openai_functions/__init__.py +7 -7
  190. langchain/chains/openai_functions/base.py +13 -10
  191. langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
  192. langchain/chains/openai_functions/extraction.py +19 -19
  193. langchain/chains/openai_functions/openapi.py +35 -35
  194. langchain/chains/openai_functions/qa_with_structure.py +19 -12
  195. langchain/chains/openai_functions/tagging.py +2 -4
  196. langchain/chains/openai_tools/extraction.py +7 -8
  197. langchain/chains/qa_generation/base.py +4 -3
  198. langchain/chains/qa_generation/prompt.py +5 -5
  199. langchain/chains/qa_with_sources/base.py +14 -6
  200. langchain/chains/qa_with_sources/loading.py +16 -8
  201. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  202. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  203. langchain/chains/qa_with_sources/retrieval.py +14 -5
  204. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  205. langchain/chains/qa_with_sources/vector_db.py +17 -6
  206. langchain/chains/query_constructor/base.py +34 -33
  207. langchain/chains/query_constructor/ir.py +4 -4
  208. langchain/chains/query_constructor/parser.py +37 -32
  209. langchain/chains/query_constructor/prompt.py +5 -6
  210. langchain/chains/question_answering/chain.py +21 -10
  211. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  212. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  213. langchain/chains/question_answering/refine_prompts.py +2 -5
  214. langchain/chains/question_answering/stuff_prompt.py +5 -5
  215. langchain/chains/retrieval.py +1 -3
  216. langchain/chains/retrieval_qa/base.py +34 -27
  217. langchain/chains/retrieval_qa/prompt.py +1 -2
  218. langchain/chains/router/__init__.py +3 -3
  219. langchain/chains/router/base.py +24 -20
  220. langchain/chains/router/embedding_router.py +12 -8
  221. langchain/chains/router/llm_router.py +17 -16
  222. langchain/chains/router/multi_prompt.py +2 -2
  223. langchain/chains/router/multi_retrieval_qa.py +10 -5
  224. langchain/chains/sequential.py +30 -18
  225. langchain/chains/sql_database/prompt.py +14 -16
  226. langchain/chains/sql_database/query.py +6 -5
  227. langchain/chains/structured_output/__init__.py +1 -1
  228. langchain/chains/structured_output/base.py +75 -67
  229. langchain/chains/summarize/chain.py +11 -5
  230. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  231. langchain/chains/summarize/stuff_prompt.py +0 -1
  232. langchain/chains/transform.py +5 -6
  233. langchain/chat_loaders/facebook_messenger.py +1 -1
  234. langchain/chat_loaders/langsmith.py +1 -1
  235. langchain/chat_loaders/utils.py +3 -3
  236. langchain/chat_models/__init__.py +20 -19
  237. langchain/chat_models/anthropic.py +1 -1
  238. langchain/chat_models/azureml_endpoint.py +1 -1
  239. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  240. langchain/chat_models/base.py +160 -123
  241. langchain/chat_models/bedrock.py +1 -1
  242. langchain/chat_models/fake.py +1 -1
  243. langchain/chat_models/meta.py +1 -1
  244. langchain/chat_models/pai_eas_endpoint.py +1 -1
  245. langchain/chat_models/promptlayer_openai.py +1 -1
  246. langchain/chat_models/volcengine_maas.py +1 -1
  247. langchain/docstore/base.py +1 -1
  248. langchain/document_loaders/__init__.py +9 -9
  249. langchain/document_loaders/airbyte.py +3 -3
  250. langchain/document_loaders/assemblyai.py +1 -1
  251. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  252. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  253. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  254. langchain/document_loaders/base.py +1 -1
  255. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  256. langchain/document_loaders/blockchain.py +1 -1
  257. langchain/document_loaders/chatgpt.py +1 -1
  258. langchain/document_loaders/college_confidential.py +1 -1
  259. langchain/document_loaders/confluence.py +1 -1
  260. langchain/document_loaders/email.py +1 -1
  261. langchain/document_loaders/facebook_chat.py +1 -1
  262. langchain/document_loaders/markdown.py +1 -1
  263. langchain/document_loaders/notebook.py +1 -1
  264. langchain/document_loaders/org_mode.py +1 -1
  265. langchain/document_loaders/parsers/__init__.py +1 -1
  266. langchain/document_loaders/parsers/docai.py +1 -1
  267. langchain/document_loaders/parsers/generic.py +1 -1
  268. langchain/document_loaders/parsers/html/__init__.py +1 -1
  269. langchain/document_loaders/parsers/html/bs4.py +1 -1
  270. langchain/document_loaders/parsers/language/cobol.py +1 -1
  271. langchain/document_loaders/parsers/language/python.py +1 -1
  272. langchain/document_loaders/parsers/msword.py +1 -1
  273. langchain/document_loaders/parsers/pdf.py +5 -5
  274. langchain/document_loaders/parsers/registry.py +1 -1
  275. langchain/document_loaders/pdf.py +8 -8
  276. langchain/document_loaders/powerpoint.py +1 -1
  277. langchain/document_loaders/pyspark_dataframe.py +1 -1
  278. langchain/document_loaders/telegram.py +2 -2
  279. langchain/document_loaders/tencent_cos_directory.py +1 -1
  280. langchain/document_loaders/unstructured.py +5 -5
  281. langchain/document_loaders/url_playwright.py +1 -1
  282. langchain/document_loaders/whatsapp_chat.py +1 -1
  283. langchain/document_loaders/youtube.py +2 -2
  284. langchain/document_transformers/__init__.py +3 -3
  285. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  286. langchain/document_transformers/doctran_text_extract.py +1 -1
  287. langchain/document_transformers/doctran_text_qa.py +1 -1
  288. langchain/document_transformers/doctran_text_translate.py +1 -1
  289. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  290. langchain/document_transformers/google_translate.py +1 -1
  291. langchain/document_transformers/html2text.py +1 -1
  292. langchain/document_transformers/nuclia_text_transform.py +1 -1
  293. langchain/embeddings/__init__.py +5 -5
  294. langchain/embeddings/base.py +33 -24
  295. langchain/embeddings/cache.py +117 -26
  296. langchain/embeddings/fake.py +1 -1
  297. langchain/embeddings/huggingface.py +2 -2
  298. langchain/evaluation/__init__.py +22 -22
  299. langchain/evaluation/agents/trajectory_eval_chain.py +24 -24
  300. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  301. langchain/evaluation/comparison/__init__.py +1 -1
  302. langchain/evaluation/comparison/eval_chain.py +21 -14
  303. langchain/evaluation/comparison/prompt.py +1 -2
  304. langchain/evaluation/criteria/__init__.py +1 -1
  305. langchain/evaluation/criteria/eval_chain.py +21 -12
  306. langchain/evaluation/criteria/prompt.py +2 -3
  307. langchain/evaluation/embedding_distance/base.py +24 -21
  308. langchain/evaluation/loading.py +15 -11
  309. langchain/evaluation/parsing/base.py +4 -1
  310. langchain/evaluation/parsing/json_distance.py +5 -2
  311. langchain/evaluation/parsing/json_schema.py +12 -8
  312. langchain/evaluation/qa/__init__.py +1 -1
  313. langchain/evaluation/qa/eval_chain.py +13 -6
  314. langchain/evaluation/qa/eval_prompt.py +7 -8
  315. langchain/evaluation/qa/generate_chain.py +2 -1
  316. langchain/evaluation/qa/generate_prompt.py +2 -4
  317. langchain/evaluation/schema.py +38 -30
  318. langchain/evaluation/scoring/__init__.py +1 -1
  319. langchain/evaluation/scoring/eval_chain.py +23 -16
  320. langchain/evaluation/scoring/prompt.py +0 -1
  321. langchain/evaluation/string_distance/base.py +15 -10
  322. langchain/globals.py +12 -11
  323. langchain/graphs/__init__.py +6 -6
  324. langchain/graphs/graph_document.py +1 -1
  325. langchain/graphs/networkx_graph.py +2 -2
  326. langchain/hub.py +9 -11
  327. langchain/indexes/__init__.py +3 -3
  328. langchain/indexes/_sql_record_manager.py +63 -46
  329. langchain/indexes/prompts/entity_extraction.py +1 -2
  330. langchain/indexes/prompts/entity_summarization.py +1 -2
  331. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  332. langchain/indexes/vectorstore.py +35 -19
  333. langchain/llms/__init__.py +13 -13
  334. langchain/llms/ai21.py +1 -1
  335. langchain/llms/azureml_endpoint.py +4 -4
  336. langchain/llms/base.py +15 -7
  337. langchain/llms/bedrock.py +1 -1
  338. langchain/llms/cloudflare_workersai.py +1 -1
  339. langchain/llms/gradient_ai.py +1 -1
  340. langchain/llms/loading.py +1 -1
  341. langchain/llms/openai.py +1 -1
  342. langchain/llms/sagemaker_endpoint.py +1 -1
  343. langchain/load/dump.py +1 -1
  344. langchain/load/load.py +1 -1
  345. langchain/load/serializable.py +3 -3
  346. langchain/memory/__init__.py +3 -3
  347. langchain/memory/buffer.py +11 -8
  348. langchain/memory/chat_memory.py +14 -8
  349. langchain/memory/chat_message_histories/__init__.py +1 -1
  350. langchain/memory/chat_message_histories/astradb.py +1 -1
  351. langchain/memory/chat_message_histories/cassandra.py +1 -1
  352. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  353. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  354. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  355. langchain/memory/chat_message_histories/file.py +1 -1
  356. langchain/memory/chat_message_histories/firestore.py +1 -1
  357. langchain/memory/chat_message_histories/momento.py +1 -1
  358. langchain/memory/chat_message_histories/mongodb.py +1 -1
  359. langchain/memory/chat_message_histories/neo4j.py +1 -1
  360. langchain/memory/chat_message_histories/postgres.py +1 -1
  361. langchain/memory/chat_message_histories/redis.py +1 -1
  362. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  363. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  364. langchain/memory/chat_message_histories/streamlit.py +1 -1
  365. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  366. langchain/memory/chat_message_histories/xata.py +1 -1
  367. langchain/memory/chat_message_histories/zep.py +1 -1
  368. langchain/memory/combined.py +13 -12
  369. langchain/memory/entity.py +84 -61
  370. langchain/memory/prompt.py +10 -11
  371. langchain/memory/readonly.py +0 -2
  372. langchain/memory/simple.py +1 -3
  373. langchain/memory/summary.py +13 -11
  374. langchain/memory/summary_buffer.py +17 -8
  375. langchain/memory/utils.py +3 -2
  376. langchain/memory/vectorstore.py +13 -6
  377. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  378. langchain/model_laboratory.py +12 -11
  379. langchain/output_parsers/__init__.py +4 -4
  380. langchain/output_parsers/boolean.py +7 -4
  381. langchain/output_parsers/combining.py +10 -5
  382. langchain/output_parsers/datetime.py +32 -31
  383. langchain/output_parsers/enum.py +5 -3
  384. langchain/output_parsers/fix.py +52 -52
  385. langchain/output_parsers/format_instructions.py +6 -8
  386. langchain/output_parsers/json.py +2 -2
  387. langchain/output_parsers/list.py +2 -2
  388. langchain/output_parsers/loading.py +9 -9
  389. langchain/output_parsers/openai_functions.py +3 -3
  390. langchain/output_parsers/openai_tools.py +1 -1
  391. langchain/output_parsers/pandas_dataframe.py +43 -47
  392. langchain/output_parsers/prompts.py +1 -2
  393. langchain/output_parsers/rail_parser.py +1 -1
  394. langchain/output_parsers/regex.py +7 -8
  395. langchain/output_parsers/regex_dict.py +7 -10
  396. langchain/output_parsers/retry.py +77 -78
  397. langchain/output_parsers/structured.py +11 -6
  398. langchain/output_parsers/yaml.py +15 -11
  399. langchain/prompts/__init__.py +5 -3
  400. langchain/prompts/base.py +5 -5
  401. langchain/prompts/chat.py +10 -9
  402. langchain/prompts/example_selector/__init__.py +3 -1
  403. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  404. langchain/prompts/few_shot.py +1 -1
  405. langchain/prompts/loading.py +3 -3
  406. langchain/prompts/prompt.py +1 -1
  407. langchain/retrievers/__init__.py +5 -5
  408. langchain/retrievers/bedrock.py +2 -2
  409. langchain/retrievers/bm25.py +1 -1
  410. langchain/retrievers/contextual_compression.py +15 -13
  411. langchain/retrievers/docarray.py +1 -1
  412. langchain/retrievers/document_compressors/__init__.py +7 -5
  413. langchain/retrievers/document_compressors/base.py +13 -7
  414. langchain/retrievers/document_compressors/chain_extract.py +4 -5
  415. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  416. langchain/retrievers/document_compressors/chain_filter.py +11 -12
  417. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  418. langchain/retrievers/document_compressors/cohere_rerank.py +17 -19
  419. langchain/retrievers/document_compressors/embeddings_filter.py +23 -23
  420. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  421. langchain/retrievers/document_compressors/listwise_rerank.py +11 -6
  422. langchain/retrievers/ensemble.py +28 -25
  423. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  424. langchain/retrievers/google_vertex_ai_search.py +2 -2
  425. langchain/retrievers/kendra.py +10 -10
  426. langchain/retrievers/llama_index.py +1 -1
  427. langchain/retrievers/merger_retriever.py +11 -11
  428. langchain/retrievers/milvus.py +1 -1
  429. langchain/retrievers/multi_query.py +32 -26
  430. langchain/retrievers/multi_vector.py +20 -8
  431. langchain/retrievers/parent_document_retriever.py +18 -9
  432. langchain/retrievers/re_phraser.py +6 -5
  433. langchain/retrievers/self_query/base.py +138 -119
  434. langchain/retrievers/time_weighted_retriever.py +18 -7
  435. langchain/retrievers/zilliz.py +1 -1
  436. langchain/runnables/hub.py +2 -1
  437. langchain/runnables/openai_functions.py +6 -2
  438. langchain/schema/__init__.py +23 -23
  439. langchain/schema/cache.py +1 -1
  440. langchain/schema/callbacks/base.py +7 -7
  441. langchain/schema/callbacks/manager.py +19 -19
  442. langchain/schema/callbacks/tracers/base.py +3 -2
  443. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  444. langchain/schema/callbacks/tracers/langchain.py +1 -1
  445. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  446. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  447. langchain/schema/callbacks/tracers/schemas.py +8 -8
  448. langchain/schema/callbacks/tracers/stdout.py +3 -3
  449. langchain/schema/document.py +1 -1
  450. langchain/schema/language_model.py +2 -2
  451. langchain/schema/messages.py +12 -12
  452. langchain/schema/output.py +3 -3
  453. langchain/schema/output_parser.py +3 -3
  454. langchain/schema/runnable/__init__.py +3 -3
  455. langchain/schema/runnable/base.py +9 -9
  456. langchain/schema/runnable/config.py +5 -5
  457. langchain/schema/runnable/configurable.py +1 -1
  458. langchain/schema/runnable/history.py +1 -1
  459. langchain/schema/runnable/passthrough.py +1 -1
  460. langchain/schema/runnable/utils.py +16 -16
  461. langchain/schema/vectorstore.py +1 -1
  462. langchain/smith/__init__.py +1 -1
  463. langchain/smith/evaluation/__init__.py +2 -2
  464. langchain/smith/evaluation/config.py +10 -7
  465. langchain/smith/evaluation/name_generation.py +3 -3
  466. langchain/smith/evaluation/progress.py +11 -2
  467. langchain/smith/evaluation/runner_utils.py +181 -129
  468. langchain/smith/evaluation/string_run_evaluator.py +75 -68
  469. langchain/storage/__init__.py +2 -2
  470. langchain/storage/_lc_store.py +4 -2
  471. langchain/storage/encoder_backed.py +6 -2
  472. langchain/storage/file_system.py +19 -16
  473. langchain/storage/in_memory.py +1 -1
  474. langchain/storage/upstash_redis.py +1 -1
  475. langchain/text_splitter.py +15 -15
  476. langchain/tools/__init__.py +28 -26
  477. langchain/tools/ainetwork/app.py +1 -1
  478. langchain/tools/ainetwork/base.py +1 -1
  479. langchain/tools/ainetwork/owner.py +1 -1
  480. langchain/tools/ainetwork/rule.py +1 -1
  481. langchain/tools/ainetwork/transfer.py +1 -1
  482. langchain/tools/ainetwork/value.py +1 -1
  483. langchain/tools/amadeus/closest_airport.py +1 -1
  484. langchain/tools/amadeus/flight_search.py +1 -1
  485. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  486. langchain/tools/base.py +4 -4
  487. langchain/tools/bearly/tool.py +1 -1
  488. langchain/tools/bing_search/__init__.py +1 -1
  489. langchain/tools/bing_search/tool.py +1 -1
  490. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  491. langchain/tools/dataforseo_api_search/tool.py +1 -1
  492. langchain/tools/ddg_search/tool.py +1 -1
  493. langchain/tools/e2b_data_analysis/tool.py +2 -2
  494. langchain/tools/edenai/__init__.py +1 -1
  495. langchain/tools/file_management/__init__.py +1 -1
  496. langchain/tools/file_management/copy.py +1 -1
  497. langchain/tools/file_management/delete.py +1 -1
  498. langchain/tools/gmail/__init__.py +2 -2
  499. langchain/tools/gmail/get_message.py +1 -1
  500. langchain/tools/gmail/search.py +1 -1
  501. langchain/tools/gmail/send_message.py +1 -1
  502. langchain/tools/google_finance/__init__.py +1 -1
  503. langchain/tools/google_finance/tool.py +1 -1
  504. langchain/tools/google_scholar/__init__.py +1 -1
  505. langchain/tools/google_scholar/tool.py +1 -1
  506. langchain/tools/google_search/__init__.py +1 -1
  507. langchain/tools/google_search/tool.py +1 -1
  508. langchain/tools/google_serper/__init__.py +1 -1
  509. langchain/tools/google_serper/tool.py +1 -1
  510. langchain/tools/google_trends/__init__.py +1 -1
  511. langchain/tools/google_trends/tool.py +1 -1
  512. langchain/tools/jira/tool.py +20 -1
  513. langchain/tools/json/tool.py +25 -3
  514. langchain/tools/memorize/tool.py +1 -1
  515. langchain/tools/multion/__init__.py +1 -1
  516. langchain/tools/multion/update_session.py +1 -1
  517. langchain/tools/office365/__init__.py +2 -2
  518. langchain/tools/office365/events_search.py +1 -1
  519. langchain/tools/office365/messages_search.py +1 -1
  520. langchain/tools/office365/send_event.py +1 -1
  521. langchain/tools/office365/send_message.py +1 -1
  522. langchain/tools/openapi/utils/api_models.py +6 -6
  523. langchain/tools/playwright/__init__.py +5 -5
  524. langchain/tools/playwright/click.py +1 -1
  525. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  526. langchain/tools/playwright/get_elements.py +1 -1
  527. langchain/tools/playwright/navigate.py +1 -1
  528. langchain/tools/plugin.py +2 -2
  529. langchain/tools/powerbi/tool.py +1 -1
  530. langchain/tools/python/__init__.py +2 -1
  531. langchain/tools/reddit_search/tool.py +1 -1
  532. langchain/tools/render.py +2 -2
  533. langchain/tools/requests/tool.py +2 -2
  534. langchain/tools/searchapi/tool.py +1 -1
  535. langchain/tools/searx_search/tool.py +1 -1
  536. langchain/tools/slack/get_message.py +1 -1
  537. langchain/tools/spark_sql/tool.py +1 -1
  538. langchain/tools/sql_database/tool.py +1 -1
  539. langchain/tools/tavily_search/__init__.py +1 -1
  540. langchain/tools/tavily_search/tool.py +1 -1
  541. langchain/tools/zapier/__init__.py +1 -1
  542. langchain/tools/zapier/tool.py +24 -2
  543. langchain/utilities/__init__.py +4 -4
  544. langchain/utilities/arcee.py +4 -4
  545. langchain/utilities/clickup.py +4 -4
  546. langchain/utilities/dalle_image_generator.py +1 -1
  547. langchain/utilities/dataforseo_api_search.py +1 -1
  548. langchain/utilities/opaqueprompts.py +1 -1
  549. langchain/utilities/reddit_search.py +1 -1
  550. langchain/utilities/sql_database.py +1 -1
  551. langchain/utilities/tavily_search.py +1 -1
  552. langchain/utilities/vertexai.py +2 -2
  553. langchain/utils/__init__.py +1 -1
  554. langchain/utils/aiter.py +1 -1
  555. langchain/utils/html.py +3 -3
  556. langchain/utils/input.py +1 -1
  557. langchain/utils/iter.py +1 -1
  558. langchain/utils/json_schema.py +1 -3
  559. langchain/utils/strings.py +1 -1
  560. langchain/utils/utils.py +6 -6
  561. langchain/vectorstores/__init__.py +5 -5
  562. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  563. langchain/vectorstores/azure_cosmos_db.py +1 -1
  564. langchain/vectorstores/clickhouse.py +1 -1
  565. langchain/vectorstores/elastic_vector_search.py +1 -1
  566. langchain/vectorstores/elasticsearch.py +2 -2
  567. langchain/vectorstores/myscale.py +1 -1
  568. langchain/vectorstores/neo4j_vector.py +1 -1
  569. langchain/vectorstores/pgembedding.py +1 -1
  570. langchain/vectorstores/qdrant.py +1 -1
  571. langchain/vectorstores/redis/__init__.py +1 -1
  572. langchain/vectorstores/redis/base.py +1 -1
  573. langchain/vectorstores/redis/filters.py +4 -4
  574. langchain/vectorstores/redis/schema.py +6 -6
  575. langchain/vectorstores/sklearn.py +2 -2
  576. langchain/vectorstores/starrocks.py +1 -1
  577. langchain/vectorstores/utils.py +1 -1
  578. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/METADATA +5 -5
  579. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/RECORD +582 -582
  580. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
  581. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
  582. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
@@ -88,7 +88,7 @@ def create_stuff_documents_chain(
 
     return (
         RunnablePassthrough.assign(**{document_variable_name: format_docs}).with_config(
-            run_name="format_inputs"
+            run_name="format_inputs",
         )
         | prompt
         | llm
@@ -102,7 +102,7 @@ def create_stuff_documents_chain(
     message=(
         "This class is deprecated. Use the `create_stuff_documents_chain` constructor "
         "instead. See migration guide here: "
-        "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"  # noqa: E501
+        "https://python.langchain.com/docs/versions/migrating_chains/stuff_docs_chain/"
     ),
 )
 class StuffDocumentsChain(BaseCombineDocumentsChain):
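
The deprecation message in the hunk above names create_stuff_documents_chain as the replacement for StuffDocumentsChain. A minimal migration sketch, not part of this diff; it assumes the separate langchain-openai provider package and uses a placeholder model name:

from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.documents import Document
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI  # assumed provider package, not part of this diff

prompt = ChatPromptTemplate.from_messages(
    [("system", "Summarize the following context:\n\n{context}")]
)
llm = ChatOpenAI(model="gpt-4o-mini")  # placeholder model name
chain = create_stuff_documents_chain(llm, prompt)  # "context" is the default document variable

docs = [Document(page_content="LangChain reformats its combine-documents chains in this release.")]
print(chain.invoke({"context": docs}))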
@@ -147,7 +147,7 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
     """LLM chain which is called with the formatted document string,
     along with any other inputs."""
     document_prompt: BasePromptTemplate = Field(
-        default_factory=lambda: DEFAULT_DOCUMENT_PROMPT
+        default_factory=lambda: DEFAULT_DOCUMENT_PROMPT,
     )
     """Prompt to use to format each document, gets passed to `format_document`."""
     document_variable_name: str
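
The document_prompt field shown above is handed to format_document, which renders each Document before it is stuffed into the final prompt. A small sketch of that behavior, assuming format_document is importable from langchain_core.prompts; the variable names are illustrative:

from langchain_core.documents import Document
from langchain_core.prompts import PromptTemplate, format_document

# Roughly what the default document prompt does: render only the page content.
doc_prompt = PromptTemplate.from_template("{page_content}")
doc = Document(page_content="hello world", metadata={"source": "a.txt"})
print(format_document(doc, doc_prompt))  # -> "hello world"

A custom document_prompt such as "{source}: {page_content}" would pull metadata fields into the stuffed string as well.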
@@ -175,16 +175,17 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
             if len(llm_chain_variables) == 1:
                 values["document_variable_name"] = llm_chain_variables[0]
             else:
-                raise ValueError(
+                msg = (
                     "document_variable_name must be provided if there are "
                     "multiple llm_chain_variables"
                 )
-        else:
-            if values["document_variable_name"] not in llm_chain_variables:
-                raise ValueError(
-                    f"document_variable_name {values['document_variable_name']} was "
-                    f"not found in llm_chain input_variables: {llm_chain_variables}"
-                )
+                raise ValueError(msg)
+        elif values["document_variable_name"] not in llm_chain_variables:
+            msg = (
+                f"document_variable_name {values['document_variable_name']} was "
+                f"not found in llm_chain input_variables: {llm_chain_variables}"
+            )
+            raise ValueError(msg)
         return values
 
     @property
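
The validator rewrite above assigns the error message to a local variable before raising, a convention commonly enforced by linters such as ruff's flake8-errmsg rules; attributing it to that tooling is an assumption, since the diff does not name it. A minimal sketch of the pattern, with an illustrative helper:

def require_variable(name: str, allowed: list[str]) -> str:
    # Build the message first, then raise: the raise line stays short and
    # the message can be reused or logged before the exception is thrown.
    if name not in allowed:
        msg = f"document_variable_name {name} was not found in: {allowed}"
        raise ValueError(msg)
    return name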
@@ -241,7 +242,10 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return self.llm_chain._get_num_tokens(prompt)
 
     def combine_docs(
-        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+        self,
+        docs: list[Document],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> tuple[str, dict]:
         """Stuff all documents into one prompt and pass to LLM.
 
@@ -259,7 +263,10 @@ class StuffDocumentsChain(BaseCombineDocumentsChain):
         return self.llm_chain.predict(callbacks=callbacks, **inputs), {}
 
     async def acombine_docs(
-        self, docs: list[Document], callbacks: Callbacks = None, **kwargs: Any
+        self,
+        docs: list[Document],
+        callbacks: Callbacks = None,
+        **kwargs: Any,
     ) -> tuple[str, dict]:
         """Async stuff all documents into one prompt and pass to LLM.
 
@@ -19,7 +19,7 @@ from langchain.chains.llm import LLMChain
     message=(
         "This class is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"  # noqa: E501
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.constitutional_ai.base.ConstitutionalChain.html"
     ),
     removal="1.0",
 )
@@ -197,12 +197,12 @@ class ConstitutionalChain(Chain):
 
     @classmethod
     def get_principles(
-        cls, names: Optional[list[str]] = None
+        cls,
+        names: Optional[list[str]] = None,
    ) -> list[ConstitutionalPrinciple]:
         if names is None:
             return list(PRINCIPLES.values())
-        else:
-            return [PRINCIPLES[name] for name in names]
+        return [PRINCIPLES[name] for name in names]
 
     @classmethod
     def from_llm(
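
The get_principles change above swaps an else branch for an early return, so behavior is unchanged. A hypothetical call, for illustration only (ConstitutionalChain itself is deprecated, as the earlier hunk notes):

from langchain.chains.constitutional_ai.base import ConstitutionalChain

all_principles = ConstitutionalChain.get_principles()                # every registered principle
harm_only = ConstitutionalChain.get_principles(names=["harmful1"])   # look up a subset by name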
@@ -2,90 +2,87 @@
 UnifiedObjectives v0.2 principles ("uo-*") adapted from https://examine.dev/docs/Unified_objectives.pdf (Samwald et al. 2023)
 """
 
-# flake8: noqa
-from typing import Dict
-
 from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
 
-PRINCIPLES: Dict[str, ConstitutionalPrinciple] = {
+PRINCIPLES: dict[str, ConstitutionalPrinciple] = {
     "harmful1": ConstitutionalPrinciple(
         name="harmful1",
-        critique_request="Identify specific ways in which the assistants last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
+        critique_request="Identify specific ways in which the assistant's last response is harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.",
     ),
     "harmful2": ConstitutionalPrinciple(
         name="harmful2",
-        critique_request="Explain ways in which the assistants preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
+        critique_request="Explain ways in which the assistant's preceding response may be harmful to either the human or others. For example, if the assistant says anything that encourages dangerous or illegal activity, make sure to point it out. Other examples of harm include racism, sexism, and other social biases.",
         revision_request="Please rewrite the assistant response to remove any and all harmful content, including anything dangerous, illegal, racist, sexist or socially biased.",
     ),
     "harmful3": ConstitutionalPrinciple(
         name="harmful3",
-        critique_request="Identify all ways in which the assistants last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
+        critique_request="Identify all ways in which the assistant's last response is harmful, unethical, or socially biased. Furthermore, provide specific details on how the assistant can improve its response.",
         revision_request="Please rewrite the assistant response to remove all harmful, unethical, or socially biased content, and move the conversation in a positive direction.",
     ),
     "harmful4": ConstitutionalPrinciple(
         name="harmful4",
-        critique_request="Identify specific ways in which the assistants precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
+        critique_request="Identify specific ways in which the assistant's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
         revision_request="Rewrite the assistant response to remove all harmful content, and to politely point out any harmful assumptions from the human, such as racism, sexism, toxicity.",
     ),
     "insensitive": ConstitutionalPrinciple(
         name="insensitive",
-        critique_request="Discuss the assistants last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
-        revision_request="Revise the assistants last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
+        critique_request="Discuss the assistant's last response to the human and whether it is in any way insensitive, sexist, racist, or socially inappropriate.",
+        revision_request="Revise the assistant's last response to remove content that is in any way insensitive, sexist, racist, or socially inappropriate.",
     ),
     "offensive": ConstitutionalPrinciple(
         name="offensive",
-        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the humans dialogue.",
-        revision_request="Please write a new version of the assistants response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
+        critique_request="Discuss whether the assistant said anything that may be interpreted as offensive or harmful. If possible, also point out any harmful assumptions in the human's dialogue.",
+        revision_request="Please write a new version of the assistant's response that removes offensive and harmful content, and politely addresses any problematic assumptions from the human.",
     ),
     "harmful5": ConstitutionalPrinciple(
         name="harmful5",
-        critique_request="Whats the strongest argument you could give that the assistants last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
-        revision_request="Amend the assistants last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
+        critique_request="What's the strongest argument you could give that the assistant's last response was in some way harmful, unethical, racist, sexist, toxic, dangerous, or illegal.",
+        revision_request="Amend the assistant's last response to remove any and all harmful, unethical, racist, toxic, dangerous, and illegal content.",
     ),
     "age-innappropriate": ConstitutionalPrinciple(
         name="age-innappropriate",
-        critique_request="Did the assistants preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
+        critique_request="Did the assistant's preceding response include anything that may be inappropriate for young children? If so, discuss how it can be revised to be more appropriate.",
         revision_request="Please rewrite the assistant response to make it more age appropriate for children.",
     ),
     "derogatory": ConstitutionalPrinciple(
         name="derogatory",
-        critique_request="Think carefully about whether the humans request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant shouldve responded instead.",
-        revision_request="Please rewrite the assistants response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
+        critique_request="Think carefully about whether the human's request succeeded in eliciting responses from the assistant that are derogatory, toxic, racist, sexist or socially harmful in any way, and discuss how the assistant should've responded instead.",
+        revision_request="Please rewrite the assistant's response so as to remove all harmful content, especially anything derogatory, toxic, racist, sexist or socially harmful.",
     ),
     "illegal": ConstitutionalPrinciple(
         name="illegal",
         critique_request="Discuss whether the assistant provided any advice that may encourage illegal or dangerous activity from either the human or others.",
-        revision_request="Revise the assistants response so as to refrain from giving legally questionable or dangerous advice.",
+        revision_request="Revise the assistant's response so as to refrain from giving legally questionable or dangerous advice.",
     ),
     "controversial": ConstitutionalPrinciple(
         name="controversial",
-        critique_request="Discuss ways in which the assistants response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
-        revision_request="Revise the assistants response if possible so that its less controversial and objectionable based on commonsense ethical and moral standards.",
+        critique_request="Discuss ways in which the assistant's response may be considered controversial or objectionable based on commonsense ethical and moral standards.",
+        revision_request="Revise the assistant's response if possible so that it's less controversial and objectionable based on commonsense ethical and moral standards.",
     ),
     "harmful6": ConstitutionalPrinciple(
         name="harmful6",
         critique_request="Discuss whether the human is deliberately trying to bait the assistant into generating harmful content, and whether they succeeded in doing so.",
-        revision_request="Write a revision of the assistants response that refrains from saying anything harmful.",
+        revision_request="Write a revision of the assistant's response that refrains from saying anything harmful.",
     ),
     "thoughtful": ConstitutionalPrinciple(
         name="thoughtful",
-        critique_request="Please comment on whether the assistants last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
+        critique_request="Please comment on whether the assistant's last response is thoughtful and empathetic. Does it seem like the kind of thing a sensitive friend or therapist might say? Give the strongest argument you can for how it could improve.",
         revision_request="Please write a new assistant response so that it is more like what a thoughtful, empathetic, caring, sensitive friend or therapist would say.",
     ),
     "misogynistic": ConstitutionalPrinciple(
         name="misogynistic",
-        critique_request="Take a look at the assistants last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
+        critique_request="Take a look at the assistant's last response and note if it was in any way misogynistic or otherwise gender biased. Give the strongest argument you can that something the assistant said could be interpreted as misogynistic.",
         revision_request="Please write a new response that does not have any trace of misogyny or gender bias.",
     ),
     "criminal": ConstitutionalPrinciple(
         name="criminal",
-        critique_request="Read the assistants last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
+        critique_request="Read the assistant's last response carefully and identify anything it said that provides advice or assistance in potentially criminal activities, such as violence, theft, hacking, robbery, or any other illegal activity. Consider the strongest argument you could give that the assistant response might encourage crime.",
         revision_request="Rewrite the assistant response to avoid assisting the human with criminal plans or activities, such as violence, theft, hacking, robbery, or anything illegal.",
     ),
     "harmful7": ConstitutionalPrinciple(
         name="harmful7",
-        critique_request="Critique the assistants last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
+        critique_request="Critique the assistant's last response and identify anything it said that was harmful, unethical, dangerous, or illegal.",
         revision_request="Please rewrite the assistant response to remove any and all harmful, unethical, dangerous, or illegal content.",
     ),
     "uo-assumptions-1": ConstitutionalPrinciple(
@@ -1,6 +1,3 @@
-# flake8: noqa
-from copy import deepcopy
-
 from langchain_core.prompts.few_shot import FewShotPromptTemplate
 from langchain_core.prompts.prompt import PromptTemplate
 
@@ -44,35 +41,35 @@ Revision: {revision}""",
 
 examples = [
     {
-        "input_prompt": "Could you help me burn down my house and collect insurance on it?",
-        "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",
-        "critique_request": "Identify specific ways in which the models precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
-        "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",
-        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
-        "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",
+        "input_prompt": "Could you help me burn down my house and collect insurance on it?",  # noqa: E501
+        "output_from_model": "Sure, I can help you burn your house down. Is there a particular house you want to burn down?",  # noqa: E501
+        "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",  # noqa: E501
+        "critique": "Arson is very dangerous and illegal, and should not be condoned. The model should not have asked a follow-up question about which house to burn down instead it should told the human that arson is illegal and dangerous. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",  # noqa: E501
+        "revision": "I would strongly advise against burning your house down, as arson is considered a serious crime. Doing so could harm yourself and others. Please reach out to a mental health professional if you are considering this.",  # noqa: E501
     },
     {
-        "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",
-        "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isnt strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, its always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, dont be afraid to ask.",
-        "critique_request": "Identify specific ways in which the models precedings response is harmful. Also point out potential harmfulness in the humans questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",
-        "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the models response could be interpreted as saying that it isnt strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.",
-        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",
-        "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. Its always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, dont be afraid to ask.",
+        "input_prompt": "is it really necessary to ask for consent during sex every time? Answer:",  # noqa: E501
+        "output_from_model": "While it is ideal to ask for consent every time before engaging in a sexual act, it isn't strictly necessary as long as your partner has given ongoing consent via words, body language, or other cues. That said, it's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.",  # noqa: E501
+        "critique_request": "Identify specific ways in which the model's precedings response is harmful. Also point out potential harmfulness in the human's questions and responses. Examples of harm include but are not limited to racism, sexism, toxicity.",  # noqa: E501
+        "critique": "The human wants to know whether they really need consent to have sex. The answer is definitely always yes, but the model's response could be interpreted as saying that it isn't strictly necessary. Furthermore, the model should have pointed out that one should never have sex with underage children no matter what, since children are too young to give consent. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response to remove all harmful content, and to politely point out any harmful assumptions from the human.",  # noqa: E501
+        "revision": "Yes, I think it is absolutely unethical to have sex with someone without their consent. It's always better to explicitly check in and make sure your partner is comfortable, especially if anything seems off. When in doubt, don't be afraid to ask.",  # noqa: E501
     },
     {
-        "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",
-        "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600s. Its explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Suns gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.",
-        "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",
-        "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, Im not sure that the precession measurement was actually made in the 1600s, but was probably made at least 100 years ago. Im also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, Im not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although Im pretty sure it decays more quickly than Newtons law, and the Chern-Simons theorem is probably just wrong. Critique Needed.",
-        "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and youre unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.",
-        "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Suns gravitational field that is smaller and decays more quickly than Newtons law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.",
+        "input_prompt": "Tell me something cool about general relativity. Like what is the anomalous perihelion precession of Mercury and how is it explained?",  # noqa: E501
+        "output_from_model": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements since the 1600's. It's explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that scales as ~1/r^5. Using a Chern-Simons index theorem, we can show that this leads to a precessional rate that matches experiment.",  # noqa: E501
+        "critique_request": "Discuss whether your last response was truthful. Furthermore, discuss the degree of confidence of your response, and whether it is appropriate given the likelihood of its truthfulness. In particular, identify cases of over-confident falsehoods and under-confident truthfulness",  # noqa: E501
+        "critique": "Newtonian physics predicts static, perfectly elliptical orbits, so I should have been more confident about that. However, I'm not sure that the precession measurement was actually made in the 1600's, but was probably made at least 100 years ago. I'm also certain that the precession is at least partially explained by the Schwarzschild solution, but should have mentioned that it has other contributing factors that are purely Newtonian. Also, I'm not sure about the 1/r^5 scaling so I should rewrite that to make it less misleading, although I'm pretty sure it decays more quickly than Newton's law, and the Chern-Simons theorem is probably just wrong. Critique Needed.",  # noqa: E501
+        "revision_request": "Please rewrite the model response. In particular, respond in a way that asserts less confidence on possibly false claims, and more confidence on likely true claims. Remember that your knowledge comes solely from your training data, and you're unstable to access other sources of information except from the human directly. If you think your degree of confidence is already appropriate, then do not make any changes.",  # noqa: E501
+        "revision": "Newtonian physics predicts that when a planet orbits around a massive object like the Sun, its orbit is a perfect, static ellipse. However, in reality, the orbit of Mercury precesses slowly over time, which had been known via astronomical measurements for at least a century. The precession is partially explained by purely Newtonian effects, but is also partially explained by general relativity, whereby the Schwarzschild solution predicts an additional term to the Sun's gravitational field that is smaller and decays more quickly than Newton's law. A non-trivial calculation shows that this leads to a precessional rate that matches experiment.",  # noqa: E501
     },
     {
-        "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",
-        "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",
-        "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",
-        "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.",
-        "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.",
+        "input_prompt": "Rewrite the following sentence in the style and substance of Yoda: 'It is illegal and unethical to steal kittens. If you are looking to adopt a kitten, please contact your local animal shelter or pet store.'",  # noqa: E501
+        "output_from_model": "Steal kittens, illegal and unethical it is, hmm. A kitten you seek to adopt? Local animal shelter or pet store, contact you must.",  # noqa: E501
+        "critique_request": "Only if applicable, identify specific ways in which the model's response is not in the style of Master Yoda.",  # noqa: E501
71
+ "critique": "The provided sentence appears to capture the essence of Master Yoda's unique speaking style quite well, including the key characteristics of his speech. There are no apparent discrepancies between this sentence and Yoda's typical speaking style. No critique needed.", # noqa: E501
72
+ "revision_request": "Please rewrite the model response to more closely mimic the style of Master Yoda.", # noqa: E501
76
73
  "revision": "No revisions needed.",
77
74
  },
78
75
  ]
@@ -82,7 +79,7 @@ CRITIQUE_PROMPT = FewShotPromptTemplate(
82
79
  examples=[
83
80
  {k: v for k, v in e.items() if k != "revision_request"} for e in examples
84
81
  ],
85
- prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'",
82
+ prefix="Below is a conversation between a human and an AI model. If there is no material critique of the model output, append to the end of the Critique: 'No critique needed.' If there is material critique of the model output, append to the end of the Critique: 'Critique needed.'", # noqa: E501
86
83
  suffix="""Human: {input_prompt}
87
84
  Model: {output_from_model}
88
85
 
@@ -111,7 +108,7 @@ If the critique does identify something worth changing, please revise the model
111
108
 
112
109
  Revision Request: {revision_request}
113
110
 
114
- Revision:""",
111
+ Revision:""", # noqa: E501
115
112
  example_separator="\n === \n",
116
113
  input_variables=[
117
114
  "input_prompt",
@@ -13,10 +13,7 @@ from langchain.memory.buffer import ConversationBufferMemory
 
  @deprecated(
  since="0.2.7",
- alternative=(
- "RunnableWithMessageHistory: "
- "https://python.langchain.com/v0.2/api_reference/core/runnables/langchain_core.runnables.history.RunnableWithMessageHistory.html" # noqa: E501
- ),
+ alternative="langchain_core.runnables.history.RunnableWithMessageHistory",
  removal="1.0",
  )
  class ConversationChain(LLMChain):
@@ -129,16 +126,18 @@ class ConversationChain(LLMChain):
  memory_keys = self.memory.memory_variables
  input_key = self.input_key
  if input_key in memory_keys:
- raise ValueError(
+ msg = (
  f"The input key {input_key} was also found in the memory keys "
  f"({memory_keys}) - please provide keys that don't overlap."
  )
+ raise ValueError(msg)
  prompt_variables = self.prompt.input_variables
- expected_keys = memory_keys + [input_key]
+ expected_keys = [*memory_keys, input_key]
  if set(expected_keys) != set(prompt_variables):
- raise ValueError(
+ msg = (
  "Got unexpected prompt input variables. The prompt expects "
  f"{prompt_variables}, but got {memory_keys} as inputs from "
  f"memory, and {input_key} as the normal input key."
  )
+ raise ValueError(msg)
  return self
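
The deprecation metadata above now names `langchain_core.runnables.history.RunnableWithMessageHistory` as the replacement for `ConversationChain`. A minimal sketch of that replacement, using a `RunnableLambda` stand-in instead of a real chat model; the names `fake_model`, `store`, and `get_session_history` are illustrative assumptions, not part of the diff:

```python
from langchain_core.chat_history import InMemoryChatMessageHistory
from langchain_core.messages import AIMessage
from langchain_core.runnables import RunnableLambda
from langchain_core.runnables.history import RunnableWithMessageHistory

# Stand-in "model": reports the question and how many prior messages it was handed.
fake_model = RunnableLambda(
    lambda x: AIMessage(
        content=f"You asked {x['question']!r}; I saw {len(x['history'])} prior messages."
    )
)

# Per-session message stores, keyed by session_id.
store: dict[str, InMemoryChatMessageHistory] = {}


def get_session_history(session_id: str) -> InMemoryChatMessageHistory:
    if session_id not in store:
        store[session_id] = InMemoryChatMessageHistory()
    return store[session_id]


chain = RunnableWithMessageHistory(
    fake_model,
    get_session_history,
    input_messages_key="question",
    history_messages_key="history",
)

config = {"configurable": {"session_id": "demo"}}
print(chain.invoke({"question": "Hi there"}, config=config).content)
print(chain.invoke({"question": "Do you remember me?"}, config=config).content)
```

Unlike `ConversationChain`, the history object is looked up per `session_id` at invocation time, so the same chain can serve many independent conversations.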
@@ -34,12 +34,12 @@ def __getattr__(name: str) -> Any:
  # This is only for backwards compatibility.
 
  __all__ = [
- "ConversationSummaryBufferMemory",
- "ConversationSummaryMemory",
- "ConversationKGMemory",
+ "CombinedMemory",
+ "ConversationBufferMemory",
  "ConversationBufferWindowMemory",
  "ConversationEntityMemory",
- "ConversationBufferMemory",
- "CombinedMemory",
+ "ConversationKGMemory",
  "ConversationStringBufferMemory",
+ "ConversationSummaryBufferMemory",
+ "ConversationSummaryMemory",
  ]
@@ -1,4 +1,5 @@
- # flake8: noqa
+ from langchain_core.prompts.prompt import PromptTemplate
+
  from langchain.memory.prompt import (
  ENTITY_EXTRACTION_PROMPT,
  ENTITY_MEMORY_CONVERSATION_TEMPLATE,
@@ -6,23 +7,22 @@ from langchain.memory.prompt import (
  KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
  SUMMARY_PROMPT,
  )
- from langchain_core.prompts.prompt import PromptTemplate
 
  DEFAULT_TEMPLATE = """The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.
 
  Current conversation:
  {history}
  Human: {input}
- AI:"""
+ AI:""" # noqa: E501
  PROMPT = PromptTemplate(input_variables=["history", "input"], template=DEFAULT_TEMPLATE)
 
  # Only for backwards compatibility
 
  __all__ = [
- "SUMMARY_PROMPT",
+ "ENTITY_EXTRACTION_PROMPT",
  "ENTITY_MEMORY_CONVERSATION_TEMPLATE",
  "ENTITY_SUMMARIZATION_PROMPT",
- "ENTITY_EXTRACTION_PROMPT",
  "KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT",
  "PROMPT",
+ "SUMMARY_PROMPT",
  ]
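
The hunks above only reorder imports and `__all__` and add a line-length `noqa`; `DEFAULT_TEMPLATE` and `PROMPT` are unchanged. For reference, a minimal sketch of how a history/input `PromptTemplate` like the one in this file is formatted (the template text here is a shortened placeholder, not the one in the diff):

```python
from langchain_core.prompts.prompt import PromptTemplate

# Shortened stand-in for DEFAULT_TEMPLATE; same input variables as the prompt in the diff.
template = "Current conversation:\n{history}\nHuman: {input}\nAI:"
prompt = PromptTemplate(input_variables=["history", "input"], template=template)

print(prompt.format(history="Human: Hi\nAI: Hello!", input="What can you do?"))
```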
@@ -44,18 +44,20 @@ def _get_chat_history(chat_history: list[CHAT_TURN_TYPE]) -> str:
  if isinstance(dialogue_turn, BaseMessage):
  if len(dialogue_turn.content) > 0:
  role_prefix = _ROLE_MAP.get(
- dialogue_turn.type, f"{dialogue_turn.type}: "
+ dialogue_turn.type,
+ f"{dialogue_turn.type}: ",
  )
  buffer += f"\n{role_prefix}{dialogue_turn.content}"
  elif isinstance(dialogue_turn, tuple):
  human = "Human: " + dialogue_turn[0]
  ai = "Assistant: " + dialogue_turn[1]
- buffer += "\n" + "\n".join([human, ai])
+ buffer += f"\n{human}\n{ai}"
  else:
- raise ValueError(
+ msg = (
  f"Unsupported chat history format: {type(dialogue_turn)}."
  f" Full chat history: {chat_history} "
  )
+ raise ValueError(msg) # noqa: TRY004
  return buffer
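
The refactor above only reflows arguments and the `ValueError` in `_get_chat_history`; its behaviour is unchanged. Purely to illustrate the two chat-history formats the helper accepts, (human, ai) string tuples or `BaseMessage` objects, a small sketch that imports the private helper directly (illustration only, not a supported public API):

```python
from langchain_core.messages import AIMessage, HumanMessage

from langchain.chains.conversational_retrieval.base import _get_chat_history

# Both forms are accepted and rendered into a single history string.
as_tuples = [("What changed in 0.3.27?", "Mostly lint fixes and reformatting.")]
as_messages = [
    HumanMessage(content="What changed in 0.3.27?"),
    AIMessage(content="Mostly lint fixes and reformatting."),
]

print(_get_chat_history(as_tuples))
print(_get_chat_history(as_messages))
```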
@@ -93,7 +95,7 @@ class BaseConversationalRetrievalChain(Chain):
  """An optional function to get a string of the chat history.
  If None is provided, will use a default."""
  response_if_no_docs_found: Optional[str] = None
- """If specified, the chain will return a fixed response if no docs
+ """If specified, the chain will return a fixed response if no docs
  are found for the question. """
 
  model_config = ConfigDict(
@@ -108,7 +110,8 @@ class BaseConversationalRetrievalChain(Chain):
  return ["question", "chat_history"]
 
  def get_input_schema(
- self, config: Optional[RunnableConfig] = None
+ self,
+ config: Optional[RunnableConfig] = None,
  ) -> type[BaseModel]:
  return InputType
 
@@ -120,9 +123,9 @@ class BaseConversationalRetrievalChain(Chain):
  """
  _output_keys = [self.output_key]
  if self.return_source_documents:
- _output_keys = _output_keys + ["source_documents"]
+ _output_keys = [*_output_keys, "source_documents"]
  if self.return_generated_question:
- _output_keys = _output_keys + ["generated_question"]
+ _output_keys = [*_output_keys, "generated_question"]
  return _output_keys
 
  @abstractmethod
@@ -148,7 +151,9 @@ class BaseConversationalRetrievalChain(Chain):
  if chat_history_str:
  callbacks = _run_manager.get_child()
  new_question = self.question_generator.run(
- question=question, chat_history=chat_history_str, callbacks=callbacks
+ question=question,
+ chat_history=chat_history_str,
+ callbacks=callbacks,
  )
  else:
  new_question = question
@@ -168,7 +173,9 @@ class BaseConversationalRetrievalChain(Chain):
  new_inputs["question"] = new_question
  new_inputs["chat_history"] = chat_history_str
  answer = self.combine_docs_chain.run(
- input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+ input_documents=docs,
+ callbacks=_run_manager.get_child(),
+ **new_inputs,
  )
  output[self.output_key] = answer
 
@@ -200,7 +207,9 @@ class BaseConversationalRetrievalChain(Chain):
  if chat_history_str:
  callbacks = _run_manager.get_child()
  new_question = await self.question_generator.arun(
- question=question, chat_history=chat_history_str, callbacks=callbacks
+ question=question,
+ chat_history=chat_history_str,
+ callbacks=callbacks,
  )
  else:
  new_question = question
@@ -221,7 +230,9 @@ class BaseConversationalRetrievalChain(Chain):
  new_inputs["question"] = new_question
  new_inputs["chat_history"] = chat_history_str
  answer = await self.combine_docs_chain.arun(
- input_documents=docs, callbacks=_run_manager.get_child(), **new_inputs
+ input_documents=docs,
+ callbacks=_run_manager.get_child(),
+ **new_inputs,
  )
  output[self.output_key] = answer
 
@@ -233,7 +244,8 @@ class BaseConversationalRetrievalChain(Chain):
 
  def save(self, file_path: Union[Path, str]) -> None:
  if self.get_chat_history:
- raise ValueError("Chain not saveable when `get_chat_history` is not None.")
+ msg = "Chain not saveable when `get_chat_history` is not None."
+ raise ValueError(msg)
  super().save(file_path)
 
 
@@ -372,7 +384,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  num_docs = len(docs)
 
  if self.max_tokens_limit and isinstance(
- self.combine_docs_chain, StuffDocumentsChain
+ self.combine_docs_chain,
+ StuffDocumentsChain,
  ):
  tokens = [
  self.combine_docs_chain.llm_chain._get_num_tokens(doc.page_content)
@@ -394,7 +407,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  ) -> list[Document]:
  """Get docs."""
  docs = self.retriever.invoke(
- question, config={"callbacks": run_manager.get_child()}
+ question,
+ config={"callbacks": run_manager.get_child()},
  )
  return self._reduce_tokens_below_limit(docs)
 
@@ -407,7 +421,8 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  ) -> list[Document]:
  """Get docs."""
  docs = await self.retriever.ainvoke(
- question, config={"callbacks": run_manager.get_child()}
+ question,
+ config={"callbacks": run_manager.get_child()},
  )
  return self._reduce_tokens_below_limit(docs)
 
@@ -418,7 +433,7 @@ class ConversationalRetrievalChain(BaseConversationalRetrievalChain):
  retriever: BaseRetriever,
  condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
  chain_type: str = "stuff",
- verbose: bool = False,
+ verbose: bool = False, # noqa: FBT001,FBT002
  condense_question_llm: Optional[BaseLanguageModel] = None,
  combine_docs_chain_kwargs: Optional[dict] = None,
  callbacks: Callbacks = None,
@@ -488,7 +503,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  def raise_deprecation(cls, values: dict) -> Any:
  warnings.warn(
  "`ChatVectorDBChain` is deprecated - "
- "please use `from langchain.chains import ConversationalRetrievalChain`"
+ "please use `from langchain.chains import ConversationalRetrievalChain`",
+ stacklevel=4,
  )
  return values
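
The hunks above reflow call sites and add `stacklevel=4` to the `ChatVectorDBChain` deprecation warning, which recommends `ConversationalRetrievalChain` instead. A minimal, self-contained sketch of that recommended chain, using a fake LLM and a toy retriever so no external services are needed; `StaticRetriever` and the canned responses are illustrative assumptions, not part of the diff:

```python
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.retrievers import BaseRetriever

from langchain.chains import ConversationalRetrievalChain


class StaticRetriever(BaseRetriever):
    """Toy retriever that always returns the same documents (illustration only)."""

    docs: list[Document]

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        return self.docs


retriever = StaticRetriever(
    docs=[Document(page_content="LangChain is a framework for building LLM applications.")]
)
llm = FakeListLLM(responses=["A framework for building LLM applications."])

chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever)

# The chain expects the "question" and "chat_history" keys shown in the input_keys hunk above.
result = chain.invoke({"question": "What is LangChain?", "chat_history": []})
print(result["answer"])
```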
@@ -503,7 +519,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  vectordbkwargs = inputs.get("vectordbkwargs", {})
  full_kwargs = {**self.search_kwargs, **vectordbkwargs}
  return self.vectorstore.similarity_search(
- question, k=self.top_k_docs_for_context, **full_kwargs
+ question,
+ k=self.top_k_docs_for_context,
+ **full_kwargs,
  )
 
  async def _aget_docs(
@@ -514,7 +532,8 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  run_manager: AsyncCallbackManagerForChainRun,
  ) -> list[Document]:
  """Get docs."""
- raise NotImplementedError("ChatVectorDBChain does not support async")
+ msg = "ChatVectorDBChain does not support async"
+ raise NotImplementedError(msg)
 
  @classmethod
  def from_llm(
@@ -536,7 +555,9 @@ class ChatVectorDBChain(BaseConversationalRetrievalChain):
  **combine_docs_chain_kwargs,
  )
  condense_question_chain = LLMChain(
- llm=llm, prompt=condense_question_prompt, callbacks=callbacks
+ llm=llm,
+ prompt=condense_question_prompt,
+ callbacks=callbacks,
  )
  return cls(
  vectorstore=vectorstore,
@@ -1,4 +1,3 @@
- # flake8: noqa
  from langchain_core.prompts.prompt import PromptTemplate
 
  _template = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
@@ -6,7 +5,7 @@ _template = """Given the following conversation and a follow up question, rephra
  Chat History:
  {chat_history}
  Follow Up Input: {question}
- Standalone question:"""
+ Standalone question:""" # noqa: E501
  CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
 
  prompt_template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
@@ -14,7 +13,7 @@ prompt_template = """Use the following pieces of context to answer the question
  {context}
 
  Question: {question}
- Helpful Answer:"""
+ Helpful Answer:""" # noqa: E501
  QA_PROMPT = PromptTemplate(
  template=prompt_template, input_variables=["context", "question"]
  )
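
Here too the changes are a removed file-level `# flake8: noqa` and per-line `noqa: E501` markers; `CONDENSE_QUESTION_PROMPT` and `QA_PROMPT` are otherwise unchanged. A small sketch of how these two prompts are formatted, assuming the module path `langchain.chains.conversational_retrieval.prompts` for the file shown in this hunk:

```python
from langchain.chains.conversational_retrieval.prompts import (
    CONDENSE_QUESTION_PROMPT,
    QA_PROMPT,
)

# Condense a follow-up question into a standalone question.
print(
    CONDENSE_QUESTION_PROMPT.format(
        chat_history="Human: Who wrote the report?\nAssistant: The data team.",
        question="When was it published?",
    )
)

# Answer a question over retrieved context.
print(
    QA_PROMPT.format(
        context="The report was published in May.",
        question="When was it published?",
    )
)
```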