langchain 0.3.26__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of langchain might be problematic.

Files changed (580)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +160 -133
  9. langchain/agents/agent_iterator.py +31 -14
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +6 -6
  48. langchain/agents/chat/base.py +6 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +9 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +7 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +16 -8
  63. langchain/agents/json_chat/base.py +18 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +9 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +80 -70
  71. langchain/agents/openai_functions_agent/base.py +46 -37
  72. langchain/agents/openai_functions_multi_agent/base.py +39 -26
  73. langchain/agents/openai_tools/base.py +8 -8
  74. langchain/agents/output_parsers/__init__.py +3 -3
  75. langchain/agents/output_parsers/json.py +6 -6
  76. langchain/agents/output_parsers/openai_functions.py +15 -7
  77. langchain/agents/output_parsers/openai_tools.py +9 -4
  78. langchain/agents/output_parsers/react_json_single_input.py +10 -5
  79. langchain/agents/output_parsers/react_single_input.py +15 -11
  80. langchain/agents/output_parsers/self_ask.py +3 -2
  81. langchain/agents/output_parsers/tools.py +18 -13
  82. langchain/agents/output_parsers/xml.py +99 -28
  83. langchain/agents/react/agent.py +4 -4
  84. langchain/agents/react/base.py +22 -17
  85. langchain/agents/react/output_parser.py +5 -6
  86. langchain/agents/react/textworld_prompt.py +0 -1
  87. langchain/agents/react/wiki_prompt.py +14 -15
  88. langchain/agents/schema.py +3 -2
  89. langchain/agents/self_ask_with_search/base.py +19 -15
  90. langchain/agents/self_ask_with_search/prompt.py +0 -1
  91. langchain/agents/structured_chat/base.py +14 -11
  92. langchain/agents/structured_chat/output_parser.py +16 -18
  93. langchain/agents/structured_chat/prompt.py +3 -4
  94. langchain/agents/tool_calling_agent/base.py +7 -6
  95. langchain/agents/tools.py +2 -2
  96. langchain/agents/utils.py +2 -3
  97. langchain/agents/xml/base.py +5 -5
  98. langchain/agents/xml/prompt.py +1 -2
  99. langchain/cache.py +12 -12
  100. langchain/callbacks/__init__.py +11 -11
  101. langchain/callbacks/aim_callback.py +2 -2
  102. langchain/callbacks/argilla_callback.py +1 -1
  103. langchain/callbacks/arize_callback.py +1 -1
  104. langchain/callbacks/arthur_callback.py +1 -1
  105. langchain/callbacks/base.py +7 -7
  106. langchain/callbacks/clearml_callback.py +1 -1
  107. langchain/callbacks/comet_ml_callback.py +1 -1
  108. langchain/callbacks/confident_callback.py +1 -1
  109. langchain/callbacks/context_callback.py +1 -1
  110. langchain/callbacks/flyte_callback.py +1 -1
  111. langchain/callbacks/human.py +2 -2
  112. langchain/callbacks/infino_callback.py +1 -1
  113. langchain/callbacks/labelstudio_callback.py +1 -1
  114. langchain/callbacks/llmonitor_callback.py +1 -1
  115. langchain/callbacks/manager.py +5 -5
  116. langchain/callbacks/mlflow_callback.py +2 -2
  117. langchain/callbacks/openai_info.py +1 -1
  118. langchain/callbacks/promptlayer_callback.py +1 -1
  119. langchain/callbacks/sagemaker_callback.py +1 -1
  120. langchain/callbacks/streaming_aiter.py +4 -1
  121. langchain/callbacks/streaming_aiter_final_only.py +5 -3
  122. langchain/callbacks/streaming_stdout_final_only.py +5 -3
  123. langchain/callbacks/streamlit/__init__.py +3 -2
  124. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  125. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  126. langchain/callbacks/tracers/__init__.py +1 -1
  127. langchain/callbacks/tracers/comet.py +1 -1
  128. langchain/callbacks/tracers/evaluation.py +1 -1
  129. langchain/callbacks/tracers/log_stream.py +1 -1
  130. langchain/callbacks/tracers/logging.py +1 -1
  131. langchain/callbacks/tracers/stdout.py +1 -1
  132. langchain/callbacks/trubrics_callback.py +1 -1
  133. langchain/callbacks/utils.py +4 -4
  134. langchain/callbacks/wandb_callback.py +1 -1
  135. langchain/callbacks/whylabs_callback.py +1 -1
  136. langchain/chains/api/base.py +36 -22
  137. langchain/chains/api/news_docs.py +1 -2
  138. langchain/chains/api/open_meteo_docs.py +1 -2
  139. langchain/chains/api/openapi/requests_chain.py +1 -1
  140. langchain/chains/api/openapi/response_chain.py +1 -1
  141. langchain/chains/api/podcast_docs.py +1 -2
  142. langchain/chains/api/prompt.py +1 -2
  143. langchain/chains/api/tmdb_docs.py +1 -2
  144. langchain/chains/base.py +88 -54
  145. langchain/chains/chat_vector_db/prompts.py +2 -3
  146. langchain/chains/combine_documents/__init__.py +1 -1
  147. langchain/chains/combine_documents/base.py +23 -10
  148. langchain/chains/combine_documents/map_reduce.py +38 -30
  149. langchain/chains/combine_documents/map_rerank.py +33 -20
  150. langchain/chains/combine_documents/reduce.py +47 -26
  151. langchain/chains/combine_documents/refine.py +26 -17
  152. langchain/chains/combine_documents/stuff.py +19 -12
  153. langchain/chains/constitutional_ai/base.py +4 -4
  154. langchain/chains/constitutional_ai/principles.py +22 -25
  155. langchain/chains/constitutional_ai/prompts.py +25 -28
  156. langchain/chains/conversation/base.py +5 -3
  157. langchain/chains/conversation/memory.py +5 -5
  158. langchain/chains/conversation/prompt.py +5 -5
  159. langchain/chains/conversational_retrieval/base.py +41 -20
  160. langchain/chains/conversational_retrieval/prompts.py +2 -3
  161. langchain/chains/elasticsearch_database/base.py +8 -9
  162. langchain/chains/elasticsearch_database/prompts.py +2 -3
  163. langchain/chains/ernie_functions/__init__.py +2 -2
  164. langchain/chains/example_generator.py +3 -1
  165. langchain/chains/flare/base.py +26 -12
  166. langchain/chains/graph_qa/cypher.py +2 -2
  167. langchain/chains/graph_qa/falkordb.py +1 -1
  168. langchain/chains/graph_qa/gremlin.py +1 -1
  169. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  170. langchain/chains/graph_qa/prompts.py +2 -2
  171. langchain/chains/history_aware_retriever.py +2 -1
  172. langchain/chains/hyde/base.py +6 -5
  173. langchain/chains/hyde/prompts.py +5 -6
  174. langchain/chains/llm.py +77 -61
  175. langchain/chains/llm_bash/__init__.py +2 -1
  176. langchain/chains/llm_checker/base.py +7 -5
  177. langchain/chains/llm_checker/prompt.py +3 -4
  178. langchain/chains/llm_math/base.py +16 -9
  179. langchain/chains/llm_math/prompt.py +1 -2
  180. langchain/chains/llm_summarization_checker/base.py +9 -6
  181. langchain/chains/llm_symbolic_math/__init__.py +2 -1
  182. langchain/chains/loading.py +151 -95
  183. langchain/chains/mapreduce.py +4 -3
  184. langchain/chains/moderation.py +8 -9
  185. langchain/chains/natbot/base.py +8 -8
  186. langchain/chains/natbot/crawler.py +73 -76
  187. langchain/chains/natbot/prompt.py +2 -3
  188. langchain/chains/openai_functions/__init__.py +7 -7
  189. langchain/chains/openai_functions/base.py +13 -10
  190. langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
  191. langchain/chains/openai_functions/extraction.py +19 -19
  192. langchain/chains/openai_functions/openapi.py +35 -35
  193. langchain/chains/openai_functions/qa_with_structure.py +19 -12
  194. langchain/chains/openai_functions/tagging.py +2 -4
  195. langchain/chains/openai_tools/extraction.py +7 -8
  196. langchain/chains/qa_generation/base.py +4 -3
  197. langchain/chains/qa_generation/prompt.py +5 -5
  198. langchain/chains/qa_with_sources/base.py +14 -6
  199. langchain/chains/qa_with_sources/loading.py +16 -8
  200. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  201. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  202. langchain/chains/qa_with_sources/retrieval.py +14 -5
  203. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  204. langchain/chains/qa_with_sources/vector_db.py +17 -6
  205. langchain/chains/query_constructor/base.py +34 -33
  206. langchain/chains/query_constructor/ir.py +4 -4
  207. langchain/chains/query_constructor/parser.py +37 -32
  208. langchain/chains/query_constructor/prompt.py +5 -6
  209. langchain/chains/question_answering/chain.py +21 -10
  210. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  211. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  212. langchain/chains/question_answering/refine_prompts.py +2 -5
  213. langchain/chains/question_answering/stuff_prompt.py +5 -5
  214. langchain/chains/retrieval.py +1 -3
  215. langchain/chains/retrieval_qa/base.py +34 -27
  216. langchain/chains/retrieval_qa/prompt.py +1 -2
  217. langchain/chains/router/__init__.py +3 -3
  218. langchain/chains/router/base.py +24 -20
  219. langchain/chains/router/embedding_router.py +12 -8
  220. langchain/chains/router/llm_router.py +17 -16
  221. langchain/chains/router/multi_prompt.py +2 -2
  222. langchain/chains/router/multi_retrieval_qa.py +10 -5
  223. langchain/chains/sequential.py +30 -18
  224. langchain/chains/sql_database/prompt.py +14 -16
  225. langchain/chains/sql_database/query.py +6 -5
  226. langchain/chains/structured_output/__init__.py +1 -1
  227. langchain/chains/structured_output/base.py +75 -67
  228. langchain/chains/summarize/chain.py +11 -5
  229. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  230. langchain/chains/summarize/stuff_prompt.py +0 -1
  231. langchain/chains/transform.py +5 -6
  232. langchain/chat_loaders/facebook_messenger.py +1 -1
  233. langchain/chat_loaders/langsmith.py +1 -1
  234. langchain/chat_loaders/utils.py +3 -3
  235. langchain/chat_models/__init__.py +20 -19
  236. langchain/chat_models/anthropic.py +1 -1
  237. langchain/chat_models/azureml_endpoint.py +1 -1
  238. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  239. langchain/chat_models/base.py +160 -123
  240. langchain/chat_models/bedrock.py +1 -1
  241. langchain/chat_models/fake.py +1 -1
  242. langchain/chat_models/meta.py +1 -1
  243. langchain/chat_models/pai_eas_endpoint.py +1 -1
  244. langchain/chat_models/promptlayer_openai.py +1 -1
  245. langchain/chat_models/volcengine_maas.py +1 -1
  246. langchain/docstore/base.py +1 -1
  247. langchain/document_loaders/__init__.py +9 -9
  248. langchain/document_loaders/airbyte.py +3 -3
  249. langchain/document_loaders/assemblyai.py +1 -1
  250. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  251. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  252. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  253. langchain/document_loaders/base.py +1 -1
  254. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  255. langchain/document_loaders/blockchain.py +1 -1
  256. langchain/document_loaders/chatgpt.py +1 -1
  257. langchain/document_loaders/college_confidential.py +1 -1
  258. langchain/document_loaders/confluence.py +1 -1
  259. langchain/document_loaders/email.py +1 -1
  260. langchain/document_loaders/facebook_chat.py +1 -1
  261. langchain/document_loaders/markdown.py +1 -1
  262. langchain/document_loaders/notebook.py +1 -1
  263. langchain/document_loaders/org_mode.py +1 -1
  264. langchain/document_loaders/parsers/__init__.py +1 -1
  265. langchain/document_loaders/parsers/docai.py +1 -1
  266. langchain/document_loaders/parsers/generic.py +1 -1
  267. langchain/document_loaders/parsers/html/__init__.py +1 -1
  268. langchain/document_loaders/parsers/html/bs4.py +1 -1
  269. langchain/document_loaders/parsers/language/cobol.py +1 -1
  270. langchain/document_loaders/parsers/language/python.py +1 -1
  271. langchain/document_loaders/parsers/msword.py +1 -1
  272. langchain/document_loaders/parsers/pdf.py +5 -5
  273. langchain/document_loaders/parsers/registry.py +1 -1
  274. langchain/document_loaders/pdf.py +8 -8
  275. langchain/document_loaders/powerpoint.py +1 -1
  276. langchain/document_loaders/pyspark_dataframe.py +1 -1
  277. langchain/document_loaders/telegram.py +2 -2
  278. langchain/document_loaders/tencent_cos_directory.py +1 -1
  279. langchain/document_loaders/unstructured.py +5 -5
  280. langchain/document_loaders/url_playwright.py +1 -1
  281. langchain/document_loaders/whatsapp_chat.py +1 -1
  282. langchain/document_loaders/youtube.py +2 -2
  283. langchain/document_transformers/__init__.py +3 -3
  284. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  285. langchain/document_transformers/doctran_text_extract.py +1 -1
  286. langchain/document_transformers/doctran_text_qa.py +1 -1
  287. langchain/document_transformers/doctran_text_translate.py +1 -1
  288. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  289. langchain/document_transformers/google_translate.py +1 -1
  290. langchain/document_transformers/html2text.py +1 -1
  291. langchain/document_transformers/nuclia_text_transform.py +1 -1
  292. langchain/embeddings/__init__.py +5 -5
  293. langchain/embeddings/base.py +33 -24
  294. langchain/embeddings/cache.py +36 -31
  295. langchain/embeddings/fake.py +1 -1
  296. langchain/embeddings/huggingface.py +2 -2
  297. langchain/evaluation/__init__.py +22 -22
  298. langchain/evaluation/agents/trajectory_eval_chain.py +23 -23
  299. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  300. langchain/evaluation/comparison/__init__.py +1 -1
  301. langchain/evaluation/comparison/eval_chain.py +20 -13
  302. langchain/evaluation/comparison/prompt.py +1 -2
  303. langchain/evaluation/criteria/__init__.py +1 -1
  304. langchain/evaluation/criteria/eval_chain.py +20 -11
  305. langchain/evaluation/criteria/prompt.py +2 -3
  306. langchain/evaluation/embedding_distance/base.py +23 -20
  307. langchain/evaluation/loading.py +15 -11
  308. langchain/evaluation/parsing/base.py +4 -1
  309. langchain/evaluation/parsing/json_distance.py +5 -2
  310. langchain/evaluation/parsing/json_schema.py +12 -8
  311. langchain/evaluation/qa/__init__.py +1 -1
  312. langchain/evaluation/qa/eval_chain.py +12 -5
  313. langchain/evaluation/qa/eval_prompt.py +7 -8
  314. langchain/evaluation/qa/generate_chain.py +2 -1
  315. langchain/evaluation/qa/generate_prompt.py +2 -4
  316. langchain/evaluation/schema.py +38 -30
  317. langchain/evaluation/scoring/__init__.py +1 -1
  318. langchain/evaluation/scoring/eval_chain.py +22 -15
  319. langchain/evaluation/scoring/prompt.py +0 -1
  320. langchain/evaluation/string_distance/base.py +14 -9
  321. langchain/globals.py +12 -11
  322. langchain/graphs/__init__.py +6 -6
  323. langchain/graphs/graph_document.py +1 -1
  324. langchain/graphs/networkx_graph.py +2 -2
  325. langchain/hub.py +9 -11
  326. langchain/indexes/__init__.py +3 -3
  327. langchain/indexes/_sql_record_manager.py +63 -46
  328. langchain/indexes/prompts/entity_extraction.py +1 -2
  329. langchain/indexes/prompts/entity_summarization.py +1 -2
  330. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  331. langchain/indexes/vectorstore.py +35 -19
  332. langchain/llms/__init__.py +13 -13
  333. langchain/llms/ai21.py +1 -1
  334. langchain/llms/azureml_endpoint.py +4 -4
  335. langchain/llms/base.py +15 -7
  336. langchain/llms/bedrock.py +1 -1
  337. langchain/llms/cloudflare_workersai.py +1 -1
  338. langchain/llms/gradient_ai.py +1 -1
  339. langchain/llms/loading.py +1 -1
  340. langchain/llms/openai.py +1 -1
  341. langchain/llms/sagemaker_endpoint.py +1 -1
  342. langchain/load/dump.py +1 -1
  343. langchain/load/load.py +1 -1
  344. langchain/load/serializable.py +3 -3
  345. langchain/memory/__init__.py +3 -3
  346. langchain/memory/buffer.py +9 -7
  347. langchain/memory/chat_memory.py +14 -8
  348. langchain/memory/chat_message_histories/__init__.py +1 -1
  349. langchain/memory/chat_message_histories/astradb.py +1 -1
  350. langchain/memory/chat_message_histories/cassandra.py +1 -1
  351. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  352. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  353. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  354. langchain/memory/chat_message_histories/file.py +1 -1
  355. langchain/memory/chat_message_histories/firestore.py +1 -1
  356. langchain/memory/chat_message_histories/momento.py +1 -1
  357. langchain/memory/chat_message_histories/mongodb.py +1 -1
  358. langchain/memory/chat_message_histories/neo4j.py +1 -1
  359. langchain/memory/chat_message_histories/postgres.py +1 -1
  360. langchain/memory/chat_message_histories/redis.py +1 -1
  361. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  362. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  363. langchain/memory/chat_message_histories/streamlit.py +1 -1
  364. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  365. langchain/memory/chat_message_histories/xata.py +1 -1
  366. langchain/memory/chat_message_histories/zep.py +1 -1
  367. langchain/memory/combined.py +13 -12
  368. langchain/memory/entity.py +84 -61
  369. langchain/memory/prompt.py +10 -11
  370. langchain/memory/readonly.py +0 -2
  371. langchain/memory/simple.py +1 -3
  372. langchain/memory/summary.py +13 -11
  373. langchain/memory/summary_buffer.py +17 -8
  374. langchain/memory/utils.py +3 -2
  375. langchain/memory/vectorstore.py +12 -5
  376. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  377. langchain/model_laboratory.py +12 -11
  378. langchain/output_parsers/__init__.py +4 -4
  379. langchain/output_parsers/boolean.py +7 -4
  380. langchain/output_parsers/combining.py +10 -5
  381. langchain/output_parsers/datetime.py +32 -31
  382. langchain/output_parsers/enum.py +5 -3
  383. langchain/output_parsers/fix.py +52 -52
  384. langchain/output_parsers/format_instructions.py +6 -8
  385. langchain/output_parsers/json.py +2 -2
  386. langchain/output_parsers/list.py +2 -2
  387. langchain/output_parsers/loading.py +9 -9
  388. langchain/output_parsers/openai_functions.py +3 -3
  389. langchain/output_parsers/openai_tools.py +1 -1
  390. langchain/output_parsers/pandas_dataframe.py +43 -47
  391. langchain/output_parsers/prompts.py +1 -2
  392. langchain/output_parsers/rail_parser.py +1 -1
  393. langchain/output_parsers/regex.py +7 -8
  394. langchain/output_parsers/regex_dict.py +7 -10
  395. langchain/output_parsers/retry.py +77 -78
  396. langchain/output_parsers/structured.py +11 -6
  397. langchain/output_parsers/yaml.py +15 -11
  398. langchain/prompts/__init__.py +5 -3
  399. langchain/prompts/base.py +5 -5
  400. langchain/prompts/chat.py +8 -8
  401. langchain/prompts/example_selector/__init__.py +3 -1
  402. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  403. langchain/prompts/few_shot.py +1 -1
  404. langchain/prompts/loading.py +3 -3
  405. langchain/prompts/prompt.py +1 -1
  406. langchain/retrievers/__init__.py +5 -5
  407. langchain/retrievers/bedrock.py +2 -2
  408. langchain/retrievers/bm25.py +1 -1
  409. langchain/retrievers/contextual_compression.py +14 -8
  410. langchain/retrievers/docarray.py +1 -1
  411. langchain/retrievers/document_compressors/__init__.py +5 -4
  412. langchain/retrievers/document_compressors/base.py +12 -6
  413. langchain/retrievers/document_compressors/chain_extract.py +2 -2
  414. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  415. langchain/retrievers/document_compressors/chain_filter.py +9 -9
  416. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  417. langchain/retrievers/document_compressors/cohere_rerank.py +15 -15
  418. langchain/retrievers/document_compressors/embeddings_filter.py +21 -17
  419. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  420. langchain/retrievers/document_compressors/listwise_rerank.py +7 -5
  421. langchain/retrievers/ensemble.py +28 -25
  422. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  423. langchain/retrievers/google_vertex_ai_search.py +2 -2
  424. langchain/retrievers/kendra.py +10 -10
  425. langchain/retrievers/llama_index.py +1 -1
  426. langchain/retrievers/merger_retriever.py +11 -11
  427. langchain/retrievers/milvus.py +1 -1
  428. langchain/retrievers/multi_query.py +32 -26
  429. langchain/retrievers/multi_vector.py +20 -8
  430. langchain/retrievers/parent_document_retriever.py +18 -9
  431. langchain/retrievers/re_phraser.py +6 -5
  432. langchain/retrievers/self_query/base.py +138 -127
  433. langchain/retrievers/time_weighted_retriever.py +18 -7
  434. langchain/retrievers/zilliz.py +1 -1
  435. langchain/runnables/openai_functions.py +6 -2
  436. langchain/schema/__init__.py +23 -23
  437. langchain/schema/cache.py +1 -1
  438. langchain/schema/callbacks/base.py +7 -7
  439. langchain/schema/callbacks/manager.py +19 -19
  440. langchain/schema/callbacks/tracers/base.py +1 -1
  441. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  442. langchain/schema/callbacks/tracers/langchain.py +1 -1
  443. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  444. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  445. langchain/schema/callbacks/tracers/schemas.py +8 -8
  446. langchain/schema/callbacks/tracers/stdout.py +3 -3
  447. langchain/schema/document.py +1 -1
  448. langchain/schema/language_model.py +2 -2
  449. langchain/schema/messages.py +12 -12
  450. langchain/schema/output.py +3 -3
  451. langchain/schema/output_parser.py +3 -3
  452. langchain/schema/runnable/__init__.py +3 -3
  453. langchain/schema/runnable/base.py +9 -9
  454. langchain/schema/runnable/config.py +5 -5
  455. langchain/schema/runnable/configurable.py +1 -1
  456. langchain/schema/runnable/history.py +1 -1
  457. langchain/schema/runnable/passthrough.py +1 -1
  458. langchain/schema/runnable/utils.py +16 -16
  459. langchain/schema/vectorstore.py +1 -1
  460. langchain/smith/__init__.py +1 -1
  461. langchain/smith/evaluation/__init__.py +2 -2
  462. langchain/smith/evaluation/config.py +10 -7
  463. langchain/smith/evaluation/name_generation.py +3 -3
  464. langchain/smith/evaluation/progress.py +11 -2
  465. langchain/smith/evaluation/runner_utils.py +179 -127
  466. langchain/smith/evaluation/string_run_evaluator.py +75 -68
  467. langchain/storage/__init__.py +2 -2
  468. langchain/storage/_lc_store.py +4 -2
  469. langchain/storage/encoder_backed.py +6 -2
  470. langchain/storage/file_system.py +19 -16
  471. langchain/storage/in_memory.py +1 -1
  472. langchain/storage/upstash_redis.py +1 -1
  473. langchain/text_splitter.py +15 -15
  474. langchain/tools/__init__.py +28 -26
  475. langchain/tools/ainetwork/app.py +1 -1
  476. langchain/tools/ainetwork/base.py +1 -1
  477. langchain/tools/ainetwork/owner.py +1 -1
  478. langchain/tools/ainetwork/rule.py +1 -1
  479. langchain/tools/ainetwork/transfer.py +1 -1
  480. langchain/tools/ainetwork/value.py +1 -1
  481. langchain/tools/amadeus/closest_airport.py +1 -1
  482. langchain/tools/amadeus/flight_search.py +1 -1
  483. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  484. langchain/tools/base.py +4 -4
  485. langchain/tools/bearly/tool.py +1 -1
  486. langchain/tools/bing_search/__init__.py +1 -1
  487. langchain/tools/bing_search/tool.py +1 -1
  488. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  489. langchain/tools/dataforseo_api_search/tool.py +1 -1
  490. langchain/tools/ddg_search/tool.py +1 -1
  491. langchain/tools/e2b_data_analysis/tool.py +2 -2
  492. langchain/tools/edenai/__init__.py +1 -1
  493. langchain/tools/file_management/__init__.py +1 -1
  494. langchain/tools/file_management/copy.py +1 -1
  495. langchain/tools/file_management/delete.py +1 -1
  496. langchain/tools/gmail/__init__.py +2 -2
  497. langchain/tools/gmail/get_message.py +1 -1
  498. langchain/tools/gmail/search.py +1 -1
  499. langchain/tools/gmail/send_message.py +1 -1
  500. langchain/tools/google_finance/__init__.py +1 -1
  501. langchain/tools/google_finance/tool.py +1 -1
  502. langchain/tools/google_scholar/__init__.py +1 -1
  503. langchain/tools/google_scholar/tool.py +1 -1
  504. langchain/tools/google_search/__init__.py +1 -1
  505. langchain/tools/google_search/tool.py +1 -1
  506. langchain/tools/google_serper/__init__.py +1 -1
  507. langchain/tools/google_serper/tool.py +1 -1
  508. langchain/tools/google_trends/__init__.py +1 -1
  509. langchain/tools/google_trends/tool.py +1 -1
  510. langchain/tools/jira/tool.py +20 -1
  511. langchain/tools/json/tool.py +25 -3
  512. langchain/tools/memorize/tool.py +1 -1
  513. langchain/tools/multion/__init__.py +1 -1
  514. langchain/tools/multion/update_session.py +1 -1
  515. langchain/tools/office365/__init__.py +2 -2
  516. langchain/tools/office365/events_search.py +1 -1
  517. langchain/tools/office365/messages_search.py +1 -1
  518. langchain/tools/office365/send_event.py +1 -1
  519. langchain/tools/office365/send_message.py +1 -1
  520. langchain/tools/openapi/utils/api_models.py +6 -6
  521. langchain/tools/playwright/__init__.py +5 -5
  522. langchain/tools/playwright/click.py +1 -1
  523. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  524. langchain/tools/playwright/get_elements.py +1 -1
  525. langchain/tools/playwright/navigate.py +1 -1
  526. langchain/tools/plugin.py +2 -2
  527. langchain/tools/powerbi/tool.py +1 -1
  528. langchain/tools/python/__init__.py +2 -1
  529. langchain/tools/reddit_search/tool.py +1 -1
  530. langchain/tools/render.py +2 -2
  531. langchain/tools/requests/tool.py +2 -2
  532. langchain/tools/searchapi/tool.py +1 -1
  533. langchain/tools/searx_search/tool.py +1 -1
  534. langchain/tools/slack/get_message.py +1 -1
  535. langchain/tools/spark_sql/tool.py +1 -1
  536. langchain/tools/sql_database/tool.py +1 -1
  537. langchain/tools/tavily_search/__init__.py +1 -1
  538. langchain/tools/tavily_search/tool.py +1 -1
  539. langchain/tools/zapier/__init__.py +1 -1
  540. langchain/tools/zapier/tool.py +24 -2
  541. langchain/utilities/__init__.py +4 -4
  542. langchain/utilities/arcee.py +4 -4
  543. langchain/utilities/clickup.py +4 -4
  544. langchain/utilities/dalle_image_generator.py +1 -1
  545. langchain/utilities/dataforseo_api_search.py +1 -1
  546. langchain/utilities/opaqueprompts.py +1 -1
  547. langchain/utilities/reddit_search.py +1 -1
  548. langchain/utilities/sql_database.py +1 -1
  549. langchain/utilities/tavily_search.py +1 -1
  550. langchain/utilities/vertexai.py +2 -2
  551. langchain/utils/__init__.py +1 -1
  552. langchain/utils/aiter.py +1 -1
  553. langchain/utils/html.py +3 -3
  554. langchain/utils/input.py +1 -1
  555. langchain/utils/iter.py +1 -1
  556. langchain/utils/json_schema.py +1 -3
  557. langchain/utils/strings.py +1 -1
  558. langchain/utils/utils.py +6 -6
  559. langchain/vectorstores/__init__.py +5 -5
  560. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  561. langchain/vectorstores/azure_cosmos_db.py +1 -1
  562. langchain/vectorstores/clickhouse.py +1 -1
  563. langchain/vectorstores/elastic_vector_search.py +1 -1
  564. langchain/vectorstores/elasticsearch.py +2 -2
  565. langchain/vectorstores/myscale.py +1 -1
  566. langchain/vectorstores/neo4j_vector.py +1 -1
  567. langchain/vectorstores/pgembedding.py +1 -1
  568. langchain/vectorstores/qdrant.py +1 -1
  569. langchain/vectorstores/redis/__init__.py +1 -1
  570. langchain/vectorstores/redis/base.py +1 -1
  571. langchain/vectorstores/redis/filters.py +4 -4
  572. langchain/vectorstores/redis/schema.py +6 -6
  573. langchain/vectorstores/sklearn.py +2 -2
  574. langchain/vectorstores/starrocks.py +1 -1
  575. langchain/vectorstores/utils.py +1 -1
  576. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/METADATA +4 -4
  577. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/RECORD +580 -580
  578. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
  579. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
  580. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
langchain/memory/utils.py CHANGED
@@ -14,7 +14,8 @@ def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) ->
     """
     # "stop" is a special key that can be passed as input but is not used to
     # format the prompt.
-    prompt_input_keys = list(set(inputs).difference(memory_variables + ["stop"]))
+    prompt_input_keys = list(set(inputs).difference([*memory_variables, "stop"]))
     if len(prompt_input_keys) != 1:
-        raise ValueError(f"One input key expected got {prompt_input_keys}")
+        msg = f"One input key expected got {prompt_input_keys}"
+        raise ValueError(msg)
     return prompt_input_keys[0]
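
For readers skimming the diff: the behavior of get_prompt_input_key is unchanged, only the list construction and the error raising are restyled. A standalone copy of the updated helper (taken from the hunk above, not part of the package contents) with one illustrative call:

# Standalone sketch of the updated helper shown above, plus a usage line.
from typing import Any


def get_prompt_input_key(inputs: dict[str, Any], memory_variables: list[str]) -> str:
    # "stop" and the memory variables are excluded; exactly one key must remain.
    prompt_input_keys = list(set(inputs).difference([*memory_variables, "stop"]))
    if len(prompt_input_keys) != 1:
        msg = f"One input key expected got {prompt_input_keys}"
        raise ValueError(msg)
    return prompt_input_keys[0]


print(get_prompt_input_key({"input": "hi", "history": "", "stop": ["\n"]}, ["history"]))
# -> input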

langchain/memory/vectorstore.py CHANGED
@@ -52,7 +52,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self.input_key

     def _documents_to_memory_variables(
-        self, docs: list[Document]
+        self,
+        docs: list[Document],
     ) -> dict[str, Union[list[Document], str]]:
         result: Union[list[Document], str]
         if not self.return_docs:
@@ -62,7 +63,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return {self.memory_key: result}

     def load_memory_variables(
-        self, inputs: dict[str, Any]
+        self,
+        inputs: dict[str, Any],
     ) -> dict[str, Union[list[Document], str]]:
         """Return history buffer."""
         input_key = self._get_prompt_input_key(inputs)
@@ -71,7 +73,8 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self._documents_to_memory_variables(docs)

     async def aload_memory_variables(
-        self, inputs: dict[str, Any]
+        self,
+        inputs: dict[str, Any],
     ) -> dict[str, Union[list[Document], str]]:
         """Return history buffer."""
         input_key = self._get_prompt_input_key(inputs)
@@ -80,7 +83,9 @@ class VectorStoreRetrieverMemory(BaseMemory):
         return self._documents_to_memory_variables(docs)

     def _form_documents(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
+        self,
+        inputs: dict[str, Any],
+        outputs: dict[str, str],
     ) -> list[Document]:
         """Format context from this conversation to buffer."""
         # Each document should only include the current turn, not the chat history
@@ -100,7 +105,9 @@ class VectorStoreRetrieverMemory(BaseMemory):
         self.retriever.add_documents(documents)

     async def asave_context(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
+        self,
+        inputs: dict[str, Any],
+        outputs: dict[str, str],
     ) -> None:
         """Save context from this conversation to buffer."""
         documents = self._form_documents(inputs, outputs)
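
The five hunks above only reflow method signatures onto multiple lines; behavior is unchanged. For orientation, a rough usage sketch of VectorStoreRetrieverMemory (not part of the diff; InMemoryVectorStore and DeterministicFakeEmbedding from langchain-core are stand-ins for a real vector store and embedding model, and the class may emit a memory deprecation warning on 0.3.x):

# Rough usage sketch, not from the release artifacts.
from langchain_core.embeddings import DeterministicFakeEmbedding
from langchain_core.vectorstores import InMemoryVectorStore

from langchain.memory import VectorStoreRetrieverMemory

store = InMemoryVectorStore(embedding=DeterministicFakeEmbedding(size=32))
memory = VectorStoreRetrieverMemory(
    retriever=store.as_retriever(search_kwargs={"k": 1}),
    memory_key="history",
)

# Save one conversational turn, then load the most relevant turn back.
memory.save_context({"input": "My favorite color is teal"}, {"output": "Noted!"})
print(memory.load_memory_variables({"input": "favorite color?"})["history"])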

langchain/memory/vectorstore_token_buffer_memory.py CHANGED
@@ -23,7 +23,7 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
 DEFAULT_HISTORY_TEMPLATE = """
 Current date and time: {current_time}.

-Potentially relevant timestamped excerpts of previous conversations (you 
+Potentially relevant timestamped excerpts of previous conversations (you
 do not need to use these if irrelevant):
 {previous_history}

@@ -131,13 +131,13 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         previous_history = ""
         current_history = super().load_memory_variables(inputs)
         template = SystemMessagePromptTemplate.from_template(
-            self.previous_history_template
+            self.previous_history_template,
         )
         messages = [
             template.format(
                 previous_history=previous_history,
                 current_time=datetime.now().astimezone().strftime(TIMESTAMP_FORMAT),
-            )
+            ),
         ]
         messages.extend(current_history[self.memory_key])
         return {self.memory_key: messages}
@@ -167,7 +167,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         self._pop_and_store_interaction(buffer)

     def _pop_and_store_interaction(self, buffer: list[BaseMessage]) -> None:
-        input = buffer.pop(0)
+        input_ = buffer.pop(0)
         output = buffer.pop(0)
         timestamp = self._timestamps.pop(0).strftime(TIMESTAMP_FORMAT)
         # Split AI output into smaller chunks to avoid creating documents
@@ -175,7 +175,7 @@ class ConversationVectorStoreTokenBufferMemory(ConversationTokenBufferMemory):
         ai_chunks = self._split_long_ai_text(str(output.content))
         for index, chunk in enumerate(ai_chunks):
             self.memory_retriever.save_context(
-                {"Human": f"<{timestamp}/00> {str(input.content)}"},
+                {"Human": f"<{timestamp}/00> {input_.content!s}"},
                 {"AI": f"<{timestamp}/{index:02}> {chunk}"},
             )


langchain/model_laboratory.py CHANGED
@@ -34,24 +34,26 @@ class ModelLaboratory:
         """
         for chain in chains:
             if not isinstance(chain, Chain):
-                raise ValueError(
+                msg = (
                     "ModelLaboratory should now be initialized with Chains. "
                     "If you want to initialize with LLMs, use the `from_llms` method "
                     "instead (`ModelLaboratory.from_llms(...)`)"
                 )
+                raise ValueError(msg)  # noqa: TRY004
             if len(chain.input_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Currently only support chains with one input variable, "
                     f"got {chain.input_keys}"
                 )
+                raise ValueError(msg)
             if len(chain.output_keys) != 1:
-                raise ValueError(
+                msg = (
                     "Currently only support chains with one output variable, "
                     f"got {chain.output_keys}"
                 )
-        if names is not None:
-            if len(names) != len(chains):
-                raise ValueError("Length of chains does not match length of names.")
+        if names is not None and len(names) != len(chains):
+            msg = "Length of chains does not match length of names."
+            raise ValueError(msg)
         self.chains = chains
         chain_range = [str(i) for i in range(len(self.chains))]
         self.chain_colors = get_color_mapping(chain_range)
@@ -59,7 +61,9 @@ class ModelLaboratory:

     @classmethod
     def from_llms(
-        cls, llms: list[BaseLLM], prompt: Optional[PromptTemplate] = None
+        cls,
+        llms: list[BaseLLM],
+        prompt: Optional[PromptTemplate] = None,
     ) -> ModelLaboratory:
         """Initialize the ModelLaboratory with LLMs and an optional prompt.

@@ -89,10 +93,7 @@ class ModelLaboratory:
         """
         print(f"\033[1mInput:\033[0m\n{text}\n")  # noqa: T201
         for i, chain in enumerate(self.chains):
-            if self.names is not None:
-                name = self.names[i]
-            else:
-                name = str(chain)
+            name = self.names[i] if self.names is not None else str(chain)
             print_text(name, end="\n")
             output = chain.run(text)
             print_text(output, color=self.chain_colors[str(i)], end="\n\n")
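
The ModelLaboratory hunks above move error messages into msg variables, collapse the names check, and simplify name selection; the public behavior of from_llms and compare is unchanged. A hedged usage sketch (not part of the diff; FakeListLLM from langchain-core stands in for real models):

# Hedged usage sketch, not from the release artifacts.
from langchain_core.language_models import FakeListLLM

from langchain.model_laboratory import ModelLaboratory

llms = [
    FakeListLLM(responses=["Paris"]),
    FakeListLLM(responses=["The capital of France is Paris."]),
]
lab = ModelLaboratory.from_llms(llms)
lab.compare("What is the capital of France?")  # prints each model's answer to stdout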

langchain/output_parsers/__init__.py CHANGED
@@ -49,7 +49,7 @@ if TYPE_CHECKING:
 # Used to consolidate logic for raising deprecation warnings and
 # handling optional imports.
 DEPRECATED_LOOKUP = {
-    "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser"
+    "GuardrailsOutputParser": "langchain_community.output_parsers.rail_parser",
 }

 _import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
@@ -67,12 +67,15 @@ __all__ = [
     "DatetimeOutputParser",
     "EnumOutputParser",
     "GuardrailsOutputParser",
+    "JsonOutputKeyToolsParser",
+    "JsonOutputToolsParser",
     "ListOutputParser",
     "MarkdownListOutputParser",
     "NumberedListOutputParser",
     "OutputFixingParser",
     "PandasDataFrameOutputParser",
     "PydanticOutputParser",
+    "PydanticToolsParser",
     "RegexDictParser",
     "RegexParser",
     "ResponseSchema",
@@ -80,8 +83,5 @@ __all__ = [
     "RetryWithErrorOutputParser",
     "StructuredOutputParser",
     "XMLOutputParser",
-    "JsonOutputToolsParser",
-    "PydanticToolsParser",
-    "JsonOutputKeyToolsParser",
     "YamlOutputParser",
 ]

langchain/output_parsers/boolean.py CHANGED
@@ -28,22 +28,25 @@ class BooleanOutputParser(BaseOutputParser[bool]):
         }
         if self.true_val.upper() in truthy:
             if self.false_val.upper() in truthy:
-                raise ValueError(
+                msg = (
                     f"Ambiguous response. Both {self.true_val} and {self.false_val} "
                     f"in received: {text}."
                 )
+                raise ValueError(msg)
             return True
-        elif self.false_val.upper() in truthy:
+        if self.false_val.upper() in truthy:
             if self.true_val.upper() in truthy:
-                raise ValueError(
+                msg = (
                     f"Ambiguous response. Both {self.true_val} and {self.false_val} "
                     f"in received: {text}."
                 )
+                raise ValueError(msg)
             return False
-        raise ValueError(
+        msg = (
             f"BooleanOutputParser expected output value to include either "
             f"{self.true_val} or {self.false_val}. Received {text}."
         )
+        raise ValueError(msg)

     @property
     def _type(self) -> str:
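
The BooleanOutputParser change above only restructures the error raising; parsing behavior is the same. A minimal sketch of that behavior (not from the diff), assuming the default true_val/false_val of YES/NO:

# Minimal behavioral sketch, not part of the diff.
from langchain.output_parsers.boolean import BooleanOutputParser

parser = BooleanOutputParser()      # defaults: true_val="YES", false_val="NO"
print(parser.parse("YES"))          # -> True
print(parser.parse("Answer: NO."))  # -> False
try:
    parser.parse("maybe")           # neither value present
except ValueError as err:
    print(err)                      # message built by the block shown above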

langchain/output_parsers/combining.py CHANGED
@@ -5,6 +5,8 @@ from typing import Any
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.utils import pre_init

+_MIN_PARSERS = 2
+

 class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     """Combine multiple output parsers into one."""
@@ -19,13 +21,16 @@ class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     def validate_parsers(cls, values: dict[str, Any]) -> dict[str, Any]:
         """Validate the parsers."""
         parsers = values["parsers"]
-        if len(parsers) < 2:
-            raise ValueError("Must have at least two parsers")
+        if len(parsers) < _MIN_PARSERS:
+            msg = "Must have at least two parsers"
+            raise ValueError(msg)
         for parser in parsers:
             if parser._type == "combining":
-                raise ValueError("Cannot nest combining parsers")
+                msg = "Cannot nest combining parsers"
+                raise ValueError(msg)
             if parser._type == "list":
-                raise ValueError("Cannot combine list parsers")
+                msg = "Cannot combine list parsers"
+                raise ValueError(msg)
         return values

     @property
@@ -46,7 +51,7 @@ class CombiningOutputParser(BaseOutputParser[dict[str, Any]]):
     def parse(self, text: str) -> dict[str, Any]:
         """Parse the output of an LLM call."""
         texts = text.split("\n\n")
-        output = dict()
+        output = {}
         for txt, parser in zip(texts, self.parsers):
             output.update(parser.parse(txt.strip()))
         return output
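
The CombiningOutputParser hunks above introduce the _MIN_PARSERS constant and restyle the error raising; parse still splits the LLM output on blank lines and hands one block to each sub-parser in order. A hedged illustration (not part of the diff; RegexParser is used purely as a simple sub-parser):

# Hedged illustration, not from the release artifacts.
from langchain.output_parsers import CombiningOutputParser, RegexParser

combining = CombiningOutputParser(
    parsers=[
        RegexParser(regex=r"Answer: (.*)", output_keys=["answer"]),
        RegexParser(regex=r"Confidence: (.*)", output_keys=["confidence"]),
    ],
)
print(combining.parse("Answer: Paris\n\nConfidence: high"))
# -> {'answer': 'Paris', 'confidence': 'high'}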

langchain/output_parsers/datetime.py CHANGED
@@ -1,42 +1,43 @@
-import random
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone

 from langchain_core.exceptions import OutputParserException
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.utils import comma_list


-def _generate_random_datetime_strings(
-    pattern: str,
-    n: int = 3,
-    start_date: datetime = datetime(1, 1, 1),
-    end_date: datetime = datetime.now() + timedelta(days=3650),
-) -> list[str]:
-    """Generates n random datetime strings conforming to the
-    given pattern within the specified date range.
-
-    Pattern should be a string containing the desired format codes.
-    start_date and end_date should be datetime objects representing
-    the start and end of the date range.
-    """
-    examples = []
-    delta = end_date - start_date
-    for i in range(n):
-        random_delta = random.uniform(0, delta.total_seconds())
-        dt = start_date + timedelta(seconds=random_delta)
-        date_string = dt.strftime(pattern)
-        examples.append(date_string)
-    return examples
-
-
 class DatetimeOutputParser(BaseOutputParser[datetime]):
     """Parse the output of an LLM call to a datetime."""

     format: str = "%Y-%m-%dT%H:%M:%S.%fZ"
-    """The string value that used as the datetime format."""
+    """The string value that is used as the datetime format.
+
+    Update this to match the desired datetime format for your application.
+    """

     def get_format_instructions(self) -> str:
-        examples = comma_list(_generate_random_datetime_strings(self.format))
+        """Returns the format instructions for the given format."""
+        if self.format == "%Y-%m-%dT%H:%M:%S.%fZ":
+            examples = comma_list(
+                [
+                    "2023-07-04T14:30:00.000000Z",
+                    "1999-12-31T23:59:59.999999Z",
+                    "2025-01-01T00:00:00.000000Z",
+                ],
+            )
+        else:
+            try:
+                now = datetime.now(tz=timezone.utc)
+                examples = comma_list(
+                    [
+                        now.strftime(self.format),
+                        (now.replace(year=now.year - 1)).strftime(self.format),
+                        (now - timedelta(days=1)).strftime(self.format),
+                    ],
+                )
+            except ValueError:
+                # Fallback if the format is very unusual
+                examples = f"e.g., a valid string in the format {self.format}"
+
         return (
             f"Write a datetime string that matches the "
             f"following pattern: '{self.format}'.\n\n"
@@ -45,12 +46,12 @@ class DatetimeOutputParser(BaseOutputParser[datetime]):
         )

     def parse(self, response: str) -> datetime:
+        """Parse a string into a datetime object."""
         try:
-            return datetime.strptime(response.strip(), self.format)
+            return datetime.strptime(response.strip(), self.format)  # noqa: DTZ007
         except ValueError as e:
-            raise OutputParserException(
-                f"Could not parse datetime string: {response}"
-            ) from e
+            msg = f"Could not parse datetime string: {response}"
+            raise OutputParserException(msg) from e

     @property
     def _type(self) -> str:
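
The DatetimeOutputParser rewrite above drops the random example generator: the default format now gets three fixed example timestamps, and a custom format gets examples derived from the current UTC time. A minimal sketch of the resulting behavior (not from the diff):

# Minimal behavioral sketch, not part of the diff.
from langchain.output_parsers import DatetimeOutputParser

parser = DatetimeOutputParser()            # default format "%Y-%m-%dT%H:%M:%S.%fZ"
print(parser.get_format_instructions())   # lists the three fixed examples shown above
print(parser.parse("2023-07-04T14:30:00.000000Z"))  # -> 2023-07-04 14:30:00

custom = DatetimeOutputParser(format="%Y-%m-%d")
print(custom.get_format_instructions())   # examples derived from the current UTC time
print(custom.parse("2024-02-29"))          # -> 2024-02-29 00:00:00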

langchain/output_parsers/enum.py CHANGED
@@ -15,7 +15,8 @@ class EnumOutputParser(BaseOutputParser[Enum]):
     def raise_deprecation(cls, values: dict) -> dict:
         enum = values["enum"]
         if not all(isinstance(e.value, str) for e in enum):
-            raise ValueError("Enum values must be strings")
+            msg = "Enum values must be strings"
+            raise ValueError(msg)
         return values

     @property
@@ -25,11 +26,12 @@ class EnumOutputParser(BaseOutputParser[Enum]):
     def parse(self, response: str) -> Enum:
         try:
            return self.enum(response.strip())
-        except ValueError:
-            raise OutputParserException(
+        except ValueError as e:
+            msg = (
                f"Response '{response}' is not one of the "
                f"expected values: {self._valid_values}"
            )
+            raise OutputParserException(msg) from e

     def get_format_instructions(self) -> str:
         return f"Select one of the following options: {', '.join(self._valid_values)}"

langchain/output_parsers/fix.py CHANGED
@@ -70,34 +70,34 @@ class OutputFixingParser(BaseOutputParser[T]):
                 return self.parser.parse(completion)
             except OutputParserException as e:
                 if retries == self.max_retries:
-                    raise e
+                    raise
+                retries += 1
+                if self.legacy and hasattr(self.retry_chain, "run"):
+                    completion = self.retry_chain.run(
+                        instructions=self.parser.get_format_instructions(),
+                        completion=completion,
+                        error=repr(e),
+                    )
                 else:
-                    retries += 1
-                    if self.legacy and hasattr(self.retry_chain, "run"):
-                        completion = self.retry_chain.run(
-                            instructions=self.parser.get_format_instructions(),
-                            completion=completion,
-                            error=repr(e),
+                    try:
+                        completion = self.retry_chain.invoke(
+                            {
+                                "instructions": self.parser.get_format_instructions(),
+                                "completion": completion,
+                                "error": repr(e),
+                            },
                         )
-                    else:
-                        try:
-                            completion = self.retry_chain.invoke(
-                                dict(
-                                    instructions=self.parser.get_format_instructions(),
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-                        except (NotImplementedError, AttributeError):
-                            # Case: self.parser does not have get_format_instructions
-                            completion = self.retry_chain.invoke(
-                                dict(
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-
-        raise OutputParserException("Failed to parse")
+                    except (NotImplementedError, AttributeError):
+                        # Case: self.parser does not have get_format_instructions
+                        completion = self.retry_chain.invoke(
+                            {
+                                "completion": completion,
+                                "error": repr(e),
+                            },
+                        )
+
+        msg = "Failed to parse"
+        raise OutputParserException(msg)

     async def aparse(self, completion: str) -> T:
         retries = 0
@@ -107,34 +107,34 @@ class OutputFixingParser(BaseOutputParser[T]):
                 return await self.parser.aparse(completion)
             except OutputParserException as e:
                 if retries == self.max_retries:
-                    raise e
+                    raise
+                retries += 1
+                if self.legacy and hasattr(self.retry_chain, "arun"):
+                    completion = await self.retry_chain.arun(
+                        instructions=self.parser.get_format_instructions(),
+                        completion=completion,
+                        error=repr(e),
+                    )
                 else:
-                    retries += 1
-                    if self.legacy and hasattr(self.retry_chain, "arun"):
-                        completion = await self.retry_chain.arun(
-                            instructions=self.parser.get_format_instructions(),
-                            completion=completion,
-                            error=repr(e),
+                    try:
+                        completion = await self.retry_chain.ainvoke(
+                            {
+                                "instructions": self.parser.get_format_instructions(),
+                                "completion": completion,
+                                "error": repr(e),
+                            },
                        )
-                    else:
-                        try:
-                            completion = await self.retry_chain.ainvoke(
-                                dict(
-                                    instructions=self.parser.get_format_instructions(),
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-                        except (NotImplementedError, AttributeError):
-                            # Case: self.parser does not have get_format_instructions
-                            completion = await self.retry_chain.ainvoke(
-                                dict(
-                                    completion=completion,
-                                    error=repr(e),
-                                )
-                            )
-
-        raise OutputParserException("Failed to parse")
+                    except (NotImplementedError, AttributeError):
+                        # Case: self.parser does not have get_format_instructions
+                        completion = await self.retry_chain.ainvoke(
+                            {
+                                "completion": completion,
+                                "error": repr(e),
+                            },
+                        )
+
+        msg = "Failed to parse"
+        raise OutputParserException(msg)

     def get_format_instructions(self) -> str:
         return self.parser.get_format_instructions()
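
The OutputFixingParser hunks above flatten the retry loop and switch the invoke payloads from dict(...) calls to dict literals; the retry semantics are unchanged. A hedged sketch of that retry path (not part of the diff; FakeListLLM stands in for a real model and simply returns a corrected completion on the retry):

# Hedged sketch of the retry path, not from the release artifacts.
from langchain_core.language_models import FakeListLLM

from langchain.output_parsers import DatetimeOutputParser, OutputFixingParser

fixing = OutputFixingParser.from_llm(
    llm=FakeListLLM(responses=["2024-01-01T00:00:00.000000Z"]),  # canned "fix"
    parser=DatetimeOutputParser(),
)
# "tomorrow" fails the wrapped parser, so the retry chain shown above runs once
# and the corrected completion is parsed on the next loop iteration.
print(fixing.parse("tomorrow"))  # -> 2024-01-01 00:00:00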

langchain/output_parsers/format_instructions.py CHANGED
@@ -1,12 +1,10 @@
-# flake8: noqa
-
 STRUCTURED_FORMAT_INSTRUCTIONS = """The output should be a markdown code snippet formatted in the following schema, including the leading and trailing "```json" and "```":

 ```json
 {{
 	{format}
 }}
-```"""
+```"""  # noqa: E501

 STRUCTURED_FORMAT_SIMPLE_INSTRUCTIONS = """
 ```json
@@ -24,7 +22,7 @@ the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of the schema.
 Here is the output schema:
 ```
 {schema}
-```"""
+```"""  # noqa: E501

 YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance that conforms to the given JSON schema below.

@@ -49,14 +47,14 @@ YAML_FORMAT_INSTRUCTIONS = """The output should be formatted as a YAML instance
 ```
 habit: Using disposable water bottles for daily hydration.
 sustainable_alternative: Switch to a reusable water bottle to reduce plastic waste and decrease your environmental footprint.
-``` 
+```

-Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema: 
+Please follow the standard YAML formatting conventions with an indent of 2 spaces and make sure that the data types adhere strictly to the following JSON schema:
 ```
 {schema}
 ```

-Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!"""
+Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!"""  # noqa: E501


 PANDAS_DATAFRAME_FORMAT_INSTRUCTIONS = """The output should be formatted as a string as the operation, followed by a colon, followed by the column or row to be queried on, followed by optional array parameters.
@@ -78,4 +76,4 @@ Here are the possible columns:
 ```
 {columns}
 ```
-"""
+"""  # noqa: E501

langchain/output_parsers/json.py CHANGED
@@ -9,7 +9,7 @@ from langchain_core.utils.json import (

 __all__ = [
     "SimpleJsonOutputParser",
-    "parse_partial_json",
-    "parse_json_markdown",
     "parse_and_check_json_markdown",
+    "parse_json_markdown",
+    "parse_partial_json",
 ]

langchain/output_parsers/list.py CHANGED
@@ -6,8 +6,8 @@ from langchain_core.output_parsers.list import (
 )

 __all__ = [
-    "ListOutputParser",
     "CommaSeparatedListOutputParser",
-    "NumberedListOutputParser",
+    "ListOutputParser",
     "MarkdownListOutputParser",
+    "NumberedListOutputParser",
 ]

langchain/output_parsers/loading.py CHANGED
@@ -10,13 +10,13 @@ def load_output_parser(config: dict) -> dict:
     Returns:
         config dict with output parser loaded
     """
-    if "output_parsers" in config:
-        if config["output_parsers"] is not None:
-            _config = config["output_parsers"]
-            output_parser_type = _config["_type"]
-            if output_parser_type == "regex_parser":
-                output_parser = RegexParser(**_config)
-            else:
-                raise ValueError(f"Unsupported output parser {output_parser_type}")
-            config["output_parsers"] = output_parser
+    if "output_parsers" in config and config["output_parsers"] is not None:
+        _config = config["output_parsers"]
+        output_parser_type = _config["_type"]
+        if output_parser_type == "regex_parser":
+            output_parser = RegexParser(**_config)
+        else:
+            msg = f"Unsupported output parser {output_parser_type}"
+            raise ValueError(msg)
+        config["output_parsers"] = output_parser
     return config

langchain/output_parsers/openai_functions.py CHANGED
@@ -6,8 +6,8 @@ from langchain_core.output_parsers.openai_functions import (
 )

 __all__ = [
-    "PydanticOutputFunctionsParser",
-    "PydanticAttrOutputFunctionsParser",
-    "JsonOutputFunctionsParser",
     "JsonKeyOutputFunctionsParser",
+    "JsonOutputFunctionsParser",
+    "PydanticAttrOutputFunctionsParser",
+    "PydanticOutputFunctionsParser",
 ]

langchain/output_parsers/openai_tools.py CHANGED
@@ -4,4 +4,4 @@ from langchain_core.output_parsers.openai_tools import (
     PydanticToolsParser,
 )

-__all__ = ["PydanticToolsParser", "JsonOutputToolsParser", "JsonOutputKeyToolsParser"]
+__all__ = ["JsonOutputKeyToolsParser", "JsonOutputToolsParser", "PydanticToolsParser"]