langchain 0.3.25__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain might be problematic.

Files changed (582)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +160 -133
  9. langchain/agents/agent_iterator.py +31 -14
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +7 -7
  48. langchain/agents/chat/base.py +6 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +9 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +7 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +18 -9
  63. langchain/agents/json_chat/base.py +18 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +9 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +80 -70
  71. langchain/agents/openai_functions_agent/base.py +47 -38
  72. langchain/agents/openai_functions_multi_agent/base.py +40 -27
  73. langchain/agents/openai_tools/base.py +8 -8
  74. langchain/agents/output_parsers/__init__.py +3 -3
  75. langchain/agents/output_parsers/json.py +7 -7
  76. langchain/agents/output_parsers/openai_functions.py +15 -7
  77. langchain/agents/output_parsers/openai_tools.py +9 -4
  78. langchain/agents/output_parsers/react_json_single_input.py +10 -5
  79. langchain/agents/output_parsers/react_single_input.py +15 -11
  80. langchain/agents/output_parsers/self_ask.py +3 -2
  81. langchain/agents/output_parsers/tools.py +18 -13
  82. langchain/agents/output_parsers/xml.py +99 -28
  83. langchain/agents/react/agent.py +4 -4
  84. langchain/agents/react/base.py +22 -17
  85. langchain/agents/react/output_parser.py +5 -6
  86. langchain/agents/react/textworld_prompt.py +0 -1
  87. langchain/agents/react/wiki_prompt.py +14 -15
  88. langchain/agents/schema.py +3 -2
  89. langchain/agents/self_ask_with_search/base.py +19 -15
  90. langchain/agents/self_ask_with_search/prompt.py +0 -1
  91. langchain/agents/structured_chat/base.py +14 -11
  92. langchain/agents/structured_chat/output_parser.py +16 -18
  93. langchain/agents/structured_chat/prompt.py +3 -4
  94. langchain/agents/tool_calling_agent/base.py +7 -6
  95. langchain/agents/tools.py +2 -2
  96. langchain/agents/utils.py +2 -3
  97. langchain/agents/xml/base.py +5 -5
  98. langchain/agents/xml/prompt.py +1 -2
  99. langchain/cache.py +12 -12
  100. langchain/callbacks/__init__.py +11 -11
  101. langchain/callbacks/aim_callback.py +2 -2
  102. langchain/callbacks/argilla_callback.py +1 -1
  103. langchain/callbacks/arize_callback.py +1 -1
  104. langchain/callbacks/arthur_callback.py +1 -1
  105. langchain/callbacks/base.py +7 -7
  106. langchain/callbacks/clearml_callback.py +1 -1
  107. langchain/callbacks/comet_ml_callback.py +1 -1
  108. langchain/callbacks/confident_callback.py +1 -1
  109. langchain/callbacks/context_callback.py +1 -1
  110. langchain/callbacks/flyte_callback.py +1 -1
  111. langchain/callbacks/human.py +2 -2
  112. langchain/callbacks/infino_callback.py +1 -1
  113. langchain/callbacks/labelstudio_callback.py +1 -1
  114. langchain/callbacks/llmonitor_callback.py +1 -1
  115. langchain/callbacks/manager.py +6 -6
  116. langchain/callbacks/mlflow_callback.py +2 -2
  117. langchain/callbacks/openai_info.py +1 -1
  118. langchain/callbacks/promptlayer_callback.py +1 -1
  119. langchain/callbacks/sagemaker_callback.py +1 -1
  120. langchain/callbacks/streaming_aiter.py +4 -1
  121. langchain/callbacks/streaming_aiter_final_only.py +5 -3
  122. langchain/callbacks/streaming_stdout_final_only.py +5 -3
  123. langchain/callbacks/streamlit/__init__.py +3 -2
  124. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  125. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  126. langchain/callbacks/tracers/__init__.py +1 -1
  127. langchain/callbacks/tracers/base.py +2 -1
  128. langchain/callbacks/tracers/comet.py +1 -1
  129. langchain/callbacks/tracers/evaluation.py +1 -1
  130. langchain/callbacks/tracers/log_stream.py +1 -1
  131. langchain/callbacks/tracers/logging.py +1 -1
  132. langchain/callbacks/tracers/stdout.py +1 -1
  133. langchain/callbacks/trubrics_callback.py +1 -1
  134. langchain/callbacks/utils.py +4 -4
  135. langchain/callbacks/wandb_callback.py +1 -1
  136. langchain/callbacks/whylabs_callback.py +1 -1
  137. langchain/chains/api/base.py +36 -22
  138. langchain/chains/api/news_docs.py +1 -2
  139. langchain/chains/api/open_meteo_docs.py +1 -2
  140. langchain/chains/api/openapi/requests_chain.py +1 -1
  141. langchain/chains/api/openapi/response_chain.py +1 -1
  142. langchain/chains/api/podcast_docs.py +1 -2
  143. langchain/chains/api/prompt.py +1 -2
  144. langchain/chains/api/tmdb_docs.py +1 -2
  145. langchain/chains/base.py +89 -55
  146. langchain/chains/chat_vector_db/prompts.py +2 -3
  147. langchain/chains/combine_documents/__init__.py +1 -1
  148. langchain/chains/combine_documents/base.py +24 -11
  149. langchain/chains/combine_documents/map_reduce.py +39 -31
  150. langchain/chains/combine_documents/map_rerank.py +34 -21
  151. langchain/chains/combine_documents/reduce.py +47 -26
  152. langchain/chains/combine_documents/refine.py +26 -17
  153. langchain/chains/combine_documents/stuff.py +19 -12
  154. langchain/chains/constitutional_ai/base.py +4 -4
  155. langchain/chains/constitutional_ai/principles.py +22 -25
  156. langchain/chains/constitutional_ai/prompts.py +25 -28
  157. langchain/chains/conversation/base.py +6 -7
  158. langchain/chains/conversation/memory.py +5 -5
  159. langchain/chains/conversation/prompt.py +5 -5
  160. langchain/chains/conversational_retrieval/base.py +41 -20
  161. langchain/chains/conversational_retrieval/prompts.py +2 -3
  162. langchain/chains/elasticsearch_database/base.py +8 -9
  163. langchain/chains/elasticsearch_database/prompts.py +2 -3
  164. langchain/chains/ernie_functions/__init__.py +2 -2
  165. langchain/chains/example_generator.py +3 -1
  166. langchain/chains/flare/base.py +26 -12
  167. langchain/chains/graph_qa/cypher.py +2 -2
  168. langchain/chains/graph_qa/falkordb.py +1 -1
  169. langchain/chains/graph_qa/gremlin.py +1 -1
  170. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  171. langchain/chains/graph_qa/prompts.py +2 -2
  172. langchain/chains/history_aware_retriever.py +2 -1
  173. langchain/chains/hyde/base.py +6 -5
  174. langchain/chains/hyde/prompts.py +5 -6
  175. langchain/chains/llm.py +77 -61
  176. langchain/chains/llm_bash/__init__.py +2 -1
  177. langchain/chains/llm_checker/base.py +7 -5
  178. langchain/chains/llm_checker/prompt.py +3 -4
  179. langchain/chains/llm_math/base.py +16 -9
  180. langchain/chains/llm_math/prompt.py +1 -2
  181. langchain/chains/llm_summarization_checker/base.py +9 -6
  182. langchain/chains/llm_symbolic_math/__init__.py +2 -1
  183. langchain/chains/loading.py +170 -153
  184. langchain/chains/mapreduce.py +4 -3
  185. langchain/chains/moderation.py +8 -9
  186. langchain/chains/natbot/base.py +8 -8
  187. langchain/chains/natbot/crawler.py +73 -76
  188. langchain/chains/natbot/prompt.py +2 -3
  189. langchain/chains/openai_functions/__init__.py +7 -7
  190. langchain/chains/openai_functions/base.py +13 -10
  191. langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
  192. langchain/chains/openai_functions/extraction.py +19 -19
  193. langchain/chains/openai_functions/openapi.py +35 -35
  194. langchain/chains/openai_functions/qa_with_structure.py +19 -12
  195. langchain/chains/openai_functions/tagging.py +2 -4
  196. langchain/chains/openai_tools/extraction.py +7 -8
  197. langchain/chains/qa_generation/base.py +4 -3
  198. langchain/chains/qa_generation/prompt.py +5 -5
  199. langchain/chains/qa_with_sources/base.py +14 -6
  200. langchain/chains/qa_with_sources/loading.py +16 -8
  201. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  202. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  203. langchain/chains/qa_with_sources/retrieval.py +14 -5
  204. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  205. langchain/chains/qa_with_sources/vector_db.py +17 -6
  206. langchain/chains/query_constructor/base.py +34 -33
  207. langchain/chains/query_constructor/ir.py +4 -4
  208. langchain/chains/query_constructor/parser.py +37 -32
  209. langchain/chains/query_constructor/prompt.py +5 -6
  210. langchain/chains/question_answering/chain.py +21 -10
  211. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  212. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  213. langchain/chains/question_answering/refine_prompts.py +2 -5
  214. langchain/chains/question_answering/stuff_prompt.py +5 -5
  215. langchain/chains/retrieval.py +1 -3
  216. langchain/chains/retrieval_qa/base.py +34 -27
  217. langchain/chains/retrieval_qa/prompt.py +1 -2
  218. langchain/chains/router/__init__.py +3 -3
  219. langchain/chains/router/base.py +24 -20
  220. langchain/chains/router/embedding_router.py +12 -8
  221. langchain/chains/router/llm_router.py +17 -16
  222. langchain/chains/router/multi_prompt.py +2 -2
  223. langchain/chains/router/multi_retrieval_qa.py +10 -5
  224. langchain/chains/sequential.py +30 -18
  225. langchain/chains/sql_database/prompt.py +14 -16
  226. langchain/chains/sql_database/query.py +6 -5
  227. langchain/chains/structured_output/__init__.py +1 -1
  228. langchain/chains/structured_output/base.py +75 -67
  229. langchain/chains/summarize/chain.py +11 -5
  230. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  231. langchain/chains/summarize/stuff_prompt.py +0 -1
  232. langchain/chains/transform.py +5 -6
  233. langchain/chat_loaders/facebook_messenger.py +1 -1
  234. langchain/chat_loaders/langsmith.py +1 -1
  235. langchain/chat_loaders/utils.py +3 -3
  236. langchain/chat_models/__init__.py +20 -19
  237. langchain/chat_models/anthropic.py +1 -1
  238. langchain/chat_models/azureml_endpoint.py +1 -1
  239. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  240. langchain/chat_models/base.py +160 -123
  241. langchain/chat_models/bedrock.py +1 -1
  242. langchain/chat_models/fake.py +1 -1
  243. langchain/chat_models/meta.py +1 -1
  244. langchain/chat_models/pai_eas_endpoint.py +1 -1
  245. langchain/chat_models/promptlayer_openai.py +1 -1
  246. langchain/chat_models/volcengine_maas.py +1 -1
  247. langchain/docstore/base.py +1 -1
  248. langchain/document_loaders/__init__.py +9 -9
  249. langchain/document_loaders/airbyte.py +3 -3
  250. langchain/document_loaders/assemblyai.py +1 -1
  251. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  252. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  253. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  254. langchain/document_loaders/base.py +1 -1
  255. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  256. langchain/document_loaders/blockchain.py +1 -1
  257. langchain/document_loaders/chatgpt.py +1 -1
  258. langchain/document_loaders/college_confidential.py +1 -1
  259. langchain/document_loaders/confluence.py +1 -1
  260. langchain/document_loaders/email.py +1 -1
  261. langchain/document_loaders/facebook_chat.py +1 -1
  262. langchain/document_loaders/markdown.py +1 -1
  263. langchain/document_loaders/notebook.py +1 -1
  264. langchain/document_loaders/org_mode.py +1 -1
  265. langchain/document_loaders/parsers/__init__.py +1 -1
  266. langchain/document_loaders/parsers/docai.py +1 -1
  267. langchain/document_loaders/parsers/generic.py +1 -1
  268. langchain/document_loaders/parsers/html/__init__.py +1 -1
  269. langchain/document_loaders/parsers/html/bs4.py +1 -1
  270. langchain/document_loaders/parsers/language/cobol.py +1 -1
  271. langchain/document_loaders/parsers/language/python.py +1 -1
  272. langchain/document_loaders/parsers/msword.py +1 -1
  273. langchain/document_loaders/parsers/pdf.py +5 -5
  274. langchain/document_loaders/parsers/registry.py +1 -1
  275. langchain/document_loaders/pdf.py +8 -8
  276. langchain/document_loaders/powerpoint.py +1 -1
  277. langchain/document_loaders/pyspark_dataframe.py +1 -1
  278. langchain/document_loaders/telegram.py +2 -2
  279. langchain/document_loaders/tencent_cos_directory.py +1 -1
  280. langchain/document_loaders/unstructured.py +5 -5
  281. langchain/document_loaders/url_playwright.py +1 -1
  282. langchain/document_loaders/whatsapp_chat.py +1 -1
  283. langchain/document_loaders/youtube.py +2 -2
  284. langchain/document_transformers/__init__.py +3 -3
  285. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  286. langchain/document_transformers/doctran_text_extract.py +1 -1
  287. langchain/document_transformers/doctran_text_qa.py +1 -1
  288. langchain/document_transformers/doctran_text_translate.py +1 -1
  289. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  290. langchain/document_transformers/google_translate.py +1 -1
  291. langchain/document_transformers/html2text.py +1 -1
  292. langchain/document_transformers/nuclia_text_transform.py +1 -1
  293. langchain/embeddings/__init__.py +5 -5
  294. langchain/embeddings/base.py +33 -24
  295. langchain/embeddings/cache.py +117 -26
  296. langchain/embeddings/fake.py +1 -1
  297. langchain/embeddings/huggingface.py +2 -2
  298. langchain/evaluation/__init__.py +22 -22
  299. langchain/evaluation/agents/trajectory_eval_chain.py +24 -24
  300. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  301. langchain/evaluation/comparison/__init__.py +1 -1
  302. langchain/evaluation/comparison/eval_chain.py +21 -14
  303. langchain/evaluation/comparison/prompt.py +1 -2
  304. langchain/evaluation/criteria/__init__.py +1 -1
  305. langchain/evaluation/criteria/eval_chain.py +21 -12
  306. langchain/evaluation/criteria/prompt.py +2 -3
  307. langchain/evaluation/embedding_distance/base.py +24 -21
  308. langchain/evaluation/loading.py +15 -11
  309. langchain/evaluation/parsing/base.py +4 -1
  310. langchain/evaluation/parsing/json_distance.py +5 -2
  311. langchain/evaluation/parsing/json_schema.py +12 -8
  312. langchain/evaluation/qa/__init__.py +1 -1
  313. langchain/evaluation/qa/eval_chain.py +13 -6
  314. langchain/evaluation/qa/eval_prompt.py +7 -8
  315. langchain/evaluation/qa/generate_chain.py +2 -1
  316. langchain/evaluation/qa/generate_prompt.py +2 -4
  317. langchain/evaluation/schema.py +38 -30
  318. langchain/evaluation/scoring/__init__.py +1 -1
  319. langchain/evaluation/scoring/eval_chain.py +23 -16
  320. langchain/evaluation/scoring/prompt.py +0 -1
  321. langchain/evaluation/string_distance/base.py +15 -10
  322. langchain/globals.py +12 -11
  323. langchain/graphs/__init__.py +6 -6
  324. langchain/graphs/graph_document.py +1 -1
  325. langchain/graphs/networkx_graph.py +2 -2
  326. langchain/hub.py +9 -11
  327. langchain/indexes/__init__.py +3 -3
  328. langchain/indexes/_sql_record_manager.py +63 -46
  329. langchain/indexes/prompts/entity_extraction.py +1 -2
  330. langchain/indexes/prompts/entity_summarization.py +1 -2
  331. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  332. langchain/indexes/vectorstore.py +35 -19
  333. langchain/llms/__init__.py +13 -13
  334. langchain/llms/ai21.py +1 -1
  335. langchain/llms/azureml_endpoint.py +4 -4
  336. langchain/llms/base.py +15 -7
  337. langchain/llms/bedrock.py +1 -1
  338. langchain/llms/cloudflare_workersai.py +1 -1
  339. langchain/llms/gradient_ai.py +1 -1
  340. langchain/llms/loading.py +1 -1
  341. langchain/llms/openai.py +1 -1
  342. langchain/llms/sagemaker_endpoint.py +1 -1
  343. langchain/load/dump.py +1 -1
  344. langchain/load/load.py +1 -1
  345. langchain/load/serializable.py +3 -3
  346. langchain/memory/__init__.py +3 -3
  347. langchain/memory/buffer.py +11 -8
  348. langchain/memory/chat_memory.py +14 -8
  349. langchain/memory/chat_message_histories/__init__.py +1 -1
  350. langchain/memory/chat_message_histories/astradb.py +1 -1
  351. langchain/memory/chat_message_histories/cassandra.py +1 -1
  352. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  353. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  354. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  355. langchain/memory/chat_message_histories/file.py +1 -1
  356. langchain/memory/chat_message_histories/firestore.py +1 -1
  357. langchain/memory/chat_message_histories/momento.py +1 -1
  358. langchain/memory/chat_message_histories/mongodb.py +1 -1
  359. langchain/memory/chat_message_histories/neo4j.py +1 -1
  360. langchain/memory/chat_message_histories/postgres.py +1 -1
  361. langchain/memory/chat_message_histories/redis.py +1 -1
  362. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  363. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  364. langchain/memory/chat_message_histories/streamlit.py +1 -1
  365. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  366. langchain/memory/chat_message_histories/xata.py +1 -1
  367. langchain/memory/chat_message_histories/zep.py +1 -1
  368. langchain/memory/combined.py +13 -12
  369. langchain/memory/entity.py +84 -61
  370. langchain/memory/prompt.py +10 -11
  371. langchain/memory/readonly.py +0 -2
  372. langchain/memory/simple.py +1 -3
  373. langchain/memory/summary.py +13 -11
  374. langchain/memory/summary_buffer.py +17 -8
  375. langchain/memory/utils.py +3 -2
  376. langchain/memory/vectorstore.py +13 -6
  377. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  378. langchain/model_laboratory.py +12 -11
  379. langchain/output_parsers/__init__.py +4 -4
  380. langchain/output_parsers/boolean.py +7 -4
  381. langchain/output_parsers/combining.py +10 -5
  382. langchain/output_parsers/datetime.py +32 -31
  383. langchain/output_parsers/enum.py +5 -3
  384. langchain/output_parsers/fix.py +52 -52
  385. langchain/output_parsers/format_instructions.py +6 -8
  386. langchain/output_parsers/json.py +2 -2
  387. langchain/output_parsers/list.py +2 -2
  388. langchain/output_parsers/loading.py +9 -9
  389. langchain/output_parsers/openai_functions.py +3 -3
  390. langchain/output_parsers/openai_tools.py +1 -1
  391. langchain/output_parsers/pandas_dataframe.py +43 -47
  392. langchain/output_parsers/prompts.py +1 -2
  393. langchain/output_parsers/rail_parser.py +1 -1
  394. langchain/output_parsers/regex.py +7 -8
  395. langchain/output_parsers/regex_dict.py +7 -10
  396. langchain/output_parsers/retry.py +77 -78
  397. langchain/output_parsers/structured.py +11 -6
  398. langchain/output_parsers/yaml.py +15 -11
  399. langchain/prompts/__init__.py +5 -3
  400. langchain/prompts/base.py +5 -5
  401. langchain/prompts/chat.py +10 -9
  402. langchain/prompts/example_selector/__init__.py +3 -1
  403. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  404. langchain/prompts/few_shot.py +1 -1
  405. langchain/prompts/loading.py +3 -3
  406. langchain/prompts/prompt.py +1 -1
  407. langchain/retrievers/__init__.py +5 -5
  408. langchain/retrievers/bedrock.py +2 -2
  409. langchain/retrievers/bm25.py +1 -1
  410. langchain/retrievers/contextual_compression.py +15 -13
  411. langchain/retrievers/docarray.py +1 -1
  412. langchain/retrievers/document_compressors/__init__.py +7 -5
  413. langchain/retrievers/document_compressors/base.py +13 -7
  414. langchain/retrievers/document_compressors/chain_extract.py +4 -5
  415. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  416. langchain/retrievers/document_compressors/chain_filter.py +11 -12
  417. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  418. langchain/retrievers/document_compressors/cohere_rerank.py +17 -19
  419. langchain/retrievers/document_compressors/embeddings_filter.py +23 -23
  420. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  421. langchain/retrievers/document_compressors/listwise_rerank.py +11 -6
  422. langchain/retrievers/ensemble.py +28 -25
  423. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  424. langchain/retrievers/google_vertex_ai_search.py +2 -2
  425. langchain/retrievers/kendra.py +10 -10
  426. langchain/retrievers/llama_index.py +1 -1
  427. langchain/retrievers/merger_retriever.py +11 -11
  428. langchain/retrievers/milvus.py +1 -1
  429. langchain/retrievers/multi_query.py +32 -26
  430. langchain/retrievers/multi_vector.py +20 -8
  431. langchain/retrievers/parent_document_retriever.py +18 -9
  432. langchain/retrievers/re_phraser.py +6 -5
  433. langchain/retrievers/self_query/base.py +138 -119
  434. langchain/retrievers/time_weighted_retriever.py +18 -7
  435. langchain/retrievers/zilliz.py +1 -1
  436. langchain/runnables/hub.py +2 -1
  437. langchain/runnables/openai_functions.py +6 -2
  438. langchain/schema/__init__.py +23 -23
  439. langchain/schema/cache.py +1 -1
  440. langchain/schema/callbacks/base.py +7 -7
  441. langchain/schema/callbacks/manager.py +19 -19
  442. langchain/schema/callbacks/tracers/base.py +3 -2
  443. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  444. langchain/schema/callbacks/tracers/langchain.py +1 -1
  445. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  446. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  447. langchain/schema/callbacks/tracers/schemas.py +8 -8
  448. langchain/schema/callbacks/tracers/stdout.py +3 -3
  449. langchain/schema/document.py +1 -1
  450. langchain/schema/language_model.py +2 -2
  451. langchain/schema/messages.py +12 -12
  452. langchain/schema/output.py +3 -3
  453. langchain/schema/output_parser.py +3 -3
  454. langchain/schema/runnable/__init__.py +3 -3
  455. langchain/schema/runnable/base.py +9 -9
  456. langchain/schema/runnable/config.py +5 -5
  457. langchain/schema/runnable/configurable.py +1 -1
  458. langchain/schema/runnable/history.py +1 -1
  459. langchain/schema/runnable/passthrough.py +1 -1
  460. langchain/schema/runnable/utils.py +16 -16
  461. langchain/schema/vectorstore.py +1 -1
  462. langchain/smith/__init__.py +1 -1
  463. langchain/smith/evaluation/__init__.py +2 -2
  464. langchain/smith/evaluation/config.py +10 -7
  465. langchain/smith/evaluation/name_generation.py +3 -3
  466. langchain/smith/evaluation/progress.py +11 -2
  467. langchain/smith/evaluation/runner_utils.py +181 -129
  468. langchain/smith/evaluation/string_run_evaluator.py +75 -68
  469. langchain/storage/__init__.py +2 -2
  470. langchain/storage/_lc_store.py +4 -2
  471. langchain/storage/encoder_backed.py +6 -2
  472. langchain/storage/file_system.py +19 -16
  473. langchain/storage/in_memory.py +1 -1
  474. langchain/storage/upstash_redis.py +1 -1
  475. langchain/text_splitter.py +15 -15
  476. langchain/tools/__init__.py +28 -26
  477. langchain/tools/ainetwork/app.py +1 -1
  478. langchain/tools/ainetwork/base.py +1 -1
  479. langchain/tools/ainetwork/owner.py +1 -1
  480. langchain/tools/ainetwork/rule.py +1 -1
  481. langchain/tools/ainetwork/transfer.py +1 -1
  482. langchain/tools/ainetwork/value.py +1 -1
  483. langchain/tools/amadeus/closest_airport.py +1 -1
  484. langchain/tools/amadeus/flight_search.py +1 -1
  485. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  486. langchain/tools/base.py +4 -4
  487. langchain/tools/bearly/tool.py +1 -1
  488. langchain/tools/bing_search/__init__.py +1 -1
  489. langchain/tools/bing_search/tool.py +1 -1
  490. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  491. langchain/tools/dataforseo_api_search/tool.py +1 -1
  492. langchain/tools/ddg_search/tool.py +1 -1
  493. langchain/tools/e2b_data_analysis/tool.py +2 -2
  494. langchain/tools/edenai/__init__.py +1 -1
  495. langchain/tools/file_management/__init__.py +1 -1
  496. langchain/tools/file_management/copy.py +1 -1
  497. langchain/tools/file_management/delete.py +1 -1
  498. langchain/tools/gmail/__init__.py +2 -2
  499. langchain/tools/gmail/get_message.py +1 -1
  500. langchain/tools/gmail/search.py +1 -1
  501. langchain/tools/gmail/send_message.py +1 -1
  502. langchain/tools/google_finance/__init__.py +1 -1
  503. langchain/tools/google_finance/tool.py +1 -1
  504. langchain/tools/google_scholar/__init__.py +1 -1
  505. langchain/tools/google_scholar/tool.py +1 -1
  506. langchain/tools/google_search/__init__.py +1 -1
  507. langchain/tools/google_search/tool.py +1 -1
  508. langchain/tools/google_serper/__init__.py +1 -1
  509. langchain/tools/google_serper/tool.py +1 -1
  510. langchain/tools/google_trends/__init__.py +1 -1
  511. langchain/tools/google_trends/tool.py +1 -1
  512. langchain/tools/jira/tool.py +20 -1
  513. langchain/tools/json/tool.py +25 -3
  514. langchain/tools/memorize/tool.py +1 -1
  515. langchain/tools/multion/__init__.py +1 -1
  516. langchain/tools/multion/update_session.py +1 -1
  517. langchain/tools/office365/__init__.py +2 -2
  518. langchain/tools/office365/events_search.py +1 -1
  519. langchain/tools/office365/messages_search.py +1 -1
  520. langchain/tools/office365/send_event.py +1 -1
  521. langchain/tools/office365/send_message.py +1 -1
  522. langchain/tools/openapi/utils/api_models.py +6 -6
  523. langchain/tools/playwright/__init__.py +5 -5
  524. langchain/tools/playwright/click.py +1 -1
  525. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  526. langchain/tools/playwright/get_elements.py +1 -1
  527. langchain/tools/playwright/navigate.py +1 -1
  528. langchain/tools/plugin.py +2 -2
  529. langchain/tools/powerbi/tool.py +1 -1
  530. langchain/tools/python/__init__.py +2 -1
  531. langchain/tools/reddit_search/tool.py +1 -1
  532. langchain/tools/render.py +2 -2
  533. langchain/tools/requests/tool.py +2 -2
  534. langchain/tools/searchapi/tool.py +1 -1
  535. langchain/tools/searx_search/tool.py +1 -1
  536. langchain/tools/slack/get_message.py +1 -1
  537. langchain/tools/spark_sql/tool.py +1 -1
  538. langchain/tools/sql_database/tool.py +1 -1
  539. langchain/tools/tavily_search/__init__.py +1 -1
  540. langchain/tools/tavily_search/tool.py +1 -1
  541. langchain/tools/zapier/__init__.py +1 -1
  542. langchain/tools/zapier/tool.py +24 -2
  543. langchain/utilities/__init__.py +4 -4
  544. langchain/utilities/arcee.py +4 -4
  545. langchain/utilities/clickup.py +4 -4
  546. langchain/utilities/dalle_image_generator.py +1 -1
  547. langchain/utilities/dataforseo_api_search.py +1 -1
  548. langchain/utilities/opaqueprompts.py +1 -1
  549. langchain/utilities/reddit_search.py +1 -1
  550. langchain/utilities/sql_database.py +1 -1
  551. langchain/utilities/tavily_search.py +1 -1
  552. langchain/utilities/vertexai.py +2 -2
  553. langchain/utils/__init__.py +1 -1
  554. langchain/utils/aiter.py +1 -1
  555. langchain/utils/html.py +3 -3
  556. langchain/utils/input.py +1 -1
  557. langchain/utils/iter.py +1 -1
  558. langchain/utils/json_schema.py +1 -3
  559. langchain/utils/strings.py +1 -1
  560. langchain/utils/utils.py +6 -6
  561. langchain/vectorstores/__init__.py +5 -5
  562. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  563. langchain/vectorstores/azure_cosmos_db.py +1 -1
  564. langchain/vectorstores/clickhouse.py +1 -1
  565. langchain/vectorstores/elastic_vector_search.py +1 -1
  566. langchain/vectorstores/elasticsearch.py +2 -2
  567. langchain/vectorstores/myscale.py +1 -1
  568. langchain/vectorstores/neo4j_vector.py +1 -1
  569. langchain/vectorstores/pgembedding.py +1 -1
  570. langchain/vectorstores/qdrant.py +1 -1
  571. langchain/vectorstores/redis/__init__.py +1 -1
  572. langchain/vectorstores/redis/base.py +1 -1
  573. langchain/vectorstores/redis/filters.py +4 -4
  574. langchain/vectorstores/redis/schema.py +6 -6
  575. langchain/vectorstores/sklearn.py +2 -2
  576. langchain/vectorstores/starrocks.py +1 -1
  577. langchain/vectorstores/utils.py +1 -1
  578. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/METADATA +5 -5
  579. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/RECORD +582 -582
  580. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
  581. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
  582. {langchain-0.3.25.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
@@ -25,7 +25,7 @@ def _get_extraction_function(entity_schema: dict) -> dict:
         "parameters": {
             "type": "object",
             "properties": {
-                "info": {"type": "array", "items": _convert_schema(entity_schema)}
+                "info": {"type": "array", "items": _convert_schema(entity_schema)},
             },
             "required": ["info"],
         },
@@ -63,18 +63,18 @@ Passage:
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
-               punchline: str = Field(description="The punchline to the joke")
-
+               punchline: str = Field(description="The punchline to the joke")
+
            # Or any other chat model that supports tools.
            # Please reference to to the documentation of structured_output
-           # to see an up to date list of which models support
+           # to see an up to date list of which models support
            # with_structured_output.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
-           structured_llm.invoke("Tell me a joke about cats.
+           structured_llm.invoke("Tell me a joke about cats.
                Make sure to call the Joke function.")
         """
     ),
@@ -84,7 +84,7 @@ def create_extraction_chain(
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
     tags: Optional[list[str]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage.

@@ -103,7 +103,7 @@ def create_extraction_chain(
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = JsonKeyOutputFunctionsParser(key_name="info")
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
@@ -111,7 +111,6 @@ def create_extraction_chain(
         tags=tags,
         verbose=verbose,
     )
-    return chain


 @deprecated(
@@ -133,18 +132,18 @@ def create_extraction_chain(
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
-               punchline: str = Field(description="The punchline to the joke")
-
+               punchline: str = Field(description="The punchline to the joke")
+
            # Or any other chat model that supports tools.
            # Please reference to to the documentation of structured_output
-           # to see an up to date list of which models support
+           # to see an up to date list of which models support
            # with_structured_output.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
-           structured_llm.invoke("Tell me a joke about cats.
+           structured_llm.invoke("Tell me a joke about cats.
                Make sure to call the Joke function.")
         """
     ),
@@ -153,7 +152,7 @@ def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage using pydantic schema.

@@ -178,20 +177,21 @@ def create_extraction_chain_pydantic(
     openai_schema = pydantic_schema.schema()

     openai_schema = _resolve_schema_references(
-        openai_schema, openai_schema.get("definitions", {})
+        openai_schema,
+        openai_schema.get("definitions", {}),
     )

     function = _get_extraction_function(openai_schema)
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = PydanticAttrOutputFunctionsParser(
-        pydantic_schema=PydanticSchema, attr_name="info"
+        pydantic_schema=PydanticSchema,
+        attr_name="info",
     )
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         verbose=verbose,
     )
-    return chain
@@ -23,14 +23,6 @@ if TYPE_CHECKING:
     from openapi_pydantic import Parameter


-def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
-    summary = getattr(o, "summary", None)
-    description = getattr(o, "description", None)
-    if prefer_short:
-        return summary or description
-    return description or summary
-
-
 def _format_url(url: str, path_params: dict) -> str:
     expected_path_param = re.findall(r"{(.*?)}", url)
     new_params = {}
@@ -59,13 +51,12 @@ def _format_url(url: str, path_params: dict) -> str:
             sep = ","
             new_val = ""
             new_val += sep.join(kv_strs)
+        elif param[0] == ".":
+            new_val = f".{val}"
+        elif param[0] == ";":
+            new_val = f";{clean_param}={val}"
         else:
-            if param[0] == ".":
-                new_val = f".{val}"
-            elif param[0] == ";":
-                new_val = f";{clean_param}={val}"
-            else:
-                new_val = val
+            new_val = val
         new_params[param] = new_val
     return url.format(**new_params)

@@ -77,7 +68,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema
+            media_type_schema = next(iter(p.content.values())).media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
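The hunk above replaces list(p.content.values())[0] with next(iter(p.content.values())), which takes the first value without materializing a throwaway list. A minimal standalone sketch of the idiom, with an invented dict:

    content = {"application/json": {"schema": "Pet"}}

    # Old style: builds a full list only to index element 0.
    first = list(content.values())[0]

    # New style: pulls the first item straight from the iterator.
    first = next(iter(content.values()))

    # Both fail on an empty dict (IndexError vs. StopIteration);
    # next(iter(...), None) can supply a fallback instead.
    maybe_first = next(iter({}.values()), None)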
@@ -102,11 +93,12 @@ def openapi_spec_to_openai_fn(
     """
     try:
         from langchain_community.tools import APIOperation
-    except ImportError:
-        raise ImportError(
+    except ImportError as e:
+        msg = (
             "Could not import langchain_community.tools. "
             "Please install it with `pip install langchain-community`."
         )
+        raise ImportError(msg) from e

     if not spec.paths:
         return [], lambda: None
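This hunk follows a pattern repeated throughout the release: assign the error text to msg, then raise with `from e` so the original ImportError stays chained to the new one. A hedged sketch of the shape, using a stand-in import rather than the package's real optional dependency:

    def load_optional_dependency():
        # Hypothetical helper; `json` stands in for a real optional import.
        try:
            import json as dependency
        except ImportError as e:
            msg = (
                "Could not import the optional dependency. "
                "Install it with `pip install <package>`."
            )
            raise ImportError(msg) from e
        return dependency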
@@ -134,7 +126,8 @@ def openapi_spec_to_openai_fn(
         for param_loc, arg_name in param_loc_to_arg_name.items():
             if params_by_type[param_loc]:
                 request_args[arg_name] = _openapi_params_to_json_schema(
-                    params_by_type[param_loc], spec
+                    params_by_type[param_loc],
+                    spec,
                 )
         request_body = spec.get_request_body_for_operation(op)
         # TODO: Support more MIME types.
@@ -144,10 +137,10 @@ def openapi_spec_to_openai_fn(
             if media_type_object.media_type_schema:
                 schema = spec.get_schema(media_type_object.media_type_schema)
                 media_types[media_type] = json.loads(
-                    schema.json(exclude_none=True)
+                    schema.json(exclude_none=True),
                 )
         if len(media_types) == 1:
-            media_type, schema_dict = list(media_types.items())[0]
+            media_type, schema_dict = next(iter(media_types.items()))
             key = "json" if media_type == "application/json" else "data"
             request_args[key] = schema_dict
         elif len(media_types) > 1:
@@ -173,6 +166,7 @@ def openapi_spec_to_openai_fn(
         fn_args: dict,
         headers: Optional[dict] = None,
         params: Optional[dict] = None,
+        timeout: Optional[int] = 30,
         **kwargs: Any,
     ) -> Any:
         method = _name_to_call_map[name]["method"]
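The new timeout parameter defaults to 30 seconds and is threaded through to the requests.request call in the next hunk; without it, a call to an unresponsive server can block indefinitely. A minimal sketch of the same guard, with a placeholder URL and an invented helper name:

    import requests

    def call_api(method: str, url: str, timeout: int = 30, **kwargs):
        # requests.request accepts a timeout in seconds and raises
        # requests.exceptions.Timeout if the server does not answer in time.
        return requests.request(method, url, timeout=timeout, **kwargs)

    # Example call (placeholder URL):
    # call_api("GET", "https://example.com/api", timeout=10)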
@@ -192,7 +186,7 @@ def openapi_spec_to_openai_fn(
             _kwargs["params"].update(params)
         else:
             _kwargs["params"] = params
-        return requests.request(method, url, **_kwargs)
+        return requests.request(method, url, **_kwargs, timeout=timeout)

     return functions, default_call_api

@@ -229,11 +223,11 @@ class SimpleRequestChain(Chain):
         _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
         _run_manager.on_text(_text)
         api_response: Response = self.request_method(name, args)
-        if api_response.status_code != 200:
+        if api_response.status_code != requests.codes.ok:
             response = (
                 f"{api_response.status_code}: {api_response.reason}"
-                + f"\nFor {name} "
-                + f"Called with args: {args.get('params', '')}"
+                f"\nFor {name} "
+                f"Called with args: {args.get('params', '')}"
             )
         else:
             try:
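Two small idioms appear in this hunk: comparing against requests.codes.ok instead of the bare number 200, and joining adjacent f-strings inside parentheses implicitly rather than with `+`. A short illustration with invented values:

    import requests

    status_code, reason, name = 404, "Not Found", "getWeather"

    if status_code != requests.codes.ok:  # requests.codes.ok == 200
        response = (
            f"{status_code}: {reason}"
            f"\nFor {name} "
            "called with no args"  # adjacent string literals are joined at compile time
        )
        print(response)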
@@ -248,7 +242,7 @@ class SimpleRequestChain(Chain):
     message=(
         "This function is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"  # noqa: E501
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
     ),
     removal="1.0",
 )
@@ -258,7 +252,7 @@ def get_openapi_chain(
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
     llm_chain_kwargs: Optional[dict] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     headers: Optional[dict] = None,
     params: Optional[dict] = None,
     **kwargs: Any,
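The # noqa: FBT001,FBT002 markers added to verbose here and in the other chain constructors silence ruff's boolean-trap rules, which flag boolean positional parameters and boolean defaults. A hypothetical sketch of the two shapes the rules distinguish (neither function exists in the package):

    # Flagged by FBT001/FBT002: a boolean accepted positionally, so a call
    # can read run_flagged("query", True) with no hint of what True means.
    def run_flagged(query: str, verbose: bool = False) -> str:
        return f"{query} (verbose={verbose})"

    # The rule's preferred shape: keyword-only, so callers must write
    # run_preferred("query", verbose=True).
    def run_preferred(query: str, *, verbose: bool = False) -> str:
        return f"{query} (verbose={verbose})"

The hunks above keep the positional form and suppress the warning instead, presumably to avoid changing the public signature.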
@@ -352,10 +346,11 @@ def get_openapi_chain(
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import langchain_community.utilities.openapi. "
             "Please install it with `pip install langchain-community`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     if isinstance(spec, str):
         for conversion in (
             OpenAPISpec.from_url,
@@ -365,21 +360,23 @@ def get_openapi_chain(
             try:
                 spec = conversion(spec)
                 break
-            except ImportError as e:
-                raise e
-            except Exception:
+            except ImportError:
+                raise
+            except Exception:  # noqa: S110
                 pass
     if isinstance(spec, str):
-        raise ValueError(f"Unable to parse spec from source {spec}")
+        msg = f"Unable to parse spec from source {spec}"
+        raise ValueError(msg)  # noqa: TRY004
     openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
     if not llm:
-        raise ValueError(
+        msg = (
             "Must provide an LLM for this chain.For example,\n"
             "from langchain_openai import ChatOpenAI\n"
             "llm = ChatOpenAI()\n"
         )
+        raise ValueError(msg)
     prompt = prompt or ChatPromptTemplate.from_template(
-        "Use the provided API's to respond to this user query:\n\n{query}"
+        "Use the provided API's to respond to this user query:\n\n{query}",
     )
     llm_chain = LLMChain(
         llm=llm,
@@ -392,7 +389,10 @@ def get_openapi_chain(
     )
     request_chain = request_chain or SimpleRequestChain(
         request_method=lambda name, args: call_api_fn(
-            name, args, headers=headers, params=params
+            name,
+            args,
+            headers=headers,
+            params=params,
         ),
         verbose=verbose,
     )
@@ -22,7 +22,8 @@ class AnswerWithSources(BaseModel):

     answer: str = Field(..., description="Answer to the question that was asked")
     sources: list[str] = Field(
-        ..., description="List of sources used to answer the question"
+        ...,
+        description="List of sources used to answer the question",
     )


@@ -32,7 +33,7 @@ class AnswerWithSources(BaseModel):
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with structured responses: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_structure_chain(
@@ -40,7 +41,7 @@ def create_qa_with_structure_chain(
     schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources
     based on schema.
@@ -57,20 +58,22 @@ def create_qa_with_structure_chain(
     """
     if output_parser == "pydantic":
         if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
-            raise ValueError(
+            msg = (
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
             )
+            raise ValueError(msg)
         _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
-            pydantic_schema=schema
+            pydantic_schema=schema,
         )
     elif output_parser == "base":
         _output_parser = OutputFunctionsParser()
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
+        raise ValueError(msg)
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         if hasattr(schema, "model_json_schema"):
             schema_dict = cast(dict, schema.model_json_schema())
@@ -89,7 +92,7 @@ def create_qa_with_structure_chain(
             content=(
                 "You are a world class algorithm to answer "
                 "questions in a specific format."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -98,14 +101,13 @@ def create_qa_with_structure_chain(
     ]
     prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=_output_parser,
         verbose=verbose,
     )
-    return chain


 @deprecated(
@@ -114,11 +116,13 @@ def create_qa_with_structure_chain(
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with sources: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_sources_chain(
-    llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
+    llm: BaseLanguageModel,
+    verbose: bool = False,  # noqa: FBT001,FBT002
+    **kwargs: Any,
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources.

@@ -131,5 +135,8 @@ def create_qa_with_sources_chain(
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     return create_qa_with_structure_chain(
-        llm, AnswerWithSources, verbose=verbose, **kwargs
+        llm,
+        AnswerWithSources,
+        verbose=verbose,
+        **kwargs,
     )
@@ -91,14 +91,13 @@ def create_tagging_chain(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = JsonOutputFunctionsParser()
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain


 @deprecated(
@@ -164,11 +163,10 @@ def create_tagging_chain_pydantic(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain
@@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do
         """
             from pydantic import BaseModel, Field
             from langchain_anthropic import ChatAnthropic
-
+
            class Joke(BaseModel):
                setup: str = Field(description="The setup of the joke")
-               punchline: str = Field(description="The punchline to the joke")
-
+               punchline: str = Field(description="The punchline to the joke")
+
            # Or any other chat model that supports tools.
            # Please reference to to the documentation of structured_output
-           # to see an up to date list of which models support
+           # to see an up to date list of which models support
            # with_structured_output.
            model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
            structured_llm = model.with_structured_output(Joke)
-           structured_llm.invoke("Tell me a joke about cats.
+           structured_llm.invoke("Tell me a joke about cats.
                Make sure to call the Joke function.")
         """
     ),
@@ -71,10 +71,9 @@ def create_extraction_chain_pydantic(
         [
             ("system", system_message),
             ("user", "{input}"),
-        ]
+        ],
     )
     functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
     tools = [{"type": "function", "function": d} for d in functions]
     model = llm.bind(tools=tools)
-    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
-    return chain
+    return prompt | model | PydanticToolsParser(tools=pydantic_schemas)
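The rewritten return composes the chain inline with the | operator (a RunnableSequence of prompt, model, and parser) instead of binding it to a temporary name. A rough sketch of the same composition style using langchain_core's RunnableLambda with toy stages, assuming langchain-core is installed:

    from langchain_core.runnables import RunnableLambda

    # Each stage is a Runnable; `|` chains them, mirroring
    # `prompt | model | PydanticToolsParser(...)` above.
    greet = RunnableLambda(lambda text: f"Hello, {text}")
    shout = RunnableLambda(str.upper)

    pipeline = greet | shout
    print(pipeline.invoke("world"))  # HELLO, WORLD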
@@ -19,7 +19,7 @@ from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
     since="0.2.7",
     alternative=(
         "example in API reference with more detail: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"  # noqa: E501
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
     ),
     removal="1.0",
 )
@@ -66,7 +66,7 @@ class QAGenerationChain(Chain):
     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
     text_splitter: TextSplitter = Field(
-        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
+        default=RecursiveCharacterTextSplitter(chunk_overlap=500),
     )
     """Text splitter that splits the input into chunks."""
     input_key: str = "text"
@@ -117,7 +117,8 @@ class QAGenerationChain(Chain):
     ) -> dict[str, list]:
         docs = self.text_splitter.create_documents([inputs[self.input_key]])
         results = self.llm_chain.generate(
-            [{"text": d.page_content} for d in docs], run_manager=run_manager
+            [{"text": d.page_content} for d in docs],
+            run_manager=run_manager,
         )
         qa = [json.loads(res[0].text) for res in results.generations]
         return {self.output_key: qa}
@@ -1,5 +1,3 @@
-# flake8: noqa
-from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
 from langchain_core.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
@@ -7,6 +5,8 @@ from langchain_core.prompts.chat import (
 )
 from langchain_core.prompts.prompt import PromptTemplate

+from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
+
 templ1 = """You are a smart assistant designed to help high school teachers come up with reading comprehension questions.
 Given a piece of text, you must come up with a question and answer pair that can be used to test a student's reading comprehension abilities.
 When coming up with this question/answer pair, you must respond in the following format:
@@ -18,10 +18,10 @@ When coming up with this question/answer pair, you must respond in the following
 ```

 Everything between the ``` must be valid json.
-"""
+"""  # noqa: E501
 templ2 = """Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 CHAT_PROMPT = ChatPromptTemplate.from_messages(
     [
         SystemMessagePromptTemplate.from_template(templ1),
@@ -42,7 +42,7 @@ Everything between the ``` must be valid json.

 Please come up with a question/answer pair, in the specified JSON format, for the following text:
 ----------------
-{text}"""
+{text}"""  # noqa: E501
 PROMPT = PromptTemplate.from_template(templ)

 PROMPT_SELECTOR = ConditionalPromptSelector(
@@ -70,7 +70,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
             document_variable_name="summaries",
         )
         reduce_documents_chain = ReduceDocumentsChain(
-            combine_documents_chain=combine_results_chain
+            combine_documents_chain=combine_results_chain,
         )
         combine_documents_chain = MapReduceDocumentsChain(
             llm_chain=llm_question_chain,
@@ -93,7 +93,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Load chain from chain type."""
         _chain_kwargs = chain_type_kwargs or {}
         combine_documents_chain = load_qa_with_sources_chain(
-            llm, chain_type=chain_type, **_chain_kwargs
+            llm,
+            chain_type=chain_type,
+            **_chain_kwargs,
         )
         return cls(combine_documents_chain=combine_documents_chain, **kwargs)

@@ -118,7 +120,7 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """
         _output_keys = [self.answer_key, self.sources_answer_key]
         if self.return_source_documents:
-            _output_keys = _output_keys + ["source_documents"]
+            _output_keys = [*_output_keys, "source_documents"]
         return _output_keys

     @model_validator(mode="before")
@@ -133,7 +135,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         """Split sources from answer."""
         if re.search(r"SOURCES?:", answer, re.IGNORECASE):
             answer, sources = re.split(
-                r"SOURCES?:|QUESTION:\s", answer, flags=re.IGNORECASE
+                r"SOURCES?:|QUESTION:\s",
+                answer,
+                flags=re.IGNORECASE,
             )[:2]
             sources = re.split(r"\n", sources)[0].strip()
         else:
@@ -164,7 +168,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         docs = self._get_docs(inputs)  # type: ignore[call-arg]

         answer = self.combine_documents_chain.run(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
         )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {
@@ -198,7 +204,9 @@ class BaseQAWithSourcesChain(Chain, ABC):
         else:
             docs = await self._aget_docs(inputs)  # type: ignore[call-arg]
         answer = await self.combine_documents_chain.arun(
-            input_documents=docs, callbacks=_run_manager.get_child(), **inputs
+            input_documents=docs,
+            callbacks=_run_manager.get_child(),
+            **inputs,
         )
         answer, sources = self._split_sources(answer)
         result: dict[str, Any] = {