langchain 0.3.26__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (591)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +174 -151
  9. langchain/agents/agent_iterator.py +50 -26
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +4 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +6 -6
  48. langchain/agents/chat/base.py +8 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +11 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +9 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +17 -9
  63. langchain/agents/json_chat/base.py +19 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +11 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +81 -71
  71. langchain/agents/openai_functions_agent/agent_token_buffer_memory.py +2 -0
  72. langchain/agents/openai_functions_agent/base.py +47 -37
  73. langchain/agents/openai_functions_multi_agent/base.py +40 -27
  74. langchain/agents/openai_tools/base.py +9 -8
  75. langchain/agents/output_parsers/__init__.py +3 -3
  76. langchain/agents/output_parsers/json.py +8 -6
  77. langchain/agents/output_parsers/openai_functions.py +24 -9
  78. langchain/agents/output_parsers/openai_tools.py +16 -4
  79. langchain/agents/output_parsers/react_json_single_input.py +13 -5
  80. langchain/agents/output_parsers/react_single_input.py +18 -11
  81. langchain/agents/output_parsers/self_ask.py +5 -2
  82. langchain/agents/output_parsers/tools.py +32 -13
  83. langchain/agents/output_parsers/xml.py +102 -28
  84. langchain/agents/react/agent.py +5 -4
  85. langchain/agents/react/base.py +26 -17
  86. langchain/agents/react/output_parser.py +7 -6
  87. langchain/agents/react/textworld_prompt.py +0 -1
  88. langchain/agents/react/wiki_prompt.py +14 -15
  89. langchain/agents/schema.py +5 -2
  90. langchain/agents/self_ask_with_search/base.py +23 -15
  91. langchain/agents/self_ask_with_search/prompt.py +0 -1
  92. langchain/agents/structured_chat/base.py +19 -11
  93. langchain/agents/structured_chat/output_parser.py +29 -18
  94. langchain/agents/structured_chat/prompt.py +3 -4
  95. langchain/agents/tool_calling_agent/base.py +8 -6
  96. langchain/agents/tools.py +5 -2
  97. langchain/agents/utils.py +2 -3
  98. langchain/agents/xml/base.py +12 -6
  99. langchain/agents/xml/prompt.py +1 -2
  100. langchain/cache.py +12 -12
  101. langchain/callbacks/__init__.py +11 -11
  102. langchain/callbacks/aim_callback.py +2 -2
  103. langchain/callbacks/argilla_callback.py +1 -1
  104. langchain/callbacks/arize_callback.py +1 -1
  105. langchain/callbacks/arthur_callback.py +1 -1
  106. langchain/callbacks/base.py +7 -7
  107. langchain/callbacks/clearml_callback.py +1 -1
  108. langchain/callbacks/comet_ml_callback.py +1 -1
  109. langchain/callbacks/confident_callback.py +1 -1
  110. langchain/callbacks/context_callback.py +1 -1
  111. langchain/callbacks/flyte_callback.py +1 -1
  112. langchain/callbacks/human.py +2 -2
  113. langchain/callbacks/infino_callback.py +1 -1
  114. langchain/callbacks/labelstudio_callback.py +1 -1
  115. langchain/callbacks/llmonitor_callback.py +1 -1
  116. langchain/callbacks/manager.py +5 -5
  117. langchain/callbacks/mlflow_callback.py +2 -2
  118. langchain/callbacks/openai_info.py +1 -1
  119. langchain/callbacks/promptlayer_callback.py +1 -1
  120. langchain/callbacks/sagemaker_callback.py +1 -1
  121. langchain/callbacks/streaming_aiter.py +17 -3
  122. langchain/callbacks/streaming_aiter_final_only.py +16 -5
  123. langchain/callbacks/streaming_stdout_final_only.py +10 -3
  124. langchain/callbacks/streamlit/__init__.py +3 -2
  125. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  126. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  127. langchain/callbacks/tracers/__init__.py +1 -1
  128. langchain/callbacks/tracers/comet.py +1 -1
  129. langchain/callbacks/tracers/evaluation.py +1 -1
  130. langchain/callbacks/tracers/log_stream.py +1 -1
  131. langchain/callbacks/tracers/logging.py +12 -1
  132. langchain/callbacks/tracers/stdout.py +1 -1
  133. langchain/callbacks/trubrics_callback.py +1 -1
  134. langchain/callbacks/utils.py +4 -4
  135. langchain/callbacks/wandb_callback.py +1 -1
  136. langchain/callbacks/whylabs_callback.py +1 -1
  137. langchain/chains/api/base.py +41 -23
  138. langchain/chains/api/news_docs.py +1 -2
  139. langchain/chains/api/open_meteo_docs.py +1 -2
  140. langchain/chains/api/openapi/requests_chain.py +1 -1
  141. langchain/chains/api/openapi/response_chain.py +1 -1
  142. langchain/chains/api/podcast_docs.py +1 -2
  143. langchain/chains/api/prompt.py +1 -2
  144. langchain/chains/api/tmdb_docs.py +1 -2
  145. langchain/chains/base.py +96 -56
  146. langchain/chains/chat_vector_db/prompts.py +2 -3
  147. langchain/chains/combine_documents/__init__.py +1 -1
  148. langchain/chains/combine_documents/base.py +30 -11
  149. langchain/chains/combine_documents/map_reduce.py +41 -30
  150. langchain/chains/combine_documents/map_rerank.py +39 -24
  151. langchain/chains/combine_documents/reduce.py +48 -26
  152. langchain/chains/combine_documents/refine.py +27 -17
  153. langchain/chains/combine_documents/stuff.py +24 -13
  154. langchain/chains/constitutional_ai/base.py +11 -4
  155. langchain/chains/constitutional_ai/principles.py +22 -25
  156. langchain/chains/constitutional_ai/prompts.py +25 -28
  157. langchain/chains/conversation/base.py +9 -4
  158. langchain/chains/conversation/memory.py +5 -5
  159. langchain/chains/conversation/prompt.py +5 -5
  160. langchain/chains/conversational_retrieval/base.py +108 -79
  161. langchain/chains/conversational_retrieval/prompts.py +2 -3
  162. langchain/chains/elasticsearch_database/base.py +10 -10
  163. langchain/chains/elasticsearch_database/prompts.py +2 -3
  164. langchain/chains/ernie_functions/__init__.py +2 -2
  165. langchain/chains/example_generator.py +3 -1
  166. langchain/chains/flare/base.py +28 -12
  167. langchain/chains/flare/prompts.py +2 -0
  168. langchain/chains/graph_qa/cypher.py +2 -2
  169. langchain/chains/graph_qa/falkordb.py +1 -1
  170. langchain/chains/graph_qa/gremlin.py +1 -1
  171. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  172. langchain/chains/graph_qa/prompts.py +2 -2
  173. langchain/chains/history_aware_retriever.py +2 -1
  174. langchain/chains/hyde/base.py +6 -5
  175. langchain/chains/hyde/prompts.py +5 -6
  176. langchain/chains/llm.py +82 -61
  177. langchain/chains/llm_bash/__init__.py +3 -2
  178. langchain/chains/llm_checker/base.py +19 -6
  179. langchain/chains/llm_checker/prompt.py +3 -4
  180. langchain/chains/llm_math/base.py +25 -10
  181. langchain/chains/llm_math/prompt.py +1 -2
  182. langchain/chains/llm_summarization_checker/base.py +22 -7
  183. langchain/chains/llm_symbolic_math/__init__.py +3 -2
  184. langchain/chains/loading.py +155 -97
  185. langchain/chains/mapreduce.py +4 -3
  186. langchain/chains/moderation.py +11 -9
  187. langchain/chains/natbot/base.py +11 -9
  188. langchain/chains/natbot/crawler.py +102 -76
  189. langchain/chains/natbot/prompt.py +2 -3
  190. langchain/chains/openai_functions/__init__.py +7 -7
  191. langchain/chains/openai_functions/base.py +15 -10
  192. langchain/chains/openai_functions/citation_fuzzy_match.py +21 -11
  193. langchain/chains/openai_functions/extraction.py +19 -19
  194. langchain/chains/openai_functions/openapi.py +39 -35
  195. langchain/chains/openai_functions/qa_with_structure.py +22 -15
  196. langchain/chains/openai_functions/tagging.py +4 -4
  197. langchain/chains/openai_tools/extraction.py +7 -8
  198. langchain/chains/qa_generation/base.py +8 -3
  199. langchain/chains/qa_generation/prompt.py +5 -5
  200. langchain/chains/qa_with_sources/base.py +17 -6
  201. langchain/chains/qa_with_sources/loading.py +16 -8
  202. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  203. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  204. langchain/chains/qa_with_sources/retrieval.py +15 -6
  205. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  206. langchain/chains/qa_with_sources/vector_db.py +21 -8
  207. langchain/chains/query_constructor/base.py +37 -34
  208. langchain/chains/query_constructor/ir.py +4 -4
  209. langchain/chains/query_constructor/parser.py +101 -34
  210. langchain/chains/query_constructor/prompt.py +5 -6
  211. langchain/chains/question_answering/chain.py +21 -10
  212. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  213. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  214. langchain/chains/question_answering/refine_prompts.py +2 -5
  215. langchain/chains/question_answering/stuff_prompt.py +5 -5
  216. langchain/chains/retrieval.py +1 -3
  217. langchain/chains/retrieval_qa/base.py +38 -27
  218. langchain/chains/retrieval_qa/prompt.py +1 -2
  219. langchain/chains/router/__init__.py +3 -3
  220. langchain/chains/router/base.py +38 -22
  221. langchain/chains/router/embedding_router.py +15 -8
  222. langchain/chains/router/llm_router.py +23 -20
  223. langchain/chains/router/multi_prompt.py +5 -2
  224. langchain/chains/router/multi_retrieval_qa.py +28 -5
  225. langchain/chains/sequential.py +30 -18
  226. langchain/chains/sql_database/prompt.py +14 -16
  227. langchain/chains/sql_database/query.py +7 -5
  228. langchain/chains/structured_output/__init__.py +1 -1
  229. langchain/chains/structured_output/base.py +77 -67
  230. langchain/chains/summarize/chain.py +11 -5
  231. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  232. langchain/chains/summarize/stuff_prompt.py +0 -1
  233. langchain/chains/transform.py +9 -6
  234. langchain/chat_loaders/facebook_messenger.py +1 -1
  235. langchain/chat_loaders/langsmith.py +1 -1
  236. langchain/chat_loaders/utils.py +3 -3
  237. langchain/chat_models/__init__.py +20 -19
  238. langchain/chat_models/anthropic.py +1 -1
  239. langchain/chat_models/azureml_endpoint.py +1 -1
  240. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  241. langchain/chat_models/base.py +213 -139
  242. langchain/chat_models/bedrock.py +1 -1
  243. langchain/chat_models/fake.py +1 -1
  244. langchain/chat_models/meta.py +1 -1
  245. langchain/chat_models/pai_eas_endpoint.py +1 -1
  246. langchain/chat_models/promptlayer_openai.py +1 -1
  247. langchain/chat_models/volcengine_maas.py +1 -1
  248. langchain/docstore/base.py +1 -1
  249. langchain/document_loaders/__init__.py +9 -9
  250. langchain/document_loaders/airbyte.py +3 -3
  251. langchain/document_loaders/assemblyai.py +1 -1
  252. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  253. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  254. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  255. langchain/document_loaders/base.py +1 -1
  256. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  257. langchain/document_loaders/blob_loaders/schema.py +1 -4
  258. langchain/document_loaders/blockchain.py +1 -1
  259. langchain/document_loaders/chatgpt.py +1 -1
  260. langchain/document_loaders/college_confidential.py +1 -1
  261. langchain/document_loaders/confluence.py +1 -1
  262. langchain/document_loaders/email.py +1 -1
  263. langchain/document_loaders/facebook_chat.py +1 -1
  264. langchain/document_loaders/markdown.py +1 -1
  265. langchain/document_loaders/notebook.py +1 -1
  266. langchain/document_loaders/org_mode.py +1 -1
  267. langchain/document_loaders/parsers/__init__.py +1 -1
  268. langchain/document_loaders/parsers/docai.py +1 -1
  269. langchain/document_loaders/parsers/generic.py +1 -1
  270. langchain/document_loaders/parsers/html/__init__.py +1 -1
  271. langchain/document_loaders/parsers/html/bs4.py +1 -1
  272. langchain/document_loaders/parsers/language/cobol.py +1 -1
  273. langchain/document_loaders/parsers/language/python.py +1 -1
  274. langchain/document_loaders/parsers/msword.py +1 -1
  275. langchain/document_loaders/parsers/pdf.py +5 -5
  276. langchain/document_loaders/parsers/registry.py +1 -1
  277. langchain/document_loaders/pdf.py +8 -8
  278. langchain/document_loaders/powerpoint.py +1 -1
  279. langchain/document_loaders/pyspark_dataframe.py +1 -1
  280. langchain/document_loaders/telegram.py +2 -2
  281. langchain/document_loaders/tencent_cos_directory.py +1 -1
  282. langchain/document_loaders/unstructured.py +5 -5
  283. langchain/document_loaders/url_playwright.py +1 -1
  284. langchain/document_loaders/whatsapp_chat.py +1 -1
  285. langchain/document_loaders/youtube.py +2 -2
  286. langchain/document_transformers/__init__.py +3 -3
  287. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  288. langchain/document_transformers/doctran_text_extract.py +1 -1
  289. langchain/document_transformers/doctran_text_qa.py +1 -1
  290. langchain/document_transformers/doctran_text_translate.py +1 -1
  291. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  292. langchain/document_transformers/google_translate.py +1 -1
  293. langchain/document_transformers/html2text.py +1 -1
  294. langchain/document_transformers/nuclia_text_transform.py +1 -1
  295. langchain/embeddings/__init__.py +5 -5
  296. langchain/embeddings/base.py +35 -24
  297. langchain/embeddings/cache.py +37 -32
  298. langchain/embeddings/fake.py +1 -1
  299. langchain/embeddings/huggingface.py +2 -2
  300. langchain/evaluation/__init__.py +22 -22
  301. langchain/evaluation/agents/trajectory_eval_chain.py +26 -25
  302. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  303. langchain/evaluation/comparison/__init__.py +1 -1
  304. langchain/evaluation/comparison/eval_chain.py +21 -13
  305. langchain/evaluation/comparison/prompt.py +1 -2
  306. langchain/evaluation/criteria/__init__.py +1 -1
  307. langchain/evaluation/criteria/eval_chain.py +23 -11
  308. langchain/evaluation/criteria/prompt.py +2 -3
  309. langchain/evaluation/embedding_distance/base.py +34 -20
  310. langchain/evaluation/exact_match/base.py +14 -1
  311. langchain/evaluation/loading.py +16 -11
  312. langchain/evaluation/parsing/base.py +20 -4
  313. langchain/evaluation/parsing/json_distance.py +24 -10
  314. langchain/evaluation/parsing/json_schema.py +13 -12
  315. langchain/evaluation/qa/__init__.py +1 -1
  316. langchain/evaluation/qa/eval_chain.py +20 -5
  317. langchain/evaluation/qa/eval_prompt.py +7 -8
  318. langchain/evaluation/qa/generate_chain.py +4 -1
  319. langchain/evaluation/qa/generate_prompt.py +2 -4
  320. langchain/evaluation/regex_match/base.py +9 -1
  321. langchain/evaluation/schema.py +38 -30
  322. langchain/evaluation/scoring/__init__.py +1 -1
  323. langchain/evaluation/scoring/eval_chain.py +23 -15
  324. langchain/evaluation/scoring/prompt.py +0 -1
  325. langchain/evaluation/string_distance/base.py +20 -9
  326. langchain/globals.py +12 -11
  327. langchain/graphs/__init__.py +6 -6
  328. langchain/graphs/graph_document.py +1 -1
  329. langchain/graphs/networkx_graph.py +2 -2
  330. langchain/hub.py +9 -11
  331. langchain/indexes/__init__.py +3 -3
  332. langchain/indexes/_sql_record_manager.py +63 -46
  333. langchain/indexes/prompts/entity_extraction.py +1 -2
  334. langchain/indexes/prompts/entity_summarization.py +1 -2
  335. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  336. langchain/indexes/vectorstore.py +35 -19
  337. langchain/llms/__init__.py +13 -13
  338. langchain/llms/ai21.py +1 -1
  339. langchain/llms/azureml_endpoint.py +4 -4
  340. langchain/llms/base.py +15 -7
  341. langchain/llms/bedrock.py +1 -1
  342. langchain/llms/cloudflare_workersai.py +1 -1
  343. langchain/llms/gradient_ai.py +1 -1
  344. langchain/llms/loading.py +1 -1
  345. langchain/llms/openai.py +1 -1
  346. langchain/llms/sagemaker_endpoint.py +1 -1
  347. langchain/load/dump.py +1 -1
  348. langchain/load/load.py +1 -1
  349. langchain/load/serializable.py +3 -3
  350. langchain/memory/__init__.py +3 -3
  351. langchain/memory/buffer.py +14 -7
  352. langchain/memory/buffer_window.py +2 -0
  353. langchain/memory/chat_memory.py +14 -8
  354. langchain/memory/chat_message_histories/__init__.py +1 -1
  355. langchain/memory/chat_message_histories/astradb.py +1 -1
  356. langchain/memory/chat_message_histories/cassandra.py +1 -1
  357. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  358. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  359. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  360. langchain/memory/chat_message_histories/file.py +1 -1
  361. langchain/memory/chat_message_histories/firestore.py +1 -1
  362. langchain/memory/chat_message_histories/momento.py +1 -1
  363. langchain/memory/chat_message_histories/mongodb.py +1 -1
  364. langchain/memory/chat_message_histories/neo4j.py +1 -1
  365. langchain/memory/chat_message_histories/postgres.py +1 -1
  366. langchain/memory/chat_message_histories/redis.py +1 -1
  367. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  368. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  369. langchain/memory/chat_message_histories/streamlit.py +1 -1
  370. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  371. langchain/memory/chat_message_histories/xata.py +1 -1
  372. langchain/memory/chat_message_histories/zep.py +1 -1
  373. langchain/memory/combined.py +14 -13
  374. langchain/memory/entity.py +131 -61
  375. langchain/memory/prompt.py +10 -11
  376. langchain/memory/readonly.py +0 -2
  377. langchain/memory/simple.py +4 -3
  378. langchain/memory/summary.py +43 -11
  379. langchain/memory/summary_buffer.py +20 -8
  380. langchain/memory/token_buffer.py +2 -0
  381. langchain/memory/utils.py +3 -2
  382. langchain/memory/vectorstore.py +12 -5
  383. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  384. langchain/model_laboratory.py +12 -11
  385. langchain/output_parsers/__init__.py +4 -4
  386. langchain/output_parsers/boolean.py +7 -4
  387. langchain/output_parsers/combining.py +14 -7
  388. langchain/output_parsers/datetime.py +32 -31
  389. langchain/output_parsers/enum.py +10 -4
  390. langchain/output_parsers/fix.py +60 -53
  391. langchain/output_parsers/format_instructions.py +6 -8
  392. langchain/output_parsers/json.py +2 -2
  393. langchain/output_parsers/list.py +2 -2
  394. langchain/output_parsers/loading.py +9 -9
  395. langchain/output_parsers/openai_functions.py +3 -3
  396. langchain/output_parsers/openai_tools.py +1 -1
  397. langchain/output_parsers/pandas_dataframe.py +59 -48
  398. langchain/output_parsers/prompts.py +1 -2
  399. langchain/output_parsers/rail_parser.py +1 -1
  400. langchain/output_parsers/regex.py +9 -8
  401. langchain/output_parsers/regex_dict.py +7 -10
  402. langchain/output_parsers/retry.py +99 -80
  403. langchain/output_parsers/structured.py +21 -6
  404. langchain/output_parsers/yaml.py +19 -11
  405. langchain/prompts/__init__.py +5 -3
  406. langchain/prompts/base.py +5 -5
  407. langchain/prompts/chat.py +8 -8
  408. langchain/prompts/example_selector/__init__.py +3 -1
  409. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  410. langchain/prompts/few_shot.py +1 -1
  411. langchain/prompts/loading.py +3 -3
  412. langchain/prompts/prompt.py +1 -1
  413. langchain/pydantic_v1/__init__.py +1 -1
  414. langchain/retrievers/__init__.py +5 -5
  415. langchain/retrievers/bedrock.py +2 -2
  416. langchain/retrievers/bm25.py +1 -1
  417. langchain/retrievers/contextual_compression.py +14 -8
  418. langchain/retrievers/docarray.py +1 -1
  419. langchain/retrievers/document_compressors/__init__.py +5 -4
  420. langchain/retrievers/document_compressors/base.py +12 -6
  421. langchain/retrievers/document_compressors/chain_extract.py +5 -3
  422. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  423. langchain/retrievers/document_compressors/chain_filter.py +9 -9
  424. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  425. langchain/retrievers/document_compressors/cohere_rerank.py +17 -15
  426. langchain/retrievers/document_compressors/cross_encoder_rerank.py +2 -0
  427. langchain/retrievers/document_compressors/embeddings_filter.py +24 -17
  428. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  429. langchain/retrievers/document_compressors/listwise_rerank.py +8 -5
  430. langchain/retrievers/ensemble.py +30 -27
  431. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  432. langchain/retrievers/google_vertex_ai_search.py +2 -2
  433. langchain/retrievers/kendra.py +10 -10
  434. langchain/retrievers/llama_index.py +1 -1
  435. langchain/retrievers/merger_retriever.py +11 -11
  436. langchain/retrievers/milvus.py +1 -1
  437. langchain/retrievers/multi_query.py +35 -27
  438. langchain/retrievers/multi_vector.py +24 -9
  439. langchain/retrievers/parent_document_retriever.py +33 -9
  440. langchain/retrievers/re_phraser.py +6 -5
  441. langchain/retrievers/self_query/base.py +157 -127
  442. langchain/retrievers/time_weighted_retriever.py +21 -7
  443. langchain/retrievers/zilliz.py +1 -1
  444. langchain/runnables/hub.py +12 -0
  445. langchain/runnables/openai_functions.py +12 -2
  446. langchain/schema/__init__.py +23 -23
  447. langchain/schema/cache.py +1 -1
  448. langchain/schema/callbacks/base.py +7 -7
  449. langchain/schema/callbacks/manager.py +19 -19
  450. langchain/schema/callbacks/tracers/base.py +1 -1
  451. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  452. langchain/schema/callbacks/tracers/langchain.py +1 -1
  453. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  454. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  455. langchain/schema/callbacks/tracers/schemas.py +8 -8
  456. langchain/schema/callbacks/tracers/stdout.py +3 -3
  457. langchain/schema/document.py +1 -1
  458. langchain/schema/language_model.py +2 -2
  459. langchain/schema/messages.py +12 -12
  460. langchain/schema/output.py +3 -3
  461. langchain/schema/output_parser.py +3 -3
  462. langchain/schema/runnable/__init__.py +3 -3
  463. langchain/schema/runnable/base.py +9 -9
  464. langchain/schema/runnable/config.py +5 -5
  465. langchain/schema/runnable/configurable.py +1 -1
  466. langchain/schema/runnable/history.py +1 -1
  467. langchain/schema/runnable/passthrough.py +1 -1
  468. langchain/schema/runnable/utils.py +16 -16
  469. langchain/schema/vectorstore.py +1 -1
  470. langchain/smith/__init__.py +2 -1
  471. langchain/smith/evaluation/__init__.py +2 -2
  472. langchain/smith/evaluation/config.py +9 -23
  473. langchain/smith/evaluation/name_generation.py +3 -3
  474. langchain/smith/evaluation/progress.py +22 -4
  475. langchain/smith/evaluation/runner_utils.py +416 -247
  476. langchain/smith/evaluation/string_run_evaluator.py +102 -68
  477. langchain/storage/__init__.py +2 -2
  478. langchain/storage/_lc_store.py +4 -2
  479. langchain/storage/encoder_backed.py +7 -2
  480. langchain/storage/file_system.py +19 -16
  481. langchain/storage/in_memory.py +1 -1
  482. langchain/storage/upstash_redis.py +1 -1
  483. langchain/text_splitter.py +15 -15
  484. langchain/tools/__init__.py +28 -26
  485. langchain/tools/ainetwork/app.py +1 -1
  486. langchain/tools/ainetwork/base.py +1 -1
  487. langchain/tools/ainetwork/owner.py +1 -1
  488. langchain/tools/ainetwork/rule.py +1 -1
  489. langchain/tools/ainetwork/transfer.py +1 -1
  490. langchain/tools/ainetwork/value.py +1 -1
  491. langchain/tools/amadeus/closest_airport.py +1 -1
  492. langchain/tools/amadeus/flight_search.py +1 -1
  493. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  494. langchain/tools/base.py +4 -4
  495. langchain/tools/bearly/tool.py +1 -1
  496. langchain/tools/bing_search/__init__.py +1 -1
  497. langchain/tools/bing_search/tool.py +1 -1
  498. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  499. langchain/tools/dataforseo_api_search/tool.py +1 -1
  500. langchain/tools/ddg_search/tool.py +1 -1
  501. langchain/tools/e2b_data_analysis/tool.py +2 -2
  502. langchain/tools/edenai/__init__.py +1 -1
  503. langchain/tools/file_management/__init__.py +1 -1
  504. langchain/tools/file_management/copy.py +1 -1
  505. langchain/tools/file_management/delete.py +1 -1
  506. langchain/tools/gmail/__init__.py +2 -2
  507. langchain/tools/gmail/get_message.py +1 -1
  508. langchain/tools/gmail/search.py +1 -1
  509. langchain/tools/gmail/send_message.py +1 -1
  510. langchain/tools/google_finance/__init__.py +1 -1
  511. langchain/tools/google_finance/tool.py +1 -1
  512. langchain/tools/google_scholar/__init__.py +1 -1
  513. langchain/tools/google_scholar/tool.py +1 -1
  514. langchain/tools/google_search/__init__.py +1 -1
  515. langchain/tools/google_search/tool.py +1 -1
  516. langchain/tools/google_serper/__init__.py +1 -1
  517. langchain/tools/google_serper/tool.py +1 -1
  518. langchain/tools/google_trends/__init__.py +1 -1
  519. langchain/tools/google_trends/tool.py +1 -1
  520. langchain/tools/jira/tool.py +20 -1
  521. langchain/tools/json/tool.py +25 -3
  522. langchain/tools/memorize/tool.py +1 -1
  523. langchain/tools/multion/__init__.py +1 -1
  524. langchain/tools/multion/update_session.py +1 -1
  525. langchain/tools/office365/__init__.py +2 -2
  526. langchain/tools/office365/events_search.py +1 -1
  527. langchain/tools/office365/messages_search.py +1 -1
  528. langchain/tools/office365/send_event.py +1 -1
  529. langchain/tools/office365/send_message.py +1 -1
  530. langchain/tools/openapi/utils/api_models.py +6 -6
  531. langchain/tools/playwright/__init__.py +5 -5
  532. langchain/tools/playwright/click.py +1 -1
  533. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  534. langchain/tools/playwright/get_elements.py +1 -1
  535. langchain/tools/playwright/navigate.py +1 -1
  536. langchain/tools/plugin.py +2 -2
  537. langchain/tools/powerbi/tool.py +1 -1
  538. langchain/tools/python/__init__.py +3 -2
  539. langchain/tools/reddit_search/tool.py +1 -1
  540. langchain/tools/render.py +2 -2
  541. langchain/tools/requests/tool.py +2 -2
  542. langchain/tools/searchapi/tool.py +1 -1
  543. langchain/tools/searx_search/tool.py +1 -1
  544. langchain/tools/slack/get_message.py +1 -1
  545. langchain/tools/spark_sql/tool.py +1 -1
  546. langchain/tools/sql_database/tool.py +1 -1
  547. langchain/tools/tavily_search/__init__.py +1 -1
  548. langchain/tools/tavily_search/tool.py +1 -1
  549. langchain/tools/zapier/__init__.py +1 -1
  550. langchain/tools/zapier/tool.py +24 -2
  551. langchain/utilities/__init__.py +4 -4
  552. langchain/utilities/arcee.py +4 -4
  553. langchain/utilities/clickup.py +4 -4
  554. langchain/utilities/dalle_image_generator.py +1 -1
  555. langchain/utilities/dataforseo_api_search.py +1 -1
  556. langchain/utilities/opaqueprompts.py +1 -1
  557. langchain/utilities/reddit_search.py +1 -1
  558. langchain/utilities/sql_database.py +1 -1
  559. langchain/utilities/tavily_search.py +1 -1
  560. langchain/utilities/vertexai.py +2 -2
  561. langchain/utils/__init__.py +1 -1
  562. langchain/utils/aiter.py +1 -1
  563. langchain/utils/html.py +3 -3
  564. langchain/utils/input.py +1 -1
  565. langchain/utils/iter.py +1 -1
  566. langchain/utils/json_schema.py +1 -3
  567. langchain/utils/strings.py +1 -1
  568. langchain/utils/utils.py +6 -6
  569. langchain/vectorstores/__init__.py +5 -5
  570. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  571. langchain/vectorstores/azure_cosmos_db.py +1 -1
  572. langchain/vectorstores/clickhouse.py +1 -1
  573. langchain/vectorstores/elastic_vector_search.py +1 -1
  574. langchain/vectorstores/elasticsearch.py +2 -2
  575. langchain/vectorstores/myscale.py +1 -1
  576. langchain/vectorstores/neo4j_vector.py +1 -1
  577. langchain/vectorstores/pgembedding.py +1 -1
  578. langchain/vectorstores/qdrant.py +1 -1
  579. langchain/vectorstores/redis/__init__.py +1 -1
  580. langchain/vectorstores/redis/base.py +1 -1
  581. langchain/vectorstores/redis/filters.py +4 -4
  582. langchain/vectorstores/redis/schema.py +6 -6
  583. langchain/vectorstores/sklearn.py +2 -2
  584. langchain/vectorstores/starrocks.py +1 -1
  585. langchain/vectorstores/utils.py +1 -1
  586. {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/METADATA +4 -14
  587. {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/RECORD +590 -591
  588. {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/WHEEL +1 -1
  589. langchain/smith/evaluation/utils.py +0 -0
  590. {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
  591. {langchain-0.3.26.dist-info → langchain-0.4.0.dev0.dist-info}/licenses/LICENSE +0 -0

langchain/chains/openai_functions/citation_fuzzy_match.py

@@ -45,6 +45,14 @@ class FactWithEvidence(BaseModel):
             yield from s.spans()

     def get_spans(self, context: str) -> Iterator[str]:
+        """Get spans of the substring quote in the context.
+
+        Args:
+            context: The context in which to find the spans of the substring quote.
+
+        Returns:
+            An iterator over the spans of the substring quote in the context.
+        """
         for quote in self.substring_quote:
             yield from self._get_span(quote, context)

@@ -86,25 +94,25 @@ def create_citation_fuzzy_match_runnable(llm: BaseChatModel) -> Runnable:

     Returns:
         Runnable that can be used to answer questions with citations.
+
     """
     if llm.bind_tools is BaseChatModel.bind_tools:
-        raise ValueError(
-            "Language model must implement bind_tools to use this function."
-        )
+        msg = "Language model must implement bind_tools to use this function."
+        raise ValueError(msg)
     prompt = ChatPromptTemplate(
         [
             SystemMessage(
                 "You are a world class algorithm to answer "
-                "questions with correct and exact citations."
+                "questions with correct and exact citations.",
             ),
             HumanMessagePromptTemplate.from_template(
                 "Answer question using the following context."
                 "\n\n{context}"
                 "\n\nQuestion: {question}"
                 "\n\nTips: Make sure to cite your sources, "
-                "and use the exact words from the context."
+                "and use the exact words from the context.",
             ),
-        ]
+        ],
     )
     return prompt | llm.with_structured_output(QuestionAnswer)

@@ -124,7 +132,10 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=QuestionAnswer)
-    schema = QuestionAnswer.schema()
+    if hasattr(QuestionAnswer, "model_json_schema"):
+        schema = QuestionAnswer.model_json_schema()
+    else:
+        schema = QuestionAnswer.schema()
     function = {
         "name": schema["title"],
         "description": schema["description"],
@@ -136,7 +147,7 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             content=(
                 "You are a world class algorithm to answer "
                 "questions with correct and exact citations."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -145,15 +156,14 @@ def create_citation_fuzzy_match_chain(llm: BaseLanguageModel) -> LLMChain:
             content=(
                 "Tips: Make sure to cite your sources, "
                 "and use the exact words from the context."
-            )
+            ),
         ),
     ]
     prompt = ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
     )
-    return chain
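
The hasattr(QuestionAnswer, "model_json_schema") branch introduced above is the usual pydantic v1/v2 compatibility shim: v2 models expose model_json_schema(), while v1 models only have schema(). A minimal standalone sketch of the same check, using a stand-in model rather than the package's real QuestionAnswer class:

    from pydantic import BaseModel, Field


    class QuestionAnswer(BaseModel):
        # Stand-in model for illustration; the real class lives in
        # langchain.chains.openai_functions.citation_fuzzy_match.
        question: str = Field(description="The question that was asked")
        answer: str = Field(description="The answer, with citations")


    # Pydantic v2 exposes model_json_schema(); v1 only has schema().
    if hasattr(QuestionAnswer, "model_json_schema"):
        schema = QuestionAnswer.model_json_schema()
    else:
        schema = QuestionAnswer.schema()

    # Either path returns a JSON-schema dict keyed by "title" and "properties".
    print(schema["title"], sorted(schema["properties"]))
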

langchain/chains/openai_functions/extraction.py

@@ -25,7 +25,7 @@ def _get_extraction_function(entity_schema: dict) -> dict:
         "parameters": {
             "type": "object",
             "properties": {
-                "info": {"type": "array", "items": _convert_schema(entity_schema)}
+                "info": {"type": "array", "items": _convert_schema(entity_schema)},
             },
             "required": ["info"],
         },
@@ -63,18 +63,18 @@ Passage:
         """
         from pydantic import BaseModel, Field
         from langchain_anthropic import ChatAnthropic
-
+
         class Joke(BaseModel):
             setup: str = Field(description="The setup of the joke")
-            punchline: str = Field(description="The punchline to the joke")
-
+            punchline: str = Field(description="The punchline to the joke")
+
         # Or any other chat model that supports tools.
         # Please reference to to the documentation of structured_output
-        # to see an up to date list of which models support
+        # to see an up to date list of which models support
         # with_structured_output.
         model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
         structured_llm = model.with_structured_output(Joke)
-        structured_llm.invoke("Tell me a joke about cats.
+        structured_llm.invoke("Tell me a joke about cats.
             Make sure to call the Joke function.")
         """
     ),
@@ -84,7 +84,7 @@ def create_extraction_chain(
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
     tags: Optional[list[str]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage.

@@ -103,7 +103,7 @@ def create_extraction_chain(
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = JsonKeyOutputFunctionsParser(key_name="info")
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
@@ -111,7 +111,6 @@ def create_extraction_chain(
         tags=tags,
         verbose=verbose,
     )
-    return chain


 @deprecated(
@@ -133,18 +132,18 @@ def create_extraction_chain(
         """
         from pydantic import BaseModel, Field
         from langchain_anthropic import ChatAnthropic
-
+
         class Joke(BaseModel):
             setup: str = Field(description="The setup of the joke")
-            punchline: str = Field(description="The punchline to the joke")
-
+            punchline: str = Field(description="The punchline to the joke")
+
         # Or any other chat model that supports tools.
         # Please reference to to the documentation of structured_output
-        # to see an up to date list of which models support
+        # to see an up to date list of which models support
         # with_structured_output.
         model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
         structured_llm = model.with_structured_output(Joke)
-        structured_llm.invoke("Tell me a joke about cats.
+        structured_llm.invoke("Tell me a joke about cats.
             Make sure to call the Joke function.")
         """
     ),
@@ -153,7 +152,7 @@ def create_extraction_chain_pydantic(
     pydantic_schema: Any,
     llm: BaseLanguageModel,
     prompt: Optional[BasePromptTemplate] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> Chain:
     """Creates a chain that extracts information from a passage using pydantic schema.

@@ -178,20 +177,21 @@ def create_extraction_chain_pydantic(
         openai_schema = pydantic_schema.schema()

     openai_schema = _resolve_schema_references(
-        openai_schema, openai_schema.get("definitions", {})
+        openai_schema,
+        openai_schema.get("definitions", {}),
     )

     function = _get_extraction_function(openai_schema)
     extraction_prompt = prompt or ChatPromptTemplate.from_template(_EXTRACTION_TEMPLATE)
     output_parser = PydanticAttrOutputFunctionsParser(
-        pydantic_schema=PydanticSchema, attr_name="info"
+        pydantic_schema=PydanticSchema,
+        attr_name="info",
     )
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=extraction_prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         verbose=verbose,
     )
-    return chain
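
The # noqa: FBT001,FBT002 comments added to the verbose parameters suppress the flake8-boolean-trap rules (as surfaced by ruff) rather than change the public signature. As a hedged sketch of what those rules point at, under illustrative names not taken from the package:

    # FBT001/FBT002 flag boolean positional parameters because a call such as
    # build_chain(llm, True) does not say what True means. This diff keeps the
    # existing signatures and silences the rule; a greenfield API would usually
    # make the flag keyword-only instead.


    def build_chain_legacy(llm: str, verbose: bool = False) -> str:  # noqa: FBT001, FBT002
        """Positional-or-keyword flag, kept for backwards compatibility."""
        return f"{llm} (verbose={verbose})"


    def build_chain(llm: str, *, verbose: bool = False) -> str:
        """Keyword-only flag, which the FBT rules prefer."""
        return f"{llm} (verbose={verbose})"


    print(build_chain_legacy("fake-llm", True))    # allowed, but opaque at the call site
    print(build_chain("fake-llm", verbose=True))   # the flag is named at the call site
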

langchain/chains/openai_functions/openapi.py

@@ -13,6 +13,7 @@ from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsPa
 from langchain_core.prompts import BasePromptTemplate, ChatPromptTemplate
 from langchain_core.utils.input import get_colored_text
 from requests import Response
+from typing_extensions import override

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -23,14 +24,6 @@ if TYPE_CHECKING:
     from openapi_pydantic import Parameter


-def _get_description(o: Any, prefer_short: bool) -> Optional[str]:
-    summary = getattr(o, "summary", None)
-    description = getattr(o, "description", None)
-    if prefer_short:
-        return summary or description
-    return description or summary
-
-
 def _format_url(url: str, path_params: dict) -> str:
     expected_path_param = re.findall(r"{(.*?)}", url)
     new_params = {}
@@ -59,13 +52,12 @@ def _format_url(url: str, path_params: dict) -> str:
                 sep = ","
                 new_val = ""
             new_val += sep.join(kv_strs)
+        elif param[0] == ".":
+            new_val = f".{val}"
+        elif param[0] == ";":
+            new_val = f";{clean_param}={val}"
         else:
-            if param[0] == ".":
-                new_val = f".{val}"
-            elif param[0] == ";":
-                new_val = f";{clean_param}={val}"
-            else:
-                new_val = val
+            new_val = val
         new_params[param] = new_val
     return url.format(**new_params)

@@ -77,7 +69,7 @@ def _openapi_params_to_json_schema(params: list[Parameter], spec: OpenAPISpec) -
         if p.param_schema:
             schema = spec.get_schema(p.param_schema)
         else:
-            media_type_schema = list(p.content.values())[0].media_type_schema
+            media_type_schema = next(iter(p.content.values())).media_type_schema
             schema = spec.get_schema(media_type_schema)
         if p.description and not schema.description:
             schema.description = p.description
@@ -102,11 +94,12 @@ def openapi_spec_to_openai_fn(
     """
     try:
         from langchain_community.tools import APIOperation
-    except ImportError:
-        raise ImportError(
+    except ImportError as e:
+        msg = (
             "Could not import langchain_community.tools. "
             "Please install it with `pip install langchain-community`."
         )
+        raise ImportError(msg) from e

     if not spec.paths:
         return [], lambda: None
@@ -134,7 +127,8 @@ def openapi_spec_to_openai_fn(
         for param_loc, arg_name in param_loc_to_arg_name.items():
             if params_by_type[param_loc]:
                 request_args[arg_name] = _openapi_params_to_json_schema(
-                    params_by_type[param_loc], spec
+                    params_by_type[param_loc],
+                    spec,
                 )
         request_body = spec.get_request_body_for_operation(op)
         # TODO: Support more MIME types.
@@ -144,10 +138,10 @@ def openapi_spec_to_openai_fn(
                 if media_type_object.media_type_schema:
                     schema = spec.get_schema(media_type_object.media_type_schema)
                     media_types[media_type] = json.loads(
-                        schema.json(exclude_none=True)
+                        schema.json(exclude_none=True),
                     )
             if len(media_types) == 1:
-                media_type, schema_dict = list(media_types.items())[0]
+                media_type, schema_dict = next(iter(media_types.items()))
                 key = "json" if media_type == "application/json" else "data"
                 request_args[key] = schema_dict
             elif len(media_types) > 1:
@@ -173,6 +167,7 @@ def openapi_spec_to_openai_fn(
         fn_args: dict,
         headers: Optional[dict] = None,
         params: Optional[dict] = None,
+        timeout: Optional[int] = 30,
         **kwargs: Any,
     ) -> Any:
         method = _name_to_call_map[name]["method"]
@@ -192,7 +187,7 @@ def openapi_spec_to_openai_fn(
                 _kwargs["params"].update(params)
             else:
                 _kwargs["params"] = params
-        return requests.request(method, url, **_kwargs)
+        return requests.request(method, url, **_kwargs, timeout=timeout)

     return functions, default_call_api

@@ -208,10 +203,12 @@ class SimpleRequestChain(Chain):
     """Key to use for the input of the request."""

     @property
+    @override
     def input_keys(self) -> list[str]:
         return [self.input_key]

     @property
+    @override
     def output_keys(self) -> list[str]:
         return [self.output_key]

@@ -229,11 +226,11 @@ class SimpleRequestChain(Chain):
         _text = f"Calling endpoint {_pretty_name} with arguments:\n" + _pretty_args
         _run_manager.on_text(_text)
         api_response: Response = self.request_method(name, args)
-        if api_response.status_code != 200:
+        if api_response.status_code != requests.codes.ok:
             response = (
                 f"{api_response.status_code}: {api_response.reason}"
-                + f"\nFor {name} "
-                + f"Called with args: {args.get('params', '')}"
+                f"\nFor {name} "
+                f"Called with args: {args.get('params', '')}"
             )
         else:
             try:
@@ -248,7 +245,7 @@ class SimpleRequestChain(Chain):
     message=(
         "This function is deprecated and will be removed in langchain 1.0. "
         "See API reference for replacement: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"  # noqa: E501
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.openai_functions.openapi.get_openapi_chain.html"
     ),
     removal="1.0",
 )
@@ -258,7 +255,7 @@ def get_openapi_chain(
     prompt: Optional[BasePromptTemplate] = None,
     request_chain: Optional[Chain] = None,
     llm_chain_kwargs: Optional[dict] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
     headers: Optional[dict] = None,
     params: Optional[dict] = None,
     **kwargs: Any,
@@ -348,14 +345,16 @@ def get_openapi_chain(
             `ChatOpenAI(model="gpt-3.5-turbo-0613")`.
         prompt: Main prompt template to use.
         request_chain: Chain for taking the functions output and executing the request.
+
     """  # noqa: E501
     try:
         from langchain_community.utilities.openapi import OpenAPISpec
     except ImportError as e:
-        raise ImportError(
+        msg = (
             "Could not import langchain_community.utilities.openapi. "
             "Please install it with `pip install langchain-community`."
-        ) from e
+        )
+        raise ImportError(msg) from e
     if isinstance(spec, str):
         for conversion in (
             OpenAPISpec.from_url,
@@ -365,21 +364,23 @@ def get_openapi_chain(
             try:
                 spec = conversion(spec)
                 break
-            except ImportError as e:
-                raise e
-            except Exception:
+            except ImportError:
+                raise
+            except Exception:  # noqa: S110
                 pass
     if isinstance(spec, str):
-        raise ValueError(f"Unable to parse spec from source {spec}")
+        msg = f"Unable to parse spec from source {spec}"
+        raise ValueError(msg)  # noqa: TRY004
     openai_fns, call_api_fn = openapi_spec_to_openai_fn(spec)
     if not llm:
-        raise ValueError(
+        msg = (
             "Must provide an LLM for this chain.For example,\n"
             "from langchain_openai import ChatOpenAI\n"
             "llm = ChatOpenAI()\n"
         )
+        raise ValueError(msg)
     prompt = prompt or ChatPromptTemplate.from_template(
-        "Use the provided API's to respond to this user query:\n\n{query}"
+        "Use the provided API's to respond to this user query:\n\n{query}",
     )
     llm_chain = LLMChain(
         llm=llm,
@@ -392,7 +393,10 @@ def get_openapi_chain(
     )
     request_chain = request_chain or SimpleRequestChain(
         request_method=lambda name, args: call_api_fn(
-            name, args, headers=headers, params=params
+            name,
+            args,
+            headers=headers,
+            params=params,
         ),
         verbose=verbose,
     )
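
Two changes recur in the hunks above: typing_extensions.override is applied to the Chain key properties, and the generated API caller now passes an explicit timeout to requests.request and compares against requests.codes.ok instead of a bare 200. A small sketch of both patterns in isolation (the class and function here are illustrative stand-ins, not the package's SimpleRequestChain):

    import requests
    from typing_extensions import override


    class KeyedBase:
        @property
        def input_keys(self) -> list[str]:
            return []


    class KeyedDemo(KeyedBase):
        @property
        @override  # a type checker now errors if the base property is renamed away
        def input_keys(self) -> list[str]:
            return ["query"]


    def call_endpoint(url: str, timeout: int = 30) -> str:
        # The explicit timeout keeps a hung endpoint from blocking the chain forever;
        # requests.codes.ok replaces the bare 200 literal from the old code.
        response = requests.request("GET", url, timeout=timeout)
        if response.status_code != requests.codes.ok:
            return f"{response.status_code}: {response.reason}"
        return response.text
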

langchain/chains/openai_functions/qa_with_structure.py

@@ -22,7 +22,8 @@ class AnswerWithSources(BaseModel):

     answer: str = Field(..., description="Answer to the question that was asked")
     sources: list[str] = Field(
-        ..., description="List of sources used to answer the question"
+        ...,
+        description="List of sources used to answer the question",
     )


@@ -32,7 +33,7 @@ class AnswerWithSources(BaseModel):
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with structured responses: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_structure_chain(
@@ -40,7 +41,7 @@ def create_qa_with_structure_chain(
     schema: Union[dict, type[BaseModel]],
     output_parser: str = "base",
     prompt: Optional[Union[PromptTemplate, ChatPromptTemplate]] = None,
-    verbose: bool = False,
+    verbose: bool = False,  # noqa: FBT001,FBT002
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources
     based on schema.
@@ -57,27 +58,29 @@ def create_qa_with_structure_chain(
     """
     if output_parser == "pydantic":
         if not (isinstance(schema, type) and is_basemodel_subclass(schema)):
-            raise ValueError(
+            msg = (
                 "Must provide a pydantic class for schema when output_parser is "
                 "'pydantic'."
             )
+            raise ValueError(msg)
         _output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(
-            pydantic_schema=schema
+            pydantic_schema=schema,
         )
     elif output_parser == "base":
         _output_parser = OutputFunctionsParser()
     else:
-        raise ValueError(
+        msg = (
             f"Got unexpected output_parser: {output_parser}. "
             f"Should be one of `pydantic` or `base`."
         )
+        raise ValueError(msg)
     if isinstance(schema, type) and is_basemodel_subclass(schema):
         if hasattr(schema, "model_json_schema"):
-            schema_dict = cast(dict, schema.model_json_schema())
+            schema_dict = cast("dict", schema.model_json_schema())
         else:
-            schema_dict = cast(dict, schema.schema())
+            schema_dict = cast("dict", schema.schema())
     else:
-        schema_dict = cast(dict, schema)
+        schema_dict = cast("dict", schema)
     function = {
         "name": schema_dict["title"],
         "description": schema_dict["description"],
@@ -89,7 +92,7 @@ def create_qa_with_structure_chain(
             content=(
                 "You are a world class algorithm to answer "
                 "questions in a specific format."
-            )
+            ),
         ),
         HumanMessage(content="Answer question using the following context"),
         HumanMessagePromptTemplate.from_template("{context}"),
@@ -98,14 +101,13 @@ def create_qa_with_structure_chain(
     ]
     prompt = prompt or ChatPromptTemplate(messages=messages)  # type: ignore[arg-type]

-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=_output_parser,
         verbose=verbose,
     )
-    return chain


 @deprecated(
@@ -114,11 +116,13 @@ def create_qa_with_structure_chain(
     message=(
         "This function is deprecated. Refer to this guide on retrieval and question "
         "answering with sources: "
-        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"  # noqa: E501
+        "https://python.langchain.com/docs/how_to/qa_sources/#structure-sources-in-model-response"
     ),
 )
 def create_qa_with_sources_chain(
-    llm: BaseLanguageModel, verbose: bool = False, **kwargs: Any
+    llm: BaseLanguageModel,
+    verbose: bool = False,  # noqa: FBT001,FBT002
+    **kwargs: Any,
 ) -> LLMChain:
     """Create a question answering chain that returns an answer with sources.

@@ -131,5 +135,8 @@ def create_qa_with_sources_chain(
         Chain (LLMChain) that can be used to answer questions with citations.
     """
     return create_qa_with_structure_chain(
-        llm, AnswerWithSources, verbose=verbose, **kwargs
+        llm,
+        AnswerWithSources,
+        verbose=verbose,
+        **kwargs,
     )
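
The repeated rewrite from raising with an inline string to assigning msg first and then raise ValueError(msg) matches the flake8-errmsg (EM101/EM102) and TRY003 style used throughout this diff: the message is built in a named variable so the raise line in tracebacks stays short. A before/after sketch; the wrapper function is illustrative, while the message text mirrors the hunk above:

    def check_output_parser_old(output_parser: str) -> None:
        if output_parser not in ("pydantic", "base"):
            # EM102/TRY003-style complaint: a long f-string built inside the raise.
            raise ValueError(
                f"Got unexpected output_parser: {output_parser}. "
                "Should be one of `pydantic` or `base`."
            )


    def check_output_parser(output_parser: str) -> None:
        if output_parser not in ("pydantic", "base"):
            msg = (
                f"Got unexpected output_parser: {output_parser}. "
                "Should be one of `pydantic` or `base`."
            )
            raise ValueError(msg)
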

langchain/chains/openai_functions/tagging.py

@@ -86,19 +86,19 @@ def create_tagging_chain(

     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+
     """
     function = _get_tagging_function(schema)
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = JsonOutputFunctionsParser()
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain


 @deprecated(
@@ -155,6 +155,7 @@ def create_tagging_chain_pydantic(

     Returns:
         Chain (LLMChain) that can be used to extract information from a passage.
+
     """
     if hasattr(pydantic_schema, "model_json_schema"):
         openai_schema = pydantic_schema.model_json_schema()
@@ -164,11 +165,10 @@ def create_tagging_chain_pydantic(
     prompt = prompt or ChatPromptTemplate.from_template(_TAGGING_TEMPLATE)
     output_parser = PydanticOutputFunctionsParser(pydantic_schema=pydantic_schema)
     llm_kwargs = get_llm_kwargs(function)
-    chain = LLMChain(
+    return LLMChain(
         llm=llm,
         prompt=prompt,
         llm_kwargs=llm_kwargs,
         output_parser=output_parser,
         **kwargs,
     )
-    return chain

langchain/chains/openai_tools/extraction.py

@@ -34,18 +34,18 @@ If a property is not present and is not required in the function parameters, do
         """
         from pydantic import BaseModel, Field
         from langchain_anthropic import ChatAnthropic
-
+
         class Joke(BaseModel):
             setup: str = Field(description="The setup of the joke")
-            punchline: str = Field(description="The punchline to the joke")
-
+            punchline: str = Field(description="The punchline to the joke")
+
         # Or any other chat model that supports tools.
         # Please reference to to the documentation of structured_output
-        # to see an up to date list of which models support
+        # to see an up to date list of which models support
         # with_structured_output.
         model = ChatAnthropic(model="claude-3-opus-20240229", temperature=0)
         structured_llm = model.with_structured_output(Joke)
-        structured_llm.invoke("Tell me a joke about cats.
+        structured_llm.invoke("Tell me a joke about cats.
             Make sure to call the Joke function.")
         """
     ),
@@ -71,10 +71,9 @@ def create_extraction_chain_pydantic(
         [
             ("system", system_message),
             ("user", "{input}"),
-        ]
+        ],
     )
     functions = [convert_pydantic_to_openai_function(p) for p in pydantic_schemas]
     tools = [{"type": "function", "function": d} for d in functions]
     model = llm.bind(tools=tools)
-    chain = prompt | model | PydanticToolsParser(tools=pydantic_schemas)
-    return chain
+    return prompt | model | PydanticToolsParser(tools=pydantic_schemas)

langchain/chains/qa_generation/base.py

@@ -9,6 +9,7 @@ from langchain_core.language_models import BaseLanguageModel
 from langchain_core.prompts import BasePromptTemplate
 from langchain_text_splitters import RecursiveCharacterTextSplitter, TextSplitter
 from pydantic import Field
+from typing_extensions import override

 from langchain.chains.base import Chain
 from langchain.chains.llm import LLMChain
@@ -19,7 +20,7 @@ from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
     since="0.2.7",
     alternative=(
         "example in API reference with more detail: "
-        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"  # noqa: E501
+        "https://api.python.langchain.com/en/latest/chains/langchain.chains.qa_generation.base.QAGenerationChain.html"
     ),
     removal="1.0",
 )
@@ -61,12 +62,13 @@ class QAGenerationChain(Chain):
                 split_text | RunnableEach(bound=prompt | llm | JsonOutputParser())
             )
         )
+
     """

     llm_chain: LLMChain
     """LLM Chain that generates responses from user input and context."""
     text_splitter: TextSplitter = Field(
-        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
+        default=RecursiveCharacterTextSplitter(chunk_overlap=500),
     )
     """Text splitter that splits the input into chunks."""
     input_key: str = "text"
@@ -103,10 +105,12 @@ class QAGenerationChain(Chain):
         raise NotImplementedError

     @property
+    @override
     def input_keys(self) -> list[str]:
         return [self.input_key]

     @property
+    @override
     def output_keys(self) -> list[str]:
         return [self.output_key]

@@ -117,7 +121,8 @@ class QAGenerationChain(Chain):
     ) -> dict[str, list]:
         docs = self.text_splitter.create_documents([inputs[self.input_key]])
         results = self.llm_chain.generate(
-            [{"text": d.page_content} for d in docs], run_manager=run_manager
+            [{"text": d.page_content} for d in docs],
+            run_manager=run_manager,
         )
         qa = [json.loads(res[0].text) for res in results.generations]
         return {self.output_key: qa}