langchain 0.3.26__py3-none-any.whl → 0.3.27__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of langchain has been flagged as potentially problematic.

Files changed (580)
  1. langchain/__init__.py +110 -96
  2. langchain/_api/__init__.py +2 -2
  3. langchain/_api/deprecation.py +3 -3
  4. langchain/_api/module_import.py +51 -46
  5. langchain/_api/path.py +1 -1
  6. langchain/adapters/openai.py +8 -8
  7. langchain/agents/__init__.py +15 -12
  8. langchain/agents/agent.py +160 -133
  9. langchain/agents/agent_iterator.py +31 -14
  10. langchain/agents/agent_toolkits/__init__.py +7 -6
  11. langchain/agents/agent_toolkits/ainetwork/toolkit.py +1 -1
  12. langchain/agents/agent_toolkits/amadeus/toolkit.py +1 -1
  13. langchain/agents/agent_toolkits/azure_cognitive_services.py +1 -1
  14. langchain/agents/agent_toolkits/clickup/toolkit.py +1 -1
  15. langchain/agents/agent_toolkits/conversational_retrieval/openai_functions.py +6 -4
  16. langchain/agents/agent_toolkits/csv/__init__.py +4 -2
  17. langchain/agents/agent_toolkits/file_management/__init__.py +1 -1
  18. langchain/agents/agent_toolkits/file_management/toolkit.py +1 -1
  19. langchain/agents/agent_toolkits/github/toolkit.py +9 -9
  20. langchain/agents/agent_toolkits/gitlab/toolkit.py +1 -1
  21. langchain/agents/agent_toolkits/json/base.py +1 -1
  22. langchain/agents/agent_toolkits/multion/toolkit.py +1 -1
  23. langchain/agents/agent_toolkits/office365/toolkit.py +1 -1
  24. langchain/agents/agent_toolkits/openapi/base.py +1 -1
  25. langchain/agents/agent_toolkits/openapi/planner.py +2 -2
  26. langchain/agents/agent_toolkits/openapi/planner_prompt.py +10 -10
  27. langchain/agents/agent_toolkits/openapi/prompt.py +1 -1
  28. langchain/agents/agent_toolkits/openapi/toolkit.py +1 -1
  29. langchain/agents/agent_toolkits/pandas/__init__.py +4 -2
  30. langchain/agents/agent_toolkits/playwright/__init__.py +1 -1
  31. langchain/agents/agent_toolkits/playwright/toolkit.py +1 -1
  32. langchain/agents/agent_toolkits/powerbi/base.py +1 -1
  33. langchain/agents/agent_toolkits/powerbi/chat_base.py +1 -1
  34. langchain/agents/agent_toolkits/powerbi/prompt.py +2 -2
  35. langchain/agents/agent_toolkits/powerbi/toolkit.py +1 -1
  36. langchain/agents/agent_toolkits/python/__init__.py +4 -2
  37. langchain/agents/agent_toolkits/spark/__init__.py +4 -2
  38. langchain/agents/agent_toolkits/spark_sql/base.py +1 -1
  39. langchain/agents/agent_toolkits/spark_sql/toolkit.py +1 -1
  40. langchain/agents/agent_toolkits/sql/prompt.py +1 -1
  41. langchain/agents/agent_toolkits/sql/toolkit.py +1 -1
  42. langchain/agents/agent_toolkits/vectorstore/base.py +2 -2
  43. langchain/agents/agent_toolkits/vectorstore/prompt.py +2 -4
  44. langchain/agents/agent_toolkits/vectorstore/toolkit.py +12 -11
  45. langchain/agents/agent_toolkits/xorbits/__init__.py +4 -2
  46. langchain/agents/agent_toolkits/zapier/toolkit.py +1 -1
  47. langchain/agents/agent_types.py +6 -6
  48. langchain/agents/chat/base.py +6 -12
  49. langchain/agents/chat/output_parser.py +9 -6
  50. langchain/agents/chat/prompt.py +3 -4
  51. langchain/agents/conversational/base.py +9 -5
  52. langchain/agents/conversational/output_parser.py +4 -2
  53. langchain/agents/conversational/prompt.py +2 -3
  54. langchain/agents/conversational_chat/base.py +7 -5
  55. langchain/agents/conversational_chat/output_parser.py +9 -11
  56. langchain/agents/conversational_chat/prompt.py +5 -6
  57. langchain/agents/format_scratchpad/__init__.py +3 -3
  58. langchain/agents/format_scratchpad/log_to_messages.py +1 -1
  59. langchain/agents/format_scratchpad/openai_functions.py +8 -6
  60. langchain/agents/format_scratchpad/tools.py +5 -3
  61. langchain/agents/format_scratchpad/xml.py +33 -2
  62. langchain/agents/initialize.py +16 -8
  63. langchain/agents/json_chat/base.py +18 -18
  64. langchain/agents/json_chat/prompt.py +2 -3
  65. langchain/agents/load_tools.py +2 -1
  66. langchain/agents/loading.py +28 -18
  67. langchain/agents/mrkl/base.py +9 -4
  68. langchain/agents/mrkl/output_parser.py +17 -13
  69. langchain/agents/mrkl/prompt.py +1 -2
  70. langchain/agents/openai_assistant/base.py +80 -70
  71. langchain/agents/openai_functions_agent/base.py +46 -37
  72. langchain/agents/openai_functions_multi_agent/base.py +39 -26
  73. langchain/agents/openai_tools/base.py +8 -8
  74. langchain/agents/output_parsers/__init__.py +3 -3
  75. langchain/agents/output_parsers/json.py +6 -6
  76. langchain/agents/output_parsers/openai_functions.py +15 -7
  77. langchain/agents/output_parsers/openai_tools.py +9 -4
  78. langchain/agents/output_parsers/react_json_single_input.py +10 -5
  79. langchain/agents/output_parsers/react_single_input.py +15 -11
  80. langchain/agents/output_parsers/self_ask.py +3 -2
  81. langchain/agents/output_parsers/tools.py +18 -13
  82. langchain/agents/output_parsers/xml.py +99 -28
  83. langchain/agents/react/agent.py +4 -4
  84. langchain/agents/react/base.py +22 -17
  85. langchain/agents/react/output_parser.py +5 -6
  86. langchain/agents/react/textworld_prompt.py +0 -1
  87. langchain/agents/react/wiki_prompt.py +14 -15
  88. langchain/agents/schema.py +3 -2
  89. langchain/agents/self_ask_with_search/base.py +19 -15
  90. langchain/agents/self_ask_with_search/prompt.py +0 -1
  91. langchain/agents/structured_chat/base.py +14 -11
  92. langchain/agents/structured_chat/output_parser.py +16 -18
  93. langchain/agents/structured_chat/prompt.py +3 -4
  94. langchain/agents/tool_calling_agent/base.py +7 -6
  95. langchain/agents/tools.py +2 -2
  96. langchain/agents/utils.py +2 -3
  97. langchain/agents/xml/base.py +5 -5
  98. langchain/agents/xml/prompt.py +1 -2
  99. langchain/cache.py +12 -12
  100. langchain/callbacks/__init__.py +11 -11
  101. langchain/callbacks/aim_callback.py +2 -2
  102. langchain/callbacks/argilla_callback.py +1 -1
  103. langchain/callbacks/arize_callback.py +1 -1
  104. langchain/callbacks/arthur_callback.py +1 -1
  105. langchain/callbacks/base.py +7 -7
  106. langchain/callbacks/clearml_callback.py +1 -1
  107. langchain/callbacks/comet_ml_callback.py +1 -1
  108. langchain/callbacks/confident_callback.py +1 -1
  109. langchain/callbacks/context_callback.py +1 -1
  110. langchain/callbacks/flyte_callback.py +1 -1
  111. langchain/callbacks/human.py +2 -2
  112. langchain/callbacks/infino_callback.py +1 -1
  113. langchain/callbacks/labelstudio_callback.py +1 -1
  114. langchain/callbacks/llmonitor_callback.py +1 -1
  115. langchain/callbacks/manager.py +5 -5
  116. langchain/callbacks/mlflow_callback.py +2 -2
  117. langchain/callbacks/openai_info.py +1 -1
  118. langchain/callbacks/promptlayer_callback.py +1 -1
  119. langchain/callbacks/sagemaker_callback.py +1 -1
  120. langchain/callbacks/streaming_aiter.py +4 -1
  121. langchain/callbacks/streaming_aiter_final_only.py +5 -3
  122. langchain/callbacks/streaming_stdout_final_only.py +5 -3
  123. langchain/callbacks/streamlit/__init__.py +3 -2
  124. langchain/callbacks/streamlit/mutable_expander.py +1 -1
  125. langchain/callbacks/streamlit/streamlit_callback_handler.py +3 -3
  126. langchain/callbacks/tracers/__init__.py +1 -1
  127. langchain/callbacks/tracers/comet.py +1 -1
  128. langchain/callbacks/tracers/evaluation.py +1 -1
  129. langchain/callbacks/tracers/log_stream.py +1 -1
  130. langchain/callbacks/tracers/logging.py +1 -1
  131. langchain/callbacks/tracers/stdout.py +1 -1
  132. langchain/callbacks/trubrics_callback.py +1 -1
  133. langchain/callbacks/utils.py +4 -4
  134. langchain/callbacks/wandb_callback.py +1 -1
  135. langchain/callbacks/whylabs_callback.py +1 -1
  136. langchain/chains/api/base.py +36 -22
  137. langchain/chains/api/news_docs.py +1 -2
  138. langchain/chains/api/open_meteo_docs.py +1 -2
  139. langchain/chains/api/openapi/requests_chain.py +1 -1
  140. langchain/chains/api/openapi/response_chain.py +1 -1
  141. langchain/chains/api/podcast_docs.py +1 -2
  142. langchain/chains/api/prompt.py +1 -2
  143. langchain/chains/api/tmdb_docs.py +1 -2
  144. langchain/chains/base.py +88 -54
  145. langchain/chains/chat_vector_db/prompts.py +2 -3
  146. langchain/chains/combine_documents/__init__.py +1 -1
  147. langchain/chains/combine_documents/base.py +23 -10
  148. langchain/chains/combine_documents/map_reduce.py +38 -30
  149. langchain/chains/combine_documents/map_rerank.py +33 -20
  150. langchain/chains/combine_documents/reduce.py +47 -26
  151. langchain/chains/combine_documents/refine.py +26 -17
  152. langchain/chains/combine_documents/stuff.py +19 -12
  153. langchain/chains/constitutional_ai/base.py +4 -4
  154. langchain/chains/constitutional_ai/principles.py +22 -25
  155. langchain/chains/constitutional_ai/prompts.py +25 -28
  156. langchain/chains/conversation/base.py +5 -3
  157. langchain/chains/conversation/memory.py +5 -5
  158. langchain/chains/conversation/prompt.py +5 -5
  159. langchain/chains/conversational_retrieval/base.py +41 -20
  160. langchain/chains/conversational_retrieval/prompts.py +2 -3
  161. langchain/chains/elasticsearch_database/base.py +8 -9
  162. langchain/chains/elasticsearch_database/prompts.py +2 -3
  163. langchain/chains/ernie_functions/__init__.py +2 -2
  164. langchain/chains/example_generator.py +3 -1
  165. langchain/chains/flare/base.py +26 -12
  166. langchain/chains/graph_qa/cypher.py +2 -2
  167. langchain/chains/graph_qa/falkordb.py +1 -1
  168. langchain/chains/graph_qa/gremlin.py +1 -1
  169. langchain/chains/graph_qa/neptune_sparql.py +1 -1
  170. langchain/chains/graph_qa/prompts.py +2 -2
  171. langchain/chains/history_aware_retriever.py +2 -1
  172. langchain/chains/hyde/base.py +6 -5
  173. langchain/chains/hyde/prompts.py +5 -6
  174. langchain/chains/llm.py +77 -61
  175. langchain/chains/llm_bash/__init__.py +2 -1
  176. langchain/chains/llm_checker/base.py +7 -5
  177. langchain/chains/llm_checker/prompt.py +3 -4
  178. langchain/chains/llm_math/base.py +16 -9
  179. langchain/chains/llm_math/prompt.py +1 -2
  180. langchain/chains/llm_summarization_checker/base.py +9 -6
  181. langchain/chains/llm_symbolic_math/__init__.py +2 -1
  182. langchain/chains/loading.py +151 -95
  183. langchain/chains/mapreduce.py +4 -3
  184. langchain/chains/moderation.py +8 -9
  185. langchain/chains/natbot/base.py +8 -8
  186. langchain/chains/natbot/crawler.py +73 -76
  187. langchain/chains/natbot/prompt.py +2 -3
  188. langchain/chains/openai_functions/__init__.py +7 -7
  189. langchain/chains/openai_functions/base.py +13 -10
  190. langchain/chains/openai_functions/citation_fuzzy_match.py +12 -11
  191. langchain/chains/openai_functions/extraction.py +19 -19
  192. langchain/chains/openai_functions/openapi.py +35 -35
  193. langchain/chains/openai_functions/qa_with_structure.py +19 -12
  194. langchain/chains/openai_functions/tagging.py +2 -4
  195. langchain/chains/openai_tools/extraction.py +7 -8
  196. langchain/chains/qa_generation/base.py +4 -3
  197. langchain/chains/qa_generation/prompt.py +5 -5
  198. langchain/chains/qa_with_sources/base.py +14 -6
  199. langchain/chains/qa_with_sources/loading.py +16 -8
  200. langchain/chains/qa_with_sources/map_reduce_prompt.py +8 -9
  201. langchain/chains/qa_with_sources/refine_prompts.py +0 -1
  202. langchain/chains/qa_with_sources/retrieval.py +14 -5
  203. langchain/chains/qa_with_sources/stuff_prompt.py +6 -7
  204. langchain/chains/qa_with_sources/vector_db.py +17 -6
  205. langchain/chains/query_constructor/base.py +34 -33
  206. langchain/chains/query_constructor/ir.py +4 -4
  207. langchain/chains/query_constructor/parser.py +37 -32
  208. langchain/chains/query_constructor/prompt.py +5 -6
  209. langchain/chains/question_answering/chain.py +21 -10
  210. langchain/chains/question_answering/map_reduce_prompt.py +14 -14
  211. langchain/chains/question_answering/map_rerank_prompt.py +3 -3
  212. langchain/chains/question_answering/refine_prompts.py +2 -5
  213. langchain/chains/question_answering/stuff_prompt.py +5 -5
  214. langchain/chains/retrieval.py +1 -3
  215. langchain/chains/retrieval_qa/base.py +34 -27
  216. langchain/chains/retrieval_qa/prompt.py +1 -2
  217. langchain/chains/router/__init__.py +3 -3
  218. langchain/chains/router/base.py +24 -20
  219. langchain/chains/router/embedding_router.py +12 -8
  220. langchain/chains/router/llm_router.py +17 -16
  221. langchain/chains/router/multi_prompt.py +2 -2
  222. langchain/chains/router/multi_retrieval_qa.py +10 -5
  223. langchain/chains/sequential.py +30 -18
  224. langchain/chains/sql_database/prompt.py +14 -16
  225. langchain/chains/sql_database/query.py +6 -5
  226. langchain/chains/structured_output/__init__.py +1 -1
  227. langchain/chains/structured_output/base.py +75 -67
  228. langchain/chains/summarize/chain.py +11 -5
  229. langchain/chains/summarize/map_reduce_prompt.py +0 -1
  230. langchain/chains/summarize/stuff_prompt.py +0 -1
  231. langchain/chains/transform.py +5 -6
  232. langchain/chat_loaders/facebook_messenger.py +1 -1
  233. langchain/chat_loaders/langsmith.py +1 -1
  234. langchain/chat_loaders/utils.py +3 -3
  235. langchain/chat_models/__init__.py +20 -19
  236. langchain/chat_models/anthropic.py +1 -1
  237. langchain/chat_models/azureml_endpoint.py +1 -1
  238. langchain/chat_models/baidu_qianfan_endpoint.py +1 -1
  239. langchain/chat_models/base.py +160 -123
  240. langchain/chat_models/bedrock.py +1 -1
  241. langchain/chat_models/fake.py +1 -1
  242. langchain/chat_models/meta.py +1 -1
  243. langchain/chat_models/pai_eas_endpoint.py +1 -1
  244. langchain/chat_models/promptlayer_openai.py +1 -1
  245. langchain/chat_models/volcengine_maas.py +1 -1
  246. langchain/docstore/base.py +1 -1
  247. langchain/document_loaders/__init__.py +9 -9
  248. langchain/document_loaders/airbyte.py +3 -3
  249. langchain/document_loaders/assemblyai.py +1 -1
  250. langchain/document_loaders/azure_blob_storage_container.py +1 -1
  251. langchain/document_loaders/azure_blob_storage_file.py +1 -1
  252. langchain/document_loaders/baiducloud_bos_file.py +1 -1
  253. langchain/document_loaders/base.py +1 -1
  254. langchain/document_loaders/blob_loaders/__init__.py +1 -1
  255. langchain/document_loaders/blockchain.py +1 -1
  256. langchain/document_loaders/chatgpt.py +1 -1
  257. langchain/document_loaders/college_confidential.py +1 -1
  258. langchain/document_loaders/confluence.py +1 -1
  259. langchain/document_loaders/email.py +1 -1
  260. langchain/document_loaders/facebook_chat.py +1 -1
  261. langchain/document_loaders/markdown.py +1 -1
  262. langchain/document_loaders/notebook.py +1 -1
  263. langchain/document_loaders/org_mode.py +1 -1
  264. langchain/document_loaders/parsers/__init__.py +1 -1
  265. langchain/document_loaders/parsers/docai.py +1 -1
  266. langchain/document_loaders/parsers/generic.py +1 -1
  267. langchain/document_loaders/parsers/html/__init__.py +1 -1
  268. langchain/document_loaders/parsers/html/bs4.py +1 -1
  269. langchain/document_loaders/parsers/language/cobol.py +1 -1
  270. langchain/document_loaders/parsers/language/python.py +1 -1
  271. langchain/document_loaders/parsers/msword.py +1 -1
  272. langchain/document_loaders/parsers/pdf.py +5 -5
  273. langchain/document_loaders/parsers/registry.py +1 -1
  274. langchain/document_loaders/pdf.py +8 -8
  275. langchain/document_loaders/powerpoint.py +1 -1
  276. langchain/document_loaders/pyspark_dataframe.py +1 -1
  277. langchain/document_loaders/telegram.py +2 -2
  278. langchain/document_loaders/tencent_cos_directory.py +1 -1
  279. langchain/document_loaders/unstructured.py +5 -5
  280. langchain/document_loaders/url_playwright.py +1 -1
  281. langchain/document_loaders/whatsapp_chat.py +1 -1
  282. langchain/document_loaders/youtube.py +2 -2
  283. langchain/document_transformers/__init__.py +3 -3
  284. langchain/document_transformers/beautiful_soup_transformer.py +1 -1
  285. langchain/document_transformers/doctran_text_extract.py +1 -1
  286. langchain/document_transformers/doctran_text_qa.py +1 -1
  287. langchain/document_transformers/doctran_text_translate.py +1 -1
  288. langchain/document_transformers/embeddings_redundant_filter.py +3 -3
  289. langchain/document_transformers/google_translate.py +1 -1
  290. langchain/document_transformers/html2text.py +1 -1
  291. langchain/document_transformers/nuclia_text_transform.py +1 -1
  292. langchain/embeddings/__init__.py +5 -5
  293. langchain/embeddings/base.py +33 -24
  294. langchain/embeddings/cache.py +36 -31
  295. langchain/embeddings/fake.py +1 -1
  296. langchain/embeddings/huggingface.py +2 -2
  297. langchain/evaluation/__init__.py +22 -22
  298. langchain/evaluation/agents/trajectory_eval_chain.py +23 -23
  299. langchain/evaluation/agents/trajectory_eval_prompt.py +6 -9
  300. langchain/evaluation/comparison/__init__.py +1 -1
  301. langchain/evaluation/comparison/eval_chain.py +20 -13
  302. langchain/evaluation/comparison/prompt.py +1 -2
  303. langchain/evaluation/criteria/__init__.py +1 -1
  304. langchain/evaluation/criteria/eval_chain.py +20 -11
  305. langchain/evaluation/criteria/prompt.py +2 -3
  306. langchain/evaluation/embedding_distance/base.py +23 -20
  307. langchain/evaluation/loading.py +15 -11
  308. langchain/evaluation/parsing/base.py +4 -1
  309. langchain/evaluation/parsing/json_distance.py +5 -2
  310. langchain/evaluation/parsing/json_schema.py +12 -8
  311. langchain/evaluation/qa/__init__.py +1 -1
  312. langchain/evaluation/qa/eval_chain.py +12 -5
  313. langchain/evaluation/qa/eval_prompt.py +7 -8
  314. langchain/evaluation/qa/generate_chain.py +2 -1
  315. langchain/evaluation/qa/generate_prompt.py +2 -4
  316. langchain/evaluation/schema.py +38 -30
  317. langchain/evaluation/scoring/__init__.py +1 -1
  318. langchain/evaluation/scoring/eval_chain.py +22 -15
  319. langchain/evaluation/scoring/prompt.py +0 -1
  320. langchain/evaluation/string_distance/base.py +14 -9
  321. langchain/globals.py +12 -11
  322. langchain/graphs/__init__.py +6 -6
  323. langchain/graphs/graph_document.py +1 -1
  324. langchain/graphs/networkx_graph.py +2 -2
  325. langchain/hub.py +9 -11
  326. langchain/indexes/__init__.py +3 -3
  327. langchain/indexes/_sql_record_manager.py +63 -46
  328. langchain/indexes/prompts/entity_extraction.py +1 -2
  329. langchain/indexes/prompts/entity_summarization.py +1 -2
  330. langchain/indexes/prompts/knowledge_triplet_extraction.py +1 -3
  331. langchain/indexes/vectorstore.py +35 -19
  332. langchain/llms/__init__.py +13 -13
  333. langchain/llms/ai21.py +1 -1
  334. langchain/llms/azureml_endpoint.py +4 -4
  335. langchain/llms/base.py +15 -7
  336. langchain/llms/bedrock.py +1 -1
  337. langchain/llms/cloudflare_workersai.py +1 -1
  338. langchain/llms/gradient_ai.py +1 -1
  339. langchain/llms/loading.py +1 -1
  340. langchain/llms/openai.py +1 -1
  341. langchain/llms/sagemaker_endpoint.py +1 -1
  342. langchain/load/dump.py +1 -1
  343. langchain/load/load.py +1 -1
  344. langchain/load/serializable.py +3 -3
  345. langchain/memory/__init__.py +3 -3
  346. langchain/memory/buffer.py +9 -7
  347. langchain/memory/chat_memory.py +14 -8
  348. langchain/memory/chat_message_histories/__init__.py +1 -1
  349. langchain/memory/chat_message_histories/astradb.py +1 -1
  350. langchain/memory/chat_message_histories/cassandra.py +1 -1
  351. langchain/memory/chat_message_histories/cosmos_db.py +1 -1
  352. langchain/memory/chat_message_histories/dynamodb.py +1 -1
  353. langchain/memory/chat_message_histories/elasticsearch.py +1 -1
  354. langchain/memory/chat_message_histories/file.py +1 -1
  355. langchain/memory/chat_message_histories/firestore.py +1 -1
  356. langchain/memory/chat_message_histories/momento.py +1 -1
  357. langchain/memory/chat_message_histories/mongodb.py +1 -1
  358. langchain/memory/chat_message_histories/neo4j.py +1 -1
  359. langchain/memory/chat_message_histories/postgres.py +1 -1
  360. langchain/memory/chat_message_histories/redis.py +1 -1
  361. langchain/memory/chat_message_histories/rocksetdb.py +1 -1
  362. langchain/memory/chat_message_histories/singlestoredb.py +1 -1
  363. langchain/memory/chat_message_histories/streamlit.py +1 -1
  364. langchain/memory/chat_message_histories/upstash_redis.py +1 -1
  365. langchain/memory/chat_message_histories/xata.py +1 -1
  366. langchain/memory/chat_message_histories/zep.py +1 -1
  367. langchain/memory/combined.py +13 -12
  368. langchain/memory/entity.py +84 -61
  369. langchain/memory/prompt.py +10 -11
  370. langchain/memory/readonly.py +0 -2
  371. langchain/memory/simple.py +1 -3
  372. langchain/memory/summary.py +13 -11
  373. langchain/memory/summary_buffer.py +17 -8
  374. langchain/memory/utils.py +3 -2
  375. langchain/memory/vectorstore.py +12 -5
  376. langchain/memory/vectorstore_token_buffer_memory.py +5 -5
  377. langchain/model_laboratory.py +12 -11
  378. langchain/output_parsers/__init__.py +4 -4
  379. langchain/output_parsers/boolean.py +7 -4
  380. langchain/output_parsers/combining.py +10 -5
  381. langchain/output_parsers/datetime.py +32 -31
  382. langchain/output_parsers/enum.py +5 -3
  383. langchain/output_parsers/fix.py +52 -52
  384. langchain/output_parsers/format_instructions.py +6 -8
  385. langchain/output_parsers/json.py +2 -2
  386. langchain/output_parsers/list.py +2 -2
  387. langchain/output_parsers/loading.py +9 -9
  388. langchain/output_parsers/openai_functions.py +3 -3
  389. langchain/output_parsers/openai_tools.py +1 -1
  390. langchain/output_parsers/pandas_dataframe.py +43 -47
  391. langchain/output_parsers/prompts.py +1 -2
  392. langchain/output_parsers/rail_parser.py +1 -1
  393. langchain/output_parsers/regex.py +7 -8
  394. langchain/output_parsers/regex_dict.py +7 -10
  395. langchain/output_parsers/retry.py +77 -78
  396. langchain/output_parsers/structured.py +11 -6
  397. langchain/output_parsers/yaml.py +15 -11
  398. langchain/prompts/__init__.py +5 -3
  399. langchain/prompts/base.py +5 -5
  400. langchain/prompts/chat.py +8 -8
  401. langchain/prompts/example_selector/__init__.py +3 -1
  402. langchain/prompts/example_selector/semantic_similarity.py +2 -2
  403. langchain/prompts/few_shot.py +1 -1
  404. langchain/prompts/loading.py +3 -3
  405. langchain/prompts/prompt.py +1 -1
  406. langchain/retrievers/__init__.py +5 -5
  407. langchain/retrievers/bedrock.py +2 -2
  408. langchain/retrievers/bm25.py +1 -1
  409. langchain/retrievers/contextual_compression.py +14 -8
  410. langchain/retrievers/docarray.py +1 -1
  411. langchain/retrievers/document_compressors/__init__.py +5 -4
  412. langchain/retrievers/document_compressors/base.py +12 -6
  413. langchain/retrievers/document_compressors/chain_extract.py +2 -2
  414. langchain/retrievers/document_compressors/chain_extract_prompt.py +2 -3
  415. langchain/retrievers/document_compressors/chain_filter.py +9 -9
  416. langchain/retrievers/document_compressors/chain_filter_prompt.py +1 -2
  417. langchain/retrievers/document_compressors/cohere_rerank.py +15 -15
  418. langchain/retrievers/document_compressors/embeddings_filter.py +21 -17
  419. langchain/retrievers/document_compressors/flashrank_rerank.py +1 -1
  420. langchain/retrievers/document_compressors/listwise_rerank.py +7 -5
  421. langchain/retrievers/ensemble.py +28 -25
  422. langchain/retrievers/google_cloud_documentai_warehouse.py +1 -1
  423. langchain/retrievers/google_vertex_ai_search.py +2 -2
  424. langchain/retrievers/kendra.py +10 -10
  425. langchain/retrievers/llama_index.py +1 -1
  426. langchain/retrievers/merger_retriever.py +11 -11
  427. langchain/retrievers/milvus.py +1 -1
  428. langchain/retrievers/multi_query.py +32 -26
  429. langchain/retrievers/multi_vector.py +20 -8
  430. langchain/retrievers/parent_document_retriever.py +18 -9
  431. langchain/retrievers/re_phraser.py +6 -5
  432. langchain/retrievers/self_query/base.py +138 -127
  433. langchain/retrievers/time_weighted_retriever.py +18 -7
  434. langchain/retrievers/zilliz.py +1 -1
  435. langchain/runnables/openai_functions.py +6 -2
  436. langchain/schema/__init__.py +23 -23
  437. langchain/schema/cache.py +1 -1
  438. langchain/schema/callbacks/base.py +7 -7
  439. langchain/schema/callbacks/manager.py +19 -19
  440. langchain/schema/callbacks/tracers/base.py +1 -1
  441. langchain/schema/callbacks/tracers/evaluation.py +1 -1
  442. langchain/schema/callbacks/tracers/langchain.py +1 -1
  443. langchain/schema/callbacks/tracers/langchain_v1.py +1 -1
  444. langchain/schema/callbacks/tracers/log_stream.py +1 -1
  445. langchain/schema/callbacks/tracers/schemas.py +8 -8
  446. langchain/schema/callbacks/tracers/stdout.py +3 -3
  447. langchain/schema/document.py +1 -1
  448. langchain/schema/language_model.py +2 -2
  449. langchain/schema/messages.py +12 -12
  450. langchain/schema/output.py +3 -3
  451. langchain/schema/output_parser.py +3 -3
  452. langchain/schema/runnable/__init__.py +3 -3
  453. langchain/schema/runnable/base.py +9 -9
  454. langchain/schema/runnable/config.py +5 -5
  455. langchain/schema/runnable/configurable.py +1 -1
  456. langchain/schema/runnable/history.py +1 -1
  457. langchain/schema/runnable/passthrough.py +1 -1
  458. langchain/schema/runnable/utils.py +16 -16
  459. langchain/schema/vectorstore.py +1 -1
  460. langchain/smith/__init__.py +1 -1
  461. langchain/smith/evaluation/__init__.py +2 -2
  462. langchain/smith/evaluation/config.py +10 -7
  463. langchain/smith/evaluation/name_generation.py +3 -3
  464. langchain/smith/evaluation/progress.py +11 -2
  465. langchain/smith/evaluation/runner_utils.py +179 -127
  466. langchain/smith/evaluation/string_run_evaluator.py +75 -68
  467. langchain/storage/__init__.py +2 -2
  468. langchain/storage/_lc_store.py +4 -2
  469. langchain/storage/encoder_backed.py +6 -2
  470. langchain/storage/file_system.py +19 -16
  471. langchain/storage/in_memory.py +1 -1
  472. langchain/storage/upstash_redis.py +1 -1
  473. langchain/text_splitter.py +15 -15
  474. langchain/tools/__init__.py +28 -26
  475. langchain/tools/ainetwork/app.py +1 -1
  476. langchain/tools/ainetwork/base.py +1 -1
  477. langchain/tools/ainetwork/owner.py +1 -1
  478. langchain/tools/ainetwork/rule.py +1 -1
  479. langchain/tools/ainetwork/transfer.py +1 -1
  480. langchain/tools/ainetwork/value.py +1 -1
  481. langchain/tools/amadeus/closest_airport.py +1 -1
  482. langchain/tools/amadeus/flight_search.py +1 -1
  483. langchain/tools/azure_cognitive_services/__init__.py +1 -1
  484. langchain/tools/base.py +4 -4
  485. langchain/tools/bearly/tool.py +1 -1
  486. langchain/tools/bing_search/__init__.py +1 -1
  487. langchain/tools/bing_search/tool.py +1 -1
  488. langchain/tools/dataforseo_api_search/__init__.py +1 -1
  489. langchain/tools/dataforseo_api_search/tool.py +1 -1
  490. langchain/tools/ddg_search/tool.py +1 -1
  491. langchain/tools/e2b_data_analysis/tool.py +2 -2
  492. langchain/tools/edenai/__init__.py +1 -1
  493. langchain/tools/file_management/__init__.py +1 -1
  494. langchain/tools/file_management/copy.py +1 -1
  495. langchain/tools/file_management/delete.py +1 -1
  496. langchain/tools/gmail/__init__.py +2 -2
  497. langchain/tools/gmail/get_message.py +1 -1
  498. langchain/tools/gmail/search.py +1 -1
  499. langchain/tools/gmail/send_message.py +1 -1
  500. langchain/tools/google_finance/__init__.py +1 -1
  501. langchain/tools/google_finance/tool.py +1 -1
  502. langchain/tools/google_scholar/__init__.py +1 -1
  503. langchain/tools/google_scholar/tool.py +1 -1
  504. langchain/tools/google_search/__init__.py +1 -1
  505. langchain/tools/google_search/tool.py +1 -1
  506. langchain/tools/google_serper/__init__.py +1 -1
  507. langchain/tools/google_serper/tool.py +1 -1
  508. langchain/tools/google_trends/__init__.py +1 -1
  509. langchain/tools/google_trends/tool.py +1 -1
  510. langchain/tools/jira/tool.py +20 -1
  511. langchain/tools/json/tool.py +25 -3
  512. langchain/tools/memorize/tool.py +1 -1
  513. langchain/tools/multion/__init__.py +1 -1
  514. langchain/tools/multion/update_session.py +1 -1
  515. langchain/tools/office365/__init__.py +2 -2
  516. langchain/tools/office365/events_search.py +1 -1
  517. langchain/tools/office365/messages_search.py +1 -1
  518. langchain/tools/office365/send_event.py +1 -1
  519. langchain/tools/office365/send_message.py +1 -1
  520. langchain/tools/openapi/utils/api_models.py +6 -6
  521. langchain/tools/playwright/__init__.py +5 -5
  522. langchain/tools/playwright/click.py +1 -1
  523. langchain/tools/playwright/extract_hyperlinks.py +1 -1
  524. langchain/tools/playwright/get_elements.py +1 -1
  525. langchain/tools/playwright/navigate.py +1 -1
  526. langchain/tools/plugin.py +2 -2
  527. langchain/tools/powerbi/tool.py +1 -1
  528. langchain/tools/python/__init__.py +2 -1
  529. langchain/tools/reddit_search/tool.py +1 -1
  530. langchain/tools/render.py +2 -2
  531. langchain/tools/requests/tool.py +2 -2
  532. langchain/tools/searchapi/tool.py +1 -1
  533. langchain/tools/searx_search/tool.py +1 -1
  534. langchain/tools/slack/get_message.py +1 -1
  535. langchain/tools/spark_sql/tool.py +1 -1
  536. langchain/tools/sql_database/tool.py +1 -1
  537. langchain/tools/tavily_search/__init__.py +1 -1
  538. langchain/tools/tavily_search/tool.py +1 -1
  539. langchain/tools/zapier/__init__.py +1 -1
  540. langchain/tools/zapier/tool.py +24 -2
  541. langchain/utilities/__init__.py +4 -4
  542. langchain/utilities/arcee.py +4 -4
  543. langchain/utilities/clickup.py +4 -4
  544. langchain/utilities/dalle_image_generator.py +1 -1
  545. langchain/utilities/dataforseo_api_search.py +1 -1
  546. langchain/utilities/opaqueprompts.py +1 -1
  547. langchain/utilities/reddit_search.py +1 -1
  548. langchain/utilities/sql_database.py +1 -1
  549. langchain/utilities/tavily_search.py +1 -1
  550. langchain/utilities/vertexai.py +2 -2
  551. langchain/utils/__init__.py +1 -1
  552. langchain/utils/aiter.py +1 -1
  553. langchain/utils/html.py +3 -3
  554. langchain/utils/input.py +1 -1
  555. langchain/utils/iter.py +1 -1
  556. langchain/utils/json_schema.py +1 -3
  557. langchain/utils/strings.py +1 -1
  558. langchain/utils/utils.py +6 -6
  559. langchain/vectorstores/__init__.py +5 -5
  560. langchain/vectorstores/alibabacloud_opensearch.py +1 -1
  561. langchain/vectorstores/azure_cosmos_db.py +1 -1
  562. langchain/vectorstores/clickhouse.py +1 -1
  563. langchain/vectorstores/elastic_vector_search.py +1 -1
  564. langchain/vectorstores/elasticsearch.py +2 -2
  565. langchain/vectorstores/myscale.py +1 -1
  566. langchain/vectorstores/neo4j_vector.py +1 -1
  567. langchain/vectorstores/pgembedding.py +1 -1
  568. langchain/vectorstores/qdrant.py +1 -1
  569. langchain/vectorstores/redis/__init__.py +1 -1
  570. langchain/vectorstores/redis/base.py +1 -1
  571. langchain/vectorstores/redis/filters.py +4 -4
  572. langchain/vectorstores/redis/schema.py +6 -6
  573. langchain/vectorstores/sklearn.py +2 -2
  574. langchain/vectorstores/starrocks.py +1 -1
  575. langchain/vectorstores/utils.py +1 -1
  576. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/METADATA +4 -4
  577. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/RECORD +580 -580
  578. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/WHEEL +1 -1
  579. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/entry_points.txt +0 -0
  580. {langchain-0.3.26.dist-info → langchain-0.3.27.dist-info}/licenses/LICENSE +0 -0
langchain/embeddings/base.py

@@ -50,7 +50,7 @@ def _parse_model_string(model_name: str) -> tuple[str, str]:
     """
     if ":" not in model_name:
         providers = _SUPPORTED_PROVIDERS
-        raise ValueError(
+        msg = (
             f"Invalid model format '{model_name}'.\n"
             f"Model name must be in format 'provider:model-name'\n"
             f"Example valid model strings:\n"
@@ -59,49 +59,56 @@ def _parse_model_string(model_name: str) -> tuple[str, str]:
             f" - cohere:embed-english-v3.0\n"
             f"Supported providers: {providers}"
         )
+        raise ValueError(msg)
 
     provider, model = model_name.split(":", 1)
     provider = provider.lower().strip()
     model = model.strip()
 
     if provider not in _SUPPORTED_PROVIDERS:
-        raise ValueError(
+        msg = (
             f"Provider '{provider}' is not supported.\n"
             f"Supported providers and their required packages:\n"
             f"{_get_provider_list()}"
         )
+        raise ValueError(msg)
     if not model:
-        raise ValueError("Model name cannot be empty")
+        msg = "Model name cannot be empty"
+        raise ValueError(msg)
     return provider, model
 
 
 def _infer_model_and_provider(
-    model: str, *, provider: Optional[str] = None
+    model: str,
+    *,
+    provider: Optional[str] = None,
 ) -> tuple[str, str]:
     if not model.strip():
-        raise ValueError("Model name cannot be empty")
+        msg = "Model name cannot be empty"
+        raise ValueError(msg)
     if provider is None and ":" in model:
         provider, model_name = _parse_model_string(model)
     else:
-        provider = provider
         model_name = model
 
     if not provider:
         providers = _SUPPORTED_PROVIDERS
-        raise ValueError(
+        msg = (
             "Must specify either:\n"
             "1. A model string in format 'provider:model-name'\n"
             "   Example: 'openai:text-embedding-3-small'\n"
             "2. Or explicitly set provider from: "
             f"{providers}"
         )
+        raise ValueError(msg)
 
     if provider not in _SUPPORTED_PROVIDERS:
-        raise ValueError(
+        msg = (
             f"Provider '{provider}' is not supported.\n"
             f"Supported providers and their required packages:\n"
             f"{_get_provider_list()}"
         )
+        raise ValueError(msg)
     return provider, model_name
 
 
@@ -109,10 +116,11 @@ def _infer_model_and_provider(
 def _check_pkg(pkg: str) -> None:
     """Check if a package is installed."""
     if not util.find_spec(pkg):
-        raise ImportError(
+        msg = (
             f"Could not import {pkg} python package. "
             f"Please install it with `pip install {pkg}`"
         )
+        raise ImportError(msg)
 
 
 def init_embeddings(
@@ -172,9 +180,10 @@ def init_embeddings(
     """
     if not model:
         providers = _SUPPORTED_PROVIDERS.keys()
-        raise ValueError(
+        msg = (
             f"Must specify model name. Supported providers are: {', '.join(providers)}"
         )
+        raise ValueError(msg)
 
     provider, model_name = _infer_model_and_provider(model, provider=provider)
     pkg = _SUPPORTED_PROVIDERS[provider]
@@ -184,43 +193,43 @@ def init_embeddings(
         from langchain_openai import OpenAIEmbeddings
 
         return OpenAIEmbeddings(model=model_name, **kwargs)
-    elif provider == "azure_openai":
+    if provider == "azure_openai":
         from langchain_openai import AzureOpenAIEmbeddings
 
         return AzureOpenAIEmbeddings(model=model_name, **kwargs)
-    elif provider == "google_vertexai":
+    if provider == "google_vertexai":
         from langchain_google_vertexai import VertexAIEmbeddings
 
         return VertexAIEmbeddings(model=model_name, **kwargs)
-    elif provider == "bedrock":
+    if provider == "bedrock":
         from langchain_aws import BedrockEmbeddings
 
         return BedrockEmbeddings(model_id=model_name, **kwargs)
-    elif provider == "cohere":
+    if provider == "cohere":
         from langchain_cohere import CohereEmbeddings
 
         return CohereEmbeddings(model=model_name, **kwargs)
-    elif provider == "mistralai":
+    if provider == "mistralai":
         from langchain_mistralai import MistralAIEmbeddings
 
         return MistralAIEmbeddings(model=model_name, **kwargs)
-    elif provider == "huggingface":
+    if provider == "huggingface":
         from langchain_huggingface import HuggingFaceEmbeddings
 
         return HuggingFaceEmbeddings(model_name=model_name, **kwargs)
-    elif provider == "ollama":
+    if provider == "ollama":
        from langchain_ollama import OllamaEmbeddings
 
         return OllamaEmbeddings(model=model_name, **kwargs)
-    else:
-        raise ValueError(
-            f"Provider '{provider}' is not supported.\n"
-            f"Supported providers and their required packages:\n"
-            f"{_get_provider_list()}"
-        )
+    msg = (
+        f"Provider '{provider}' is not supported.\n"
+        f"Supported providers and their required packages:\n"
+        f"{_get_provider_list()}"
+    )
+    raise ValueError(msg)
 
 
 __all__ = [
-    "init_embeddings",
     "Embeddings",  # This one is for backwards compatibility
+    "init_embeddings",
 ]
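
The hunks above only restructure how error messages are built (bind to msg, then raise) and flatten the elif chain; the provider:model dispatch itself is unchanged. A minimal sketch of calling init_embeddings with such a string, assuming langchain-openai is installed and OPENAI_API_KEY is set (the model name is taken from the examples in the diff):

    from langchain.embeddings import init_embeddings

    # "openai:" selects the OpenAI branch of the dispatch shown above.
    embedder = init_embeddings("openai:text-embedding-3-small")
    vector = embedder.embed_query("hello world")
    print(len(vector))  # dimensionality of the returned embedding
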
langchain/embeddings/cache.py

@@ -26,16 +26,16 @@ NAMESPACE_UUID = uuid.UUID(int=1985)
 
 
 def _sha1_hash_to_uuid(text: str) -> uuid.UUID:
-    """Return a UUID derived from *text* using SHA1 (deterministic).
+    """Return a UUID derived from *text* using SHA-1 (deterministic).
 
-    Deterministic and fast, **but not collision‑resistant**.
+    Deterministic and fast, **but not collision-resistant**.
 
     A malicious attacker could try to create two different texts that hash to the same
     UUID. This may not necessarily be an issue in the context of caching embeddings,
     but new applications should swap this out for a stronger hash function like
-    xxHash, BLAKE2 or SHA256, which are collision-resistant.
+    xxHash, BLAKE2 or SHA-256, which are collision-resistant.
     """
-    sha1_hex = hashlib.sha1(text.encode("utf-8")).hexdigest()
+    sha1_hex = hashlib.sha1(text.encode("utf-8"), usedforsecurity=False).hexdigest()
     # Embed the hex string in `uuid5` to obtain a valid UUID.
     return uuid.uuid5(NAMESPACE_UUID, sha1_hex)
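
The helper in the hunk above is deliberately deterministic: the same text always maps to the same UUID, and usedforsecurity=False only marks the digest as non-cryptographic without changing its value. A standalone sketch of the same idea (the namespace constant is copied from the diff; the function name here is illustrative):

    import hashlib
    import uuid

    NAMESPACE_UUID = uuid.UUID(int=1985)

    def text_to_uuid(text: str) -> uuid.UUID:
        # SHA-1 digest of the text, folded into a UUIDv5 under a fixed namespace.
        sha1_hex = hashlib.sha1(text.encode("utf-8"), usedforsecurity=False).hexdigest()
        return uuid.uuid5(NAMESPACE_UUID, sha1_hex)

    assert text_to_uuid("hello") == text_to_uuid("hello")  # deterministic by design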
 
@@ -44,12 +44,12 @@ def _make_default_key_encoder(namespace: str, algorithm: str) -> Callable[[str],
     """Create a default key encoder function.
 
     Args:
-        namespace: Prefix that segregates keys from different embedding models.
-        algorithm:
-            * `sha1` - fast but not collision‑resistant
-            * `blake2b` - cryptographically strong, faster than SHA1
-            * `sha256` - cryptographically strong, slower than SHA1
-            * `sha512` - cryptographically strong, slower than SHA1
+        namespace: Prefix that segregates keys from different embedding models.
+        algorithm:
+            * ``'sha1'`` - fast but not collision-resistant
+            * ``'blake2b'`` - cryptographically strong, faster than SHA-1
+            * ``'sha256'`` - cryptographically strong, slower than SHA-1
+            * ``'sha512'`` - cryptographically strong, slower than SHA-1
 
     Returns:
         A function that encodes a key using the specified algorithm.
@@ -67,7 +67,8 @@ def _make_default_key_encoder(namespace: str, algorithm: str) -> Callable[[str],
             return f"{namespace}{hashlib.sha256(key.encode('utf-8')).hexdigest()}"
         if algorithm == "sha512":
             return f"{namespace}{hashlib.sha512(key.encode('utf-8')).hexdigest()}"
-        raise ValueError(f"Unsupported algorithm: {algorithm}")
+        msg = f"Unsupported algorithm: {algorithm}"
+        raise ValueError(msg)
 
     return _key_encoder
 
@@ -87,15 +88,15 @@ _warned_about_sha1: bool = False
 
 
 def _warn_about_sha1_encoder() -> None:
-    """Emit a one‑time warning about SHA1 collision weaknesses."""
-    global _warned_about_sha1
+    """Emit a one-time warning about SHA-1 collision weaknesses."""
+    global _warned_about_sha1  # noqa: PLW0603
     if not _warned_about_sha1:
         warnings.warn(
-            "Using default key encoder: SHA1 is *not* collision‑resistant. "
+            "Using default key encoder: SHA-1 is *not* collision-resistant. "
             "While acceptable for most cache scenarios, a motivated attacker "
             "can craft two different payloads that map to the same cache key. "
             "If that risk matters in your environment, supply a stronger "
-            "encoder (e.g. SHA256 or BLAKE2) via the `key_encoder` argument. "
+            "encoder (e.g. SHA-256 or BLAKE2) via the `key_encoder` argument. "
             "If you change the key encoder, consider also creating a new cache, "
             "to avoid (the potential for) collisions with existing keys.",
             category=UserWarning,
@@ -118,7 +119,6 @@ class CacheBackedEmbeddings(Embeddings):
     embeddings too, pass in a query_embedding_store to constructor.
 
     Examples:
-
         .. code-block: python
 
             from langchain.embeddings import CacheBackedEmbeddings
@@ -154,7 +154,7 @@ class CacheBackedEmbeddings(Embeddings):
             document_embedding_store: The store to use for caching document embeddings.
             batch_size: The number of documents to embed between store updates.
             query_embedding_store: The store to use for caching query embeddings.
-                If None, query embeddings are not cached.
+                If ``None``, query embeddings are not cached.
         """
         super().__init__()
         self.document_embedding_store = document_embedding_store
@@ -176,7 +176,7 @@ class CacheBackedEmbeddings(Embeddings):
             A list of embeddings for the given texts.
         """
         vectors: list[Union[list[float], None]] = self.document_embedding_store.mget(
-            texts
+            texts,
         )
         all_missing_indices: list[int] = [
             i for i, vector in enumerate(vectors) if vector is None
@@ -186,13 +186,14 @@ class CacheBackedEmbeddings(Embeddings):
             missing_texts = [texts[i] for i in missing_indices]
             missing_vectors = self.underlying_embeddings.embed_documents(missing_texts)
             self.document_embedding_store.mset(
-                list(zip(missing_texts, missing_vectors))
+                list(zip(missing_texts, missing_vectors)),
             )
             for index, updated_vector in zip(missing_indices, missing_vectors):
                 vectors[index] = updated_vector
 
         return cast(
-            list[list[float]], vectors
+            list[list[float]],
+            vectors,
         )  # Nones should have been resolved by now
 
     async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
@@ -220,23 +221,24 @@ class CacheBackedEmbeddings(Embeddings):
         for missing_indices in batch_iterate(self.batch_size, all_missing_indices):
             missing_texts = [texts[i] for i in missing_indices]
             missing_vectors = await self.underlying_embeddings.aembed_documents(
-                missing_texts
+                missing_texts,
             )
             await self.document_embedding_store.amset(
-                list(zip(missing_texts, missing_vectors))
+                list(zip(missing_texts, missing_vectors)),
            )
             for index, updated_vector in zip(missing_indices, missing_vectors):
                 vectors[index] = updated_vector
 
         return cast(
-            list[list[float]], vectors
+            list[list[float]],
+            vectors,
         )  # Nones should have been resolved by now
 
     def embed_query(self, text: str) -> list[float]:
         """Embed query text.
 
         By default, this method does not cache queries. To enable caching, set the
-        `cache_query` parameter to `True` when initializing the embedder.
+        ``cache_query`` parameter to ``True`` when initializing the embedder.
 
         Args:
             text: The text to embed.
@@ -259,7 +261,7 @@ class CacheBackedEmbeddings(Embeddings):
         """Embed query text.
 
         By default, this method does not cache queries. To enable caching, set the
-        `cache_query` parameter to `True` when initializing the embedder.
+        ``cache_query`` parameter to ``True`` when initializing the embedder.
 
         Args:
             text: The text to embed.
@@ -288,7 +290,8 @@ class CacheBackedEmbeddings(Embeddings):
         batch_size: Optional[int] = None,
         query_embedding_cache: Union[bool, ByteStore] = False,
         key_encoder: Union[
-            Callable[[str], str], Literal["sha1", "blake2b", "sha256", "sha512"]
+            Callable[[str], str],
+            Literal["sha1", "blake2b", "sha256", "sha512"],
         ] = "sha1",
     ) -> CacheBackedEmbeddings:
         """On-ramp that adds the necessary serialization and encoding to the store.
@@ -298,14 +301,14 @@ class CacheBackedEmbeddings(Embeddings):
             document_embedding_cache: The cache to use for storing document embeddings.
             *,
             namespace: The namespace to use for document cache.
-                This namespace is used to avoid collisions with other caches.
-                For example, set it to the name of the embedding model used.
+                This namespace is used to avoid collisions with other caches.
+                For example, set it to the name of the embedding model used.
             batch_size: The number of documents to embed between store updates.
             query_embedding_cache: The cache to use for storing query embeddings.
                 True to use the same cache as document embeddings.
                 False to not cache query embeddings.
             key_encoder: Optional callable to encode keys. If not provided,
-                a default encoder using SHA1 will be used. SHA-1 is not
+                a default encoder using SHA-1 will be used. SHA-1 is not
                 collision-resistant, and a motivated attacker could craft two
                 different texts that hash to the same cache key.
 
@@ -327,15 +330,17 @@ class CacheBackedEmbeddings(Embeddings):
             # namespace.
             # A user can handle namespacing in directly their custom key encoder.
             if namespace:
-                raise ValueError(
+                msg = (
                     "Do not supply `namespace` when using a custom key_encoder; "
                     "add any prefixing inside the encoder itself."
                 )
+                raise ValueError(msg)
         else:
-            raise ValueError(
+            msg = (
                 "key_encoder must be either 'blake2b', 'sha1', 'sha256', 'sha512' "
                 "or a callable that encodes keys."
             )
+            raise ValueError(msg)  # noqa: TRY004
 
         document_embedding_store = EncoderBackedStore[str, list[float]](
             document_embedding_cache,
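
Taken together, these hunks tighten the docstrings and keep the SHA-1 default (with its one-time warning) while documenting the named alternatives. A hedged sketch of the from_bytes_store factory they describe, picking one of the listed algorithms explicitly; the store class and embedding model are illustrative choices and assume langchain-openai is installed:

    from langchain.embeddings import CacheBackedEmbeddings
    from langchain.storage import LocalFileStore
    from langchain_openai import OpenAIEmbeddings

    store = LocalFileStore("./embedding_cache")  # any ByteStore works here
    cached_embedder = CacheBackedEmbeddings.from_bytes_store(
        OpenAIEmbeddings(model="text-embedding-3-small"),
        store,
        namespace="text-embedding-3-small",  # keeps different models from sharing keys
        key_encoder="blake2b",  # named algorithm instead of the default "sha1"
    )
    vectors = cached_embedder.embed_documents(["first call embeds", "repeat calls hit the cache"])
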
langchain/embeddings/fake.py

@@ -25,6 +25,6 @@ def __getattr__(name: str) -> Any:
 
 
 __all__ = [
-    "FakeEmbeddings",
     "DeterministicFakeEmbedding",
+    "FakeEmbeddings",
 ]
langchain/embeddings/huggingface.py

@@ -29,8 +29,8 @@ def __getattr__(name: str) -> Any:
 
 
 __all__ = [
-    "HuggingFaceEmbeddings",
-    "HuggingFaceInstructEmbeddings",
     "HuggingFaceBgeEmbeddings",
+    "HuggingFaceEmbeddings",
     "HuggingFaceInferenceAPIEmbeddings",
+    "HuggingFaceInstructEmbeddings",
 ]
langchain/evaluation/__init__.py

@@ -95,34 +95,34 @@ from langchain.evaluation.string_distance import (
 )
 
 __all__ = [
-    "EvaluatorType",
-    "ExactMatchStringEvaluator",
-    "RegexMatchStringEvaluator",
-    "PairwiseStringEvalChain",
-    "LabeledPairwiseStringEvalChain",
-    "QAEvalChain",
-    "CotQAEvalChain",
+    "AgentTrajectoryEvaluator",
     "ContextQAEvalChain",
-    "StringEvaluator",
-    "PairwiseStringEvaluator",
-    "TrajectoryEvalChain",
-    "CriteriaEvalChain",
+    "CotQAEvalChain",
     "Criteria",
+    "CriteriaEvalChain",
     "EmbeddingDistance",
     "EmbeddingDistanceEvalChain",
+    "EvaluatorType",
+    "ExactMatchStringEvaluator",
+    "JsonEditDistanceEvaluator",
+    "JsonEqualityEvaluator",
+    "JsonSchemaEvaluator",
+    "JsonValidityEvaluator",
+    "LabeledCriteriaEvalChain",
+    "LabeledPairwiseStringEvalChain",
+    "LabeledScoreStringEvalChain",
     "PairwiseEmbeddingDistanceEvalChain",
+    "PairwiseStringDistanceEvalChain",
+    "PairwiseStringEvalChain",
+    "PairwiseStringEvaluator",
+    "QAEvalChain",
+    "RegexMatchStringEvaluator",
+    "ScoreStringEvalChain",
     "StringDistance",
     "StringDistanceEvalChain",
-    "PairwiseStringDistanceEvalChain",
-    "LabeledCriteriaEvalChain",
-    "load_evaluators",
-    "load_evaluator",
+    "StringEvaluator",
+    "TrajectoryEvalChain",
     "load_dataset",
-    "AgentTrajectoryEvaluator",
-    "ScoreStringEvalChain",
-    "LabeledScoreStringEvalChain",
-    "JsonValidityEvaluator",
-    "JsonEqualityEvaluator",
-    "JsonEditDistanceEvaluator",
-    "JsonSchemaEvaluator",
+    "load_evaluator",
+    "load_evaluators",
 ]
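
The hunk above only alphabetizes __all__; every evaluator keeps its public name. A small sketch exercising one of the listed entry points that needs no LLM or API key (the strings are illustrative):

    from langchain.evaluation import load_evaluator

    # "exact_match" resolves to the ExactMatchStringEvaluator listed above.
    evaluator = load_evaluator("exact_match")
    result = evaluator.evaluate_strings(prediction="LangChain", reference="LangChain")
    print(result)  # e.g. {"score": 1}
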
langchain/evaluation/agents/trajectory_eval_chain.py

@@ -27,6 +27,7 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.output_parsers import BaseOutputParser
 from langchain_core.tools import BaseTool
 from pydantic import ConfigDict, Field
+from typing_extensions import override
 
 from langchain.chains.llm import LLMChain
 from langchain.evaluation.agents.trajectory_eval_prompt import (
@@ -35,6 +36,8 @@ from langchain.evaluation.agents.trajectory_eval_prompt import (
 )
 from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain
 
+_MAX_SCORE = 5
+
 
 class TrajectoryEval(TypedDict):
     """A named tuple containing the score and reasoning for a trajectory."""
@@ -66,9 +69,8 @@ class TrajectoryOutputParser(BaseOutputParser):
             if the LLM's score is not a digit in the range 1-5.
         """
         if "Score:" not in text:
-            raise OutputParserException(
-                f"Could not find score in model eval output: {text}"
-            )
+            msg = f"Could not find score in model eval output: {text}"
+            raise OutputParserException(msg)
 
         reasoning, score_str = text.split("Score: ", maxsplit=1)
 
@@ -82,16 +84,14 @@ class TrajectoryOutputParser(BaseOutputParser):
         _score = re.search(r"(\d+(\.\d+)?)", score_str)
         # If the score is not found or is a float, raise an exception.
         if _score is None or "." in _score.group(1):
-            raise OutputParserException(
-                f"Score is not an integer digit in the range 1-5: {text}"
-            )
+            msg = f"Score is not an integer digit in the range 1-5: {text}"
+            raise OutputParserException(msg)
         score = int(_score.group(1))
         # If the score is not in the range 1-5, raise an exception.
-        if not 1 <= score <= 5:
-            raise OutputParserException(
-                f"Score is not a digit in the range 1-5: {text}"
-            )
-        normalized_score = (score - 1) / 4
+        if not 1 <= score <= _MAX_SCORE:
+            msg = f"Score is not a digit in the range 1-5: {text}"
+            raise OutputParserException(msg)
+        normalized_score = (score - 1) / (_MAX_SCORE - 1)
         return TrajectoryEval(score=normalized_score, reasoning=reasoning)
 
 
@@ -147,7 +147,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
     eval_chain: LLMChain
     """The language model chain used for evaluation."""
     output_parser: TrajectoryOutputParser = Field(
-        default_factory=TrajectoryOutputParser
+        default_factory=TrajectoryOutputParser,
     )
     """The output parser used to parse the output."""
     return_reasoning: bool = False  # :meta private:
@@ -176,7 +176,7 @@ class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
                 f"""Tool {i}: {tool.name}
Description: {tool.description}"""
                 for i, tool in enumerate(self.agent_tools, 1)
-            ]
+            ],
         )
 
     @staticmethod
@@ -201,7 +201,7 @@ Tool used: {action.tool}
Tool input: {action.tool_input}
Tool output: {output}"""
                 for i, (action, output) in enumerate(steps, 1)
-            ]
+            ],
         )
 
     @staticmethod
@@ -244,13 +244,9 @@ The following is the expected answer. Use this to measure correctness:
             TrajectoryEvalChain: The TrajectoryEvalChain object.
         """
         if not isinstance(llm, BaseChatModel):
-            raise NotImplementedError(
-                "Only chat models supported by the current trajectory eval"
-            )
-        if agent_tools:
-            prompt = EVAL_CHAT_PROMPT
-        else:
-            prompt = TOOL_FREE_EVAL_CHAT_PROMPT
+            msg = "Only chat models supported by the current trajectory eval"
+            raise NotImplementedError(msg)
+        prompt = EVAL_CHAT_PROMPT if agent_tools else TOOL_FREE_EVAL_CHAT_PROMPT
         eval_chain = LLMChain(llm=llm, prompt=prompt)
         return cls(
             agent_tools=agent_tools,  # type: ignore[arg-type]
@@ -302,7 +298,8 @@ The following is the expected answer. Use this to measure correctness:
             chain_input["tool_descriptions"] = self._tools_description
         _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
         raw_output = self.eval_chain.run(
-            chain_input, callbacks=_run_manager.get_child()
+            chain_input,
+            callbacks=_run_manager.get_child(),
         )
         return cast(dict, self.output_parser.parse(raw_output))
 
@@ -326,10 +323,12 @@ The following is the expected answer. Use this to measure correctness:
             chain_input["tool_descriptions"] = self._tools_description
         _run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
         raw_output = await self.eval_chain.arun(
-            chain_input, callbacks=_run_manager.get_child()
+            chain_input,
+            callbacks=_run_manager.get_child(),
         )
         return cast(dict, self.output_parser.parse(raw_output))
 
+    @override
     def _evaluate_agent_trajectory(
         self,
         *,
@@ -372,6 +371,7 @@ The following is the expected answer. Use this to measure correctness:
             return_only_outputs=True,
         )
 
+    @override
     async def _aevaluate_agent_trajectory(
         self,
         *,
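
The parser changes above are behavior-preserving: the hard-coded 5 and the /4 divisor are simply replaced by the _MAX_SCORE constant. A quick arithmetic check of the normalization as now written (no langchain imports needed):

    _MAX_SCORE = 5

    # Integer "Score:" values 1..5 map onto [0.0, 1.0] in steps of 0.25,
    # exactly as the old (score - 1) / 4 expression did.
    for score in range(1, _MAX_SCORE + 1):
        print(score, (score - 1) / (_MAX_SCORE - 1))
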
langchain/evaluation/agents/trajectory_eval_prompt.py

@@ -1,14 +1,11 @@
 """Prompt for trajectory evaluation chain."""
 
-# flake8: noqa
-from langchain_core.messages import HumanMessage, AIMessage, SystemMessage
-
+from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
 from langchain_core.prompts.chat import (
     ChatPromptTemplate,
     HumanMessagePromptTemplate,
 )
 
-
 EVAL_TEMPLATE = """An AI language model has been given access to the following set of tools to help answer a user's question.
 
 The tools given to the AI model are:
@@ -39,7 +36,7 @@ i. Is the final answer helpful?
 ii. Does the AI language use a logical sequence of tools to answer the question?
 iii. Does the AI language model use the tools in a helpful way?
 iv. Does the AI language model use too many steps to answer the question?
-v. Are the appropriate tools used to answer the question?"""
+v. Are the appropriate tools used to answer the question?"""  # noqa: E501
 
 EXAMPLE_INPUT = """An AI language model has been given access to the following set of tools to help answer a user's question.
 
@@ -84,7 +81,7 @@ i. Is the final answer helpful?
 ii. Does the AI language use a logical sequence of tools to answer the question?
 iii. Does the AI language model use the tools in a helpful way?
 iv. Does the AI language model use too many steps to answer the question?
-v. Are the appropriate tools used to answer the question?"""
+v. Are the appropriate tools used to answer the question?"""  # noqa: E501
 
 EXAMPLE_OUTPUT = """First, let's evaluate the final answer. The final uses good reasoning but is wrong. 2,857 divided by 305 is not 17.5.\
The model should have used the calculator to figure this out. Second does the model use a logical sequence of tools to answer the question?\
@@ -92,10 +89,10 @@ The way model uses the search is not helpful. The model should have used the sea
The model didn't use the calculator tool and gave an incorrect answer. The search API should be used for current events or specific questions.\
The tools were not used in a helpful way. The model did not use too many steps to answer the question.\
The model did not use the appropriate tools to answer the question.\
-
+
 
 Judgment: Given the good reasoning in the final answer but otherwise poor performance, we give the model a score of 2.
 
-Score: 2"""
+Score: 2"""  # noqa: E501
 
 EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages(
     messages=[
@@ -134,7 +131,7 @@ i. Is the final answer helpful?
 ii. Does the AI language use a logical sequence of tools to answer the question?
 iii. Does the AI language model use the tools in a helpful way?
 iv. Does the AI language model use too many steps to answer the question?
-v. Are the appropriate tools used to answer the question?"""
+v. Are the appropriate tools used to answer the question?"""  # noqa: E501
 
 
 TOOL_FREE_EVAL_CHAT_PROMPT = ChatPromptTemplate.from_messages(
langchain/evaluation/comparison/__init__.py

@@ -33,4 +33,4 @@ from langchain.evaluation.comparison.eval_chain import (
     PairwiseStringEvalChain,
 )
 
-__all__ = ["PairwiseStringEvalChain", "LabeledPairwiseStringEvalChain"]
+__all__ = ["LabeledPairwiseStringEvalChain", "PairwiseStringEvalChain"]
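
For completeness, a hedged sketch of how the two re-exported comparison evaluators are usually reached through the generic loader; the chat model (ChatOpenAI with this particular model name) is an illustrative assumption, not something the diff prescribes:

    from langchain.evaluation import load_evaluator
    from langchain_openai import ChatOpenAI

    # "labeled_pairwise_string" resolves to LabeledPairwiseStringEvalChain.
    evaluator = load_evaluator("labeled_pairwise_string", llm=ChatOpenAI(model="gpt-4o-mini"))
    result = evaluator.evaluate_string_pairs(
        prediction="Paris is the capital of France.",
        prediction_b="France's capital city is Paris.",
        input="What is the capital of France?",
        reference="Paris",
    )
    print(result["score"], result.get("reasoning"))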