langchain 1.0.0-alpha.5 → 1.0.0-alpha.6

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
Files changed (351)
  1. package/dist/agents/ReactAgent.cjs +5 -5
  2. package/dist/agents/ReactAgent.cjs.map +1 -1
  3. package/dist/agents/ReactAgent.d.cts +1 -3
  4. package/dist/agents/ReactAgent.d.cts.map +1 -1
  5. package/dist/agents/ReactAgent.d.ts +1 -3
  6. package/dist/agents/ReactAgent.d.ts.map +1 -1
  7. package/dist/agents/ReactAgent.js +6 -6
  8. package/dist/agents/ReactAgent.js.map +1 -1
  9. package/dist/agents/annotation.cjs.map +1 -1
  10. package/dist/agents/annotation.d.cts +4 -6
  11. package/dist/agents/annotation.d.cts.map +1 -1
  12. package/dist/agents/annotation.d.ts +4 -6
  13. package/dist/agents/annotation.d.ts.map +1 -1
  14. package/dist/agents/annotation.js.map +1 -1
  15. package/dist/agents/createAgent.cjs.map +1 -1
  16. package/dist/agents/createAgent.js.map +1 -1
  17. package/dist/agents/index.cjs +2 -2
  18. package/dist/agents/index.cjs.map +1 -1
  19. package/dist/agents/index.d.cts +47 -47
  20. package/dist/agents/index.d.cts.map +1 -1
  21. package/dist/agents/index.d.ts +47 -47
  22. package/dist/agents/index.d.ts.map +1 -1
  23. package/dist/agents/index.js +2 -2
  24. package/dist/agents/index.js.map +1 -1
  25. package/dist/agents/middlewareAgent/ReactAgent.cjs +18 -18
  26. package/dist/agents/middlewareAgent/ReactAgent.cjs.map +1 -1
  27. package/dist/agents/middlewareAgent/ReactAgent.d.cts +8 -9
  28. package/dist/agents/middlewareAgent/ReactAgent.d.cts.map +1 -1
  29. package/dist/agents/middlewareAgent/ReactAgent.d.ts +8 -9
  30. package/dist/agents/middlewareAgent/ReactAgent.d.ts.map +1 -1
  31. package/dist/agents/middlewareAgent/ReactAgent.js +18 -18
  32. package/dist/agents/middlewareAgent/ReactAgent.js.map +1 -1
  33. package/dist/agents/middlewareAgent/annotation.cjs +2 -2
  34. package/dist/agents/middlewareAgent/annotation.cjs.map +1 -1
  35. package/dist/agents/middlewareAgent/annotation.js +2 -2
  36. package/dist/agents/middlewareAgent/annotation.js.map +1 -1
  37. package/dist/agents/middlewareAgent/index.cjs.map +1 -1
  38. package/dist/agents/middlewareAgent/index.js.map +1 -1
  39. package/dist/agents/middlewareAgent/{middlewares → middleware}/hitl.cjs +8 -8
  40. package/dist/agents/middlewareAgent/middleware/hitl.cjs.map +1 -0
  41. package/dist/agents/middlewareAgent/{middlewares → middleware}/hitl.d.cts +3 -3
  42. package/dist/agents/middlewareAgent/middleware/hitl.d.cts.map +1 -0
  43. package/dist/agents/middlewareAgent/{middlewares → middleware}/hitl.d.ts +3 -3
  44. package/dist/agents/middlewareAgent/middleware/hitl.d.ts.map +1 -0
  45. package/dist/agents/middlewareAgent/{middlewares → middleware}/hitl.js +3 -3
  46. package/dist/agents/middlewareAgent/middleware/hitl.js.map +1 -0
  47. package/dist/agents/middlewareAgent/middleware/index.cjs +26 -0
  48. package/dist/agents/middlewareAgent/middleware/index.cjs.map +1 -0
  49. package/dist/agents/middlewareAgent/middleware/index.js +17 -0
  50. package/dist/agents/middlewareAgent/middleware/index.js.map +1 -0
  51. package/dist/agents/middlewareAgent/{middlewares → middleware}/promptCaching.cjs +51 -22
  52. package/dist/agents/middlewareAgent/middleware/promptCaching.cjs.map +1 -0
  53. package/dist/agents/middlewareAgent/{middlewares → middleware}/promptCaching.d.cts +9 -9
  54. package/dist/agents/middlewareAgent/middleware/promptCaching.d.cts.map +1 -0
  55. package/dist/agents/middlewareAgent/{middlewares → middleware}/promptCaching.d.ts +9 -9
  56. package/dist/agents/middlewareAgent/middleware/promptCaching.d.ts.map +1 -0
  57. package/dist/agents/middlewareAgent/{middlewares → middleware}/promptCaching.js +50 -21
  58. package/dist/agents/middlewareAgent/middleware/promptCaching.js.map +1 -0
  59. package/dist/agents/middlewareAgent/{middlewares → middleware}/summarization.cjs +11 -11
  60. package/dist/agents/middlewareAgent/middleware/summarization.cjs.map +1 -0
  61. package/dist/agents/middlewareAgent/{middlewares → middleware}/summarization.d.cts +3 -3
  62. package/dist/agents/middlewareAgent/middleware/summarization.d.cts.map +1 -0
  63. package/dist/agents/middlewareAgent/{middlewares → middleware}/summarization.d.ts +10 -10
  64. package/dist/agents/middlewareAgent/{middlewares/summarization.d.cts.map → middleware/summarization.d.ts.map} +1 -1
  65. package/dist/agents/middlewareAgent/{middlewares → middleware}/summarization.js +4 -4
  66. package/dist/agents/middlewareAgent/middleware/summarization.js.map +1 -0
  67. package/dist/agents/middlewareAgent/middleware.cjs.map +1 -1
  68. package/dist/agents/middlewareAgent/middleware.d.cts +1 -1
  69. package/dist/agents/middlewareAgent/middleware.d.cts.map +1 -1
  70. package/dist/agents/middlewareAgent/middleware.d.ts +1 -1
  71. package/dist/agents/middlewareAgent/middleware.d.ts.map +1 -1
  72. package/dist/agents/middlewareAgent/middleware.js.map +1 -1
  73. package/dist/agents/middlewareAgent/nodes/AfterModalNode.cjs.map +1 -1
  74. package/dist/agents/middlewareAgent/nodes/AfterModalNode.js.map +1 -1
  75. package/dist/agents/middlewareAgent/nodes/AgentNode.cjs +12 -19
  76. package/dist/agents/middlewareAgent/nodes/AgentNode.cjs.map +1 -1
  77. package/dist/agents/middlewareAgent/nodes/AgentNode.js +12 -19
  78. package/dist/agents/middlewareAgent/nodes/AgentNode.js.map +1 -1
  79. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.cjs.map +1 -1
  80. package/dist/agents/middlewareAgent/nodes/BeforeModalNode.js.map +1 -1
  81. package/dist/agents/middlewareAgent/nodes/middleware.cjs.map +1 -1
  82. package/dist/agents/middlewareAgent/nodes/middleware.js.map +1 -1
  83. package/dist/agents/middlewareAgent/nodes/utils.cjs +7 -7
  84. package/dist/agents/middlewareAgent/nodes/utils.cjs.map +1 -1
  85. package/dist/agents/middlewareAgent/nodes/utils.js +3 -3
  86. package/dist/agents/middlewareAgent/nodes/utils.js.map +1 -1
  87. package/dist/agents/middlewareAgent/types.d.cts +10 -23
  88. package/dist/agents/middlewareAgent/types.d.cts.map +1 -1
  89. package/dist/agents/middlewareAgent/types.d.ts +10 -23
  90. package/dist/agents/middlewareAgent/types.d.ts.map +1 -1
  91. package/dist/agents/nodes/AgentNode.cjs +4 -4
  92. package/dist/agents/nodes/AgentNode.cjs.map +1 -1
  93. package/dist/agents/nodes/AgentNode.js +4 -4
  94. package/dist/agents/nodes/AgentNode.js.map +1 -1
  95. package/dist/agents/nodes/ToolNode.cjs +3 -3
  96. package/dist/agents/nodes/ToolNode.cjs.map +1 -1
  97. package/dist/agents/nodes/ToolNode.d.cts +1 -2
  98. package/dist/agents/nodes/ToolNode.d.cts.map +1 -1
  99. package/dist/agents/nodes/ToolNode.d.ts +1 -2
  100. package/dist/agents/nodes/ToolNode.d.ts.map +1 -1
  101. package/dist/agents/nodes/ToolNode.js +4 -4
  102. package/dist/agents/nodes/ToolNode.js.map +1 -1
  103. package/dist/agents/responses.cjs +1 -1
  104. package/dist/agents/responses.cjs.map +1 -1
  105. package/dist/agents/responses.d.cts.map +1 -1
  106. package/dist/agents/responses.d.ts.map +1 -1
  107. package/dist/agents/responses.js +1 -1
  108. package/dist/agents/responses.js.map +1 -1
  109. package/dist/agents/types.d.cts +1 -3
  110. package/dist/agents/types.d.cts.map +1 -1
  111. package/dist/agents/types.d.ts +1 -3
  112. package/dist/agents/types.d.ts.map +1 -1
  113. package/dist/agents/utils.cjs +6 -6
  114. package/dist/agents/utils.cjs.map +1 -1
  115. package/dist/agents/utils.js +7 -7
  116. package/dist/agents/utils.js.map +1 -1
  117. package/dist/agents/withAgentName.cjs.map +1 -1
  118. package/dist/agents/withAgentName.js.map +1 -1
  119. package/dist/chains/api/prompts.cjs.map +1 -1
  120. package/dist/chains/api/prompts.js.map +1 -1
  121. package/dist/chains/constitutional_ai/constitutional_chain.cjs.map +1 -1
  122. package/dist/chains/constitutional_ai/constitutional_chain.js.map +1 -1
  123. package/dist/chains/index.cjs +0 -3
  124. package/dist/chains/index.cjs.map +1 -1
  125. package/dist/chains/index.d.cts +1 -2
  126. package/dist/chains/index.d.ts +1 -2
  127. package/dist/chains/index.js +1 -3
  128. package/dist/chains/index.js.map +1 -1
  129. package/dist/chains/openai_functions/extraction.cjs.map +1 -1
  130. package/dist/chains/openai_functions/extraction.d.cts +1 -3
  131. package/dist/chains/openai_functions/extraction.d.cts.map +1 -1
  132. package/dist/chains/openai_functions/extraction.d.ts +1 -3
  133. package/dist/chains/openai_functions/extraction.d.ts.map +1 -1
  134. package/dist/chains/openai_functions/extraction.js.map +1 -1
  135. package/dist/chains/openai_functions/index.cjs +0 -5
  136. package/dist/chains/openai_functions/index.cjs.map +1 -1
  137. package/dist/chains/openai_functions/index.d.cts +1 -2
  138. package/dist/chains/openai_functions/index.d.ts +1 -2
  139. package/dist/chains/openai_functions/index.js +1 -4
  140. package/dist/chains/openai_functions/index.js.map +1 -1
  141. package/dist/chains/openai_functions/openapi.cjs +4 -4
  142. package/dist/chains/openai_functions/openapi.cjs.map +1 -1
  143. package/dist/chains/openai_functions/openapi.d.cts +1 -1
  144. package/dist/chains/openai_functions/openapi.js +4 -4
  145. package/dist/chains/openai_functions/openapi.js.map +1 -1
  146. package/dist/chains/openai_functions/tagging.cjs.map +1 -1
  147. package/dist/chains/openai_functions/tagging.d.cts +1 -3
  148. package/dist/chains/openai_functions/tagging.d.cts.map +1 -1
  149. package/dist/chains/openai_functions/tagging.d.ts +1 -3
  150. package/dist/chains/openai_functions/tagging.d.ts.map +1 -1
  151. package/dist/chains/openai_functions/tagging.js.map +1 -1
  152. package/dist/chains/query_constructor/index.cjs +4 -4
  153. package/dist/chains/query_constructor/index.cjs.map +1 -1
  154. package/dist/chains/query_constructor/index.d.cts +4 -2
  155. package/dist/chains/query_constructor/index.d.cts.map +1 -1
  156. package/dist/chains/query_constructor/index.d.ts +4 -2
  157. package/dist/chains/query_constructor/index.d.ts.map +1 -1
  158. package/dist/chains/query_constructor/index.js +1 -1
  159. package/dist/chains/query_constructor/index.js.map +1 -1
  160. package/dist/chains/question_answering/load.d.ts +2 -2
  161. package/dist/chains/question_answering/load.d.ts.map +1 -1
  162. package/dist/chains/question_answering/map_reduce_prompts.cjs.map +1 -1
  163. package/dist/chains/question_answering/map_reduce_prompts.js.map +1 -1
  164. package/dist/chains/question_answering/refine_prompts.cjs.map +1 -1
  165. package/dist/chains/question_answering/refine_prompts.js.map +1 -1
  166. package/dist/chains/question_answering/stuff_prompts.cjs.map +1 -1
  167. package/dist/chains/question_answering/stuff_prompts.js.map +1 -1
  168. package/dist/chains/router/multi_prompt.cjs +4 -4
  169. package/dist/chains/router/multi_prompt.cjs.map +1 -1
  170. package/dist/chains/router/multi_prompt.js +1 -1
  171. package/dist/chains/router/multi_prompt.js.map +1 -1
  172. package/dist/chains/router/multi_retrieval_qa.cjs +4 -4
  173. package/dist/chains/router/multi_retrieval_qa.cjs.map +1 -1
  174. package/dist/chains/router/multi_retrieval_qa.js +1 -1
  175. package/dist/chains/router/multi_retrieval_qa.js.map +1 -1
  176. package/dist/chains/sql_db/sql_db_prompt.cjs.map +1 -1
  177. package/dist/chains/sql_db/sql_db_prompt.d.cts.map +1 -1
  178. package/dist/chains/sql_db/sql_db_prompt.d.ts.map +1 -1
  179. package/dist/chains/sql_db/sql_db_prompt.js.map +1 -1
  180. package/dist/chains/summarization/load.d.ts +2 -2
  181. package/dist/chains/summarization/load.d.ts.map +1 -1
  182. package/dist/chains/summarization/stuff_prompts.cjs.map +1 -1
  183. package/dist/chains/summarization/stuff_prompts.js.map +1 -1
  184. package/dist/chat_models/universal.cjs +8 -5
  185. package/dist/chat_models/universal.cjs.map +1 -1
  186. package/dist/chat_models/universal.d.cts +2 -2
  187. package/dist/chat_models/universal.d.cts.map +1 -1
  188. package/dist/chat_models/universal.d.ts +2 -2
  189. package/dist/chat_models/universal.d.ts.map +1 -1
  190. package/dist/chat_models/universal.js +8 -5
  191. package/dist/chat_models/universal.js.map +1 -1
  192. package/dist/document_loaders/fs/directory.cjs.map +1 -1
  193. package/dist/document_loaders/fs/directory.d.cts +0 -1
  194. package/dist/document_loaders/fs/directory.d.cts.map +1 -1
  195. package/dist/document_loaders/fs/directory.d.ts +0 -1
  196. package/dist/document_loaders/fs/directory.d.ts.map +1 -1
  197. package/dist/document_loaders/fs/directory.js.map +1 -1
  198. package/dist/document_loaders/fs/json.cjs +7 -1
  199. package/dist/document_loaders/fs/json.cjs.map +1 -1
  200. package/dist/document_loaders/fs/json.js +7 -1
  201. package/dist/document_loaders/fs/json.js.map +1 -1
  202. package/dist/embeddings/cache_backed.cjs +1 -1
  203. package/dist/embeddings/cache_backed.cjs.map +1 -1
  204. package/dist/embeddings/cache_backed.d.cts +1 -1
  205. package/dist/embeddings/cache_backed.d.ts +1 -1
  206. package/dist/embeddings/cache_backed.js +2 -2
  207. package/dist/embeddings/cache_backed.js.map +1 -1
  208. package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
  209. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  210. package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
  211. package/dist/evaluation/embedding_distance/base.cjs +2 -4
  212. package/dist/evaluation/embedding_distance/base.cjs.map +1 -1
  213. package/dist/evaluation/embedding_distance/base.js +2 -3
  214. package/dist/evaluation/embedding_distance/base.js.map +1 -1
  215. package/dist/evaluation/loader.cjs +7 -12
  216. package/dist/evaluation/loader.cjs.map +1 -1
  217. package/dist/evaluation/loader.d.cts +8 -2
  218. package/dist/evaluation/loader.d.cts.map +1 -1
  219. package/dist/evaluation/loader.d.ts +8 -2
  220. package/dist/evaluation/loader.d.ts.map +1 -1
  221. package/dist/evaluation/loader.js +7 -12
  222. package/dist/evaluation/loader.js.map +1 -1
  223. package/dist/hub/base.cjs.map +1 -1
  224. package/dist/hub/base.js.map +1 -1
  225. package/dist/langchain-core/dist/load/serializable.d.cts.map +1 -1
  226. package/dist/langchain-core/dist/messages/base.d.cts +24 -33
  227. package/dist/langchain-core/dist/messages/base.d.cts.map +1 -1
  228. package/dist/langchain-core/dist/messages/content/index.d.cts +1 -1
  229. package/dist/langchain-core/dist/messages/content/index.d.cts.map +1 -1
  230. package/dist/langchain-core/dist/messages/message.d.cts +598 -0
  231. package/dist/langchain-core/dist/messages/message.d.cts.map +1 -0
  232. package/dist/langchain-core/dist/messages/metadata.d.cts +97 -0
  233. package/dist/langchain-core/dist/messages/metadata.d.cts.map +1 -0
  234. package/dist/langchain-core/dist/messages/utils.d.cts +75 -0
  235. package/dist/langchain-core/dist/messages/utils.d.cts.map +1 -0
  236. package/dist/langchain-core/dist/prompt_values.d.cts.map +1 -1
  237. package/dist/libs/langchain-core/dist/load/serializable.d.ts.map +1 -1
  238. package/dist/libs/langchain-core/dist/messages/base.d.ts +24 -33
  239. package/dist/libs/langchain-core/dist/messages/base.d.ts.map +1 -1
  240. package/dist/libs/langchain-core/dist/messages/content/index.d.ts +1 -1
  241. package/dist/libs/langchain-core/dist/messages/content/index.d.ts.map +1 -1
  242. package/dist/libs/langchain-core/dist/messages/message.d.ts +598 -0
  243. package/dist/libs/langchain-core/dist/messages/message.d.ts.map +1 -0
  244. package/dist/libs/langchain-core/dist/messages/metadata.d.ts +97 -0
  245. package/dist/libs/langchain-core/dist/messages/metadata.d.ts.map +1 -0
  246. package/dist/libs/langchain-core/dist/messages/utils.d.ts +75 -0
  247. package/dist/libs/langchain-core/dist/messages/utils.d.ts.map +1 -0
  248. package/dist/libs/langchain-core/dist/prompt_values.d.ts.map +1 -1
  249. package/dist/libs/langchain-core/dist/utils/types/index.d.ts +2 -0
  250. package/dist/libs/langchain-core/dist/utils/types/index.d.ts.map +1 -1
  251. package/dist/libs/langchain-core/dist/utils/types/zod.d.ts +1 -0
  252. package/dist/load/import_map.cjs +3 -14
  253. package/dist/load/import_map.cjs.map +1 -1
  254. package/dist/load/import_map.js +3 -14
  255. package/dist/load/import_map.js.map +1 -1
  256. package/dist/memory/prompt.cjs.map +1 -1
  257. package/dist/memory/prompt.d.cts.map +1 -1
  258. package/dist/memory/prompt.d.ts.map +1 -1
  259. package/dist/memory/prompt.js.map +1 -1
  260. package/dist/output_parsers/combining.cjs +1 -1
  261. package/dist/output_parsers/combining.cjs.map +1 -1
  262. package/dist/output_parsers/combining.js +1 -1
  263. package/dist/output_parsers/combining.js.map +1 -1
  264. package/dist/output_parsers/expression_type_handlers/array_literal_expression_handler.cjs.map +1 -1
  265. package/dist/output_parsers/expression_type_handlers/array_literal_expression_handler.js.map +1 -1
  266. package/dist/output_parsers/expression_type_handlers/base.cjs +1 -1
  267. package/dist/output_parsers/expression_type_handlers/base.cjs.map +1 -1
  268. package/dist/output_parsers/expression_type_handlers/base.js +1 -1
  269. package/dist/output_parsers/expression_type_handlers/base.js.map +1 -1
  270. package/dist/output_parsers/regex.cjs.map +1 -1
  271. package/dist/output_parsers/regex.js.map +1 -1
  272. package/dist/output_parsers/structured.cjs +4 -4
  273. package/dist/output_parsers/structured.cjs.map +1 -1
  274. package/dist/output_parsers/structured.d.cts +1 -1
  275. package/dist/output_parsers/structured.d.cts.map +1 -1
  276. package/dist/output_parsers/structured.d.ts +1 -1
  277. package/dist/output_parsers/structured.d.ts.map +1 -1
  278. package/dist/output_parsers/structured.js +2 -2
  279. package/dist/output_parsers/structured.js.map +1 -1
  280. package/dist/retrievers/ensemble.cjs.map +1 -1
  281. package/dist/retrievers/ensemble.js.map +1 -1
  282. package/dist/storage/file_system.cjs +1 -1
  283. package/dist/storage/file_system.cjs.map +1 -1
  284. package/dist/storage/file_system.js +1 -1
  285. package/dist/storage/file_system.js.map +1 -1
  286. package/dist/tools/fs.cjs +5 -5
  287. package/dist/tools/fs.cjs.map +1 -1
  288. package/dist/tools/fs.d.cts +1 -1
  289. package/dist/tools/fs.d.cts.map +1 -1
  290. package/dist/tools/fs.d.ts +1 -1
  291. package/dist/tools/fs.d.ts.map +1 -1
  292. package/dist/tools/fs.js +1 -1
  293. package/dist/tools/fs.js.map +1 -1
  294. package/dist/tools/retriever.cjs +2 -2
  295. package/dist/tools/retriever.cjs.map +1 -1
  296. package/dist/tools/retriever.d.cts +1 -1
  297. package/dist/tools/retriever.d.cts.map +1 -1
  298. package/dist/tools/retriever.d.ts +1 -1
  299. package/dist/tools/retriever.d.ts.map +1 -1
  300. package/dist/tools/retriever.js +1 -1
  301. package/dist/tools/retriever.js.map +1 -1
  302. package/dist/tools/sql.cjs +1 -2
  303. package/dist/tools/sql.cjs.map +1 -1
  304. package/dist/tools/sql.d.cts +1 -1
  305. package/dist/tools/sql.d.cts.map +1 -1
  306. package/dist/tools/sql.d.ts +1 -1
  307. package/dist/tools/sql.d.ts.map +1 -1
  308. package/dist/tools/sql.js +1 -2
  309. package/dist/tools/sql.js.map +1 -1
  310. package/dist/types/expression-parser.d.cts +2 -0
  311. package/dist/types/expression-parser.d.cts.map +1 -1
  312. package/dist/types/expression-parser.d.ts +2 -0
  313. package/dist/types/expression-parser.d.ts.map +1 -1
  314. package/dist/util/hub.cjs +1 -1
  315. package/dist/util/hub.js +1 -1
  316. package/dist/util/openapi.cjs +1 -1
  317. package/dist/util/openapi.cjs.map +1 -1
  318. package/dist/util/openapi.js +1 -1
  319. package/dist/util/openapi.js.map +1 -1
  320. package/package.json +15 -21
  321. package/dist/agents/middlewareAgent/middlewares/hitl.cjs.map +0 -1
  322. package/dist/agents/middlewareAgent/middlewares/hitl.d.cts.map +0 -1
  323. package/dist/agents/middlewareAgent/middlewares/hitl.d.ts.map +0 -1
  324. package/dist/agents/middlewareAgent/middlewares/hitl.js.map +0 -1
  325. package/dist/agents/middlewareAgent/middlewares/index.cjs +0 -8
  326. package/dist/agents/middlewareAgent/middlewares/index.js +0 -5
  327. package/dist/agents/middlewareAgent/middlewares/promptCaching.cjs.map +0 -1
  328. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.cts.map +0 -1
  329. package/dist/agents/middlewareAgent/middlewares/promptCaching.d.ts.map +0 -1
  330. package/dist/agents/middlewareAgent/middlewares/promptCaching.js.map +0 -1
  331. package/dist/agents/middlewareAgent/middlewares/summarization.cjs.map +0 -1
  332. package/dist/agents/middlewareAgent/middlewares/summarization.d.ts.map +0 -1
  333. package/dist/agents/middlewareAgent/middlewares/summarization.js.map +0 -1
  334. package/dist/chains/openai_functions/structured_output.cjs +0 -107
  335. package/dist/chains/openai_functions/structured_output.cjs.map +0 -1
  336. package/dist/chains/openai_functions/structured_output.d.cts +0 -38
  337. package/dist/chains/openai_functions/structured_output.d.cts.map +0 -1
  338. package/dist/chains/openai_functions/structured_output.d.ts +0 -38
  339. package/dist/chains/openai_functions/structured_output.d.ts.map +0 -1
  340. package/dist/chains/openai_functions/structured_output.js +0 -105
  341. package/dist/chains/openai_functions/structured_output.js.map +0 -1
  342. package/dist/chains/openai_moderation.cjs +0 -107
  343. package/dist/chains/openai_moderation.cjs.map +0 -1
  344. package/dist/chains/openai_moderation.d.cts +0 -74
  345. package/dist/chains/openai_moderation.d.cts.map +0 -1
  346. package/dist/chains/openai_moderation.d.ts +0 -74
  347. package/dist/chains/openai_moderation.d.ts.map +0 -1
  348. package/dist/chains/openai_moderation.js +0 -106
  349. package/dist/chains/openai_moderation.js.map +0 -1
  350. /package/dist/agents/middlewareAgent/{middlewares → middleware}/index.d.cts +0 -0
  351. /package/dist/agents/middlewareAgent/{middlewares → middleware}/index.d.ts +0 -0
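Among the changes listed above, the dist/agents/middlewareAgent/middlewares directory is renamed to .../middleware, and the raw hunks below show the old middlewares/summarization.*.map source maps being deleted as part of that move. For orientation, the JSDoc embedded in those removed source maps documents how the summarization middleware is meant to be wired into an agent; the sketch below reproduces that example, with an illustrative ChatOpenAI model and a placeholder getWeather tool added so it is self-contained. It is a sketch only: the "langchain/middleware" export path and the createAgent option names come from the embedded JSDoc and may shift between alpha releases.

```ts
import { z } from "zod";
import { tool } from "@langchain/core/tools";
import { ChatOpenAI } from "@langchain/openai";
import { createAgent } from "langchain";
import { summarizationMiddleware } from "langchain/middleware";

// Placeholder tool (not part of this package diff) so the example is self-contained.
const getWeather = tool(
  async ({ city }: { city: string }) => `It is sunny in ${city}.`,
  {
    name: "get_weather",
    description: "Return the weather for a city.",
    schema: z.object({ city: z.string() }),
  }
);

// Illustrative models; any chat model supported by the package should work here.
const model = new ChatOpenAI({ model: "gpt-4o" });

const agent = createAgent({
  llm: model,
  tools: [getWeather],
  middlewares: [
    summarizationMiddleware({
      model: new ChatOpenAI({ model: "gpt-4o" }),
      maxTokensBeforeSummary: 4000, // summarize once the approximate token count crosses this
      messagesToKeep: 20,           // most recent messages preserved verbatim
    }),
  ],
});
```

Per the embedded source, the default token counter approximates one token per four characters, and the cutoff logic keeps each AI message and its tool results on the same side of the summary boundary.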
package/dist/agents/middlewareAgent/middlewares/summarization.cjs.map
@@ -1 +0,0 @@
- {"version":3,"file":"summarization.cjs","names":["z","messages: BaseMessage[]","textContent: string","options: z.input<typeof contextSchema>","createMiddleware","RemoveMessage","REMOVE_ALL_MESSAGES","systemMessage: SystemMessage | null","conversationMessages: BaseMessage[]","cutoffIndex: number","originalSystemMessage: SystemMessage | null","summary: string","summaryPrefix: string","content","SystemMessage","messagesToKeep: number","message: BaseMessage","aiMessage: AIMessage","aiMessageIndex: number","toolCallIds: Set<string>","messagesToSummarize: BaseMessage[]","model: BaseLanguageModel","summaryPrompt: string","tokenCounter: TokenCounter"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { v4 as uuid } from \"uuid\";\nimport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n isToolMessage,\n RemoveMessage,\n trimMessages,\n isSystemMessage,\n isAIMessage,\n} from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { REMOVE_ALL_MESSAGES } from \"@langchain/langgraph\";\nimport { createMiddleware } from \"../middleware.js\";\n\nconst DEFAULT_SUMMARY_PROMPT = `<role>\nContext Extraction Assistant\n</role>\n\n<primary_objective>\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\n</primary_objective>\n\n<objective_information>\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\n</objective_information>\n\n<instructions>\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\n</instructions>\n\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\n\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\n\n<messages>\nMessages to summarize:\n{messages}\n</messages>`;\n\nconst SUMMARY_PREFIX = \"## Previous conversation summary:\";\n\nconst DEFAULT_MESSAGES_TO_KEEP = 20;\nconst DEFAULT_TRIM_TOKEN_LIMIT = 4000;\nconst DEFAULT_FALLBACK_MESSAGE_COUNT = 15;\nconst SEARCH_RANGE_FOR_TOOL_PAIRS = 5;\n\ntype TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\n\nconst contextSchema = z.object({\n model: z.custom<BaseLanguageModel>(),\n maxTokensBeforeSummary: z.number().optional(),\n messagesToKeep: z.number().default(DEFAULT_MESSAGES_TO_KEEP),\n tokenCounter: z\n .function()\n .args(z.array(z.any()))\n .returns(z.union([z.number(), z.promise(z.number())]))\n .optional(),\n summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),\n summaryPrefix: z.string().default(SUMMARY_PREFIX),\n});\n\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport function countTokensApproximately(messages: BaseMessage[]): number {\n let totalChars = 0;\n for (const msg of messages) {\n let textContent: string;\n if (typeof msg.content === \"string\") {\n textContent = msg.content;\n } else if (Array.isArray(msg.content)) {\n textContent = msg.content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (item.type === \"text\" && \"text\" in item) return item.text;\n return \"\";\n })\n .join(\"\");\n } else {\n textContent = \"\";\n }\n totalChars += textContent.length;\n }\n // Approximate 1 token = 4 characters\n return Math.ceil(totalChars / 4);\n}\n\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport function summarizationMiddleware(\n options: z.input<typeof contextSchema>\n) {\n return createMiddleware({\n name: \"SummarizationMiddleware\",\n contextSchema,\n beforeModel: async (state, runtime) => {\n const config = { ...contextSchema.parse(options), ...runtime.context };\n const { messages } = state;\n\n // Ensure all messages have IDs\n ensureMessageIds(messages);\n\n const tokenCounter = config.tokenCounter || countTokensApproximately;\n const totalTokens = await tokenCounter(messages);\n\n if (\n config.maxTokensBeforeSummary == null ||\n totalTokens < config.maxTokensBeforeSummary\n ) {\n return;\n }\n\n const { systemMessage, conversationMessages } =\n splitSystemMessage(messages);\n const cutoffIndex = findSafeCutoff(\n conversationMessages,\n config.messagesToKeep\n );\n\n if (cutoffIndex <= 0) {\n return;\n }\n\n const { messagesToSummarize, preservedMessages } = partitionMessages(\n systemMessage,\n conversationMessages,\n 
cutoffIndex\n );\n\n const summary = await createSummary(\n messagesToSummarize,\n config.model,\n config.summaryPrompt,\n tokenCounter\n );\n\n const updatedSystemMessage = buildUpdatedSystemMessage(\n systemMessage,\n summary,\n config.summaryPrefix\n );\n\n return {\n messages: [\n new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),\n updatedSystemMessage,\n ...preservedMessages,\n ],\n };\n },\n });\n}\n\n/**\n * Ensure all messages have unique IDs\n */\nfunction ensureMessageIds(messages: BaseMessage[]): void {\n for (const msg of messages) {\n if (!msg.id) {\n msg.id = uuid();\n }\n }\n}\n\n/**\n * Separate system message from conversation messages\n */\nfunction splitSystemMessage(messages: BaseMessage[]): {\n systemMessage: SystemMessage | null;\n conversationMessages: BaseMessage[];\n} {\n if (messages.length > 0 && isSystemMessage(messages[0])) {\n return {\n systemMessage: messages[0] as SystemMessage,\n conversationMessages: messages.slice(1),\n };\n }\n return {\n systemMessage: null,\n conversationMessages: messages,\n };\n}\n\n/**\n * Partition messages into those to summarize and those to preserve\n */\nfunction partitionMessages(\n systemMessage: SystemMessage | null,\n conversationMessages: BaseMessage[],\n cutoffIndex: number\n): { messagesToSummarize: BaseMessage[]; preservedMessages: BaseMessage[] } {\n const messagesToSummarize = conversationMessages.slice(0, cutoffIndex);\n const preservedMessages = conversationMessages.slice(cutoffIndex);\n\n // Include system message in messages to summarize to capture previous summaries\n if (systemMessage) {\n messagesToSummarize.unshift(systemMessage);\n }\n\n return { messagesToSummarize, preservedMessages };\n}\n\n/**\n * Build updated system message incorporating the summary\n */\nfunction buildUpdatedSystemMessage(\n originalSystemMessage: SystemMessage | null,\n summary: string,\n summaryPrefix: string\n): SystemMessage {\n let originalContent = \"\";\n if (originalSystemMessage) {\n const { content } = originalSystemMessage;\n if (typeof content === \"string\") {\n originalContent = content.split(summaryPrefix)[0].trim();\n }\n }\n\n const content = originalContent\n ? 
`${originalContent}\\n${summaryPrefix}\\n${summary}`\n : `${summaryPrefix}\\n${summary}`;\n\n return new SystemMessage({\n content,\n id: originalSystemMessage?.id || uuid(),\n });\n}\n\n/**\n * Find safe cutoff point that preserves AI/Tool message pairs\n */\nfunction findSafeCutoff(\n messages: BaseMessage[],\n messagesToKeep: number\n): number {\n if (messages.length <= messagesToKeep) {\n return 0;\n }\n\n const targetCutoff = messages.length - messagesToKeep;\n\n for (let i = targetCutoff; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Check if cutting at index would separate AI/Tool message pairs\n */\nfunction isSafeCutoffPoint(\n messages: BaseMessage[],\n cutoffIndex: number\n): boolean {\n if (cutoffIndex >= messages.length) {\n return true;\n }\n\n const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);\n const searchEnd = Math.min(\n messages.length,\n cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS\n );\n\n for (let i = searchStart; i < searchEnd; i++) {\n if (!hasToolCalls(messages[i])) {\n continue;\n }\n\n const toolCallIds = extractToolCallIds(messages[i] as AIMessage);\n if (cutoffSeparatesToolPair(messages, i, cutoffIndex, toolCallIds)) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Check if message is an AI message with tool calls\n */\nfunction hasToolCalls(message: BaseMessage): boolean {\n return (\n isAIMessage(message) &&\n \"tool_calls\" in message &&\n Array.isArray(message.tool_calls) &&\n message.tool_calls.length > 0\n );\n}\n\n/**\n * Extract tool call IDs from an AI message\n */\nfunction extractToolCallIds(aiMessage: AIMessage): Set<string> {\n const toolCallIds = new Set<string>();\n if (aiMessage.tool_calls) {\n for (const toolCall of aiMessage.tool_calls) {\n const id =\n typeof toolCall === \"object\" && \"id\" in toolCall ? toolCall.id : null;\n if (id) {\n toolCallIds.add(id);\n }\n }\n }\n return toolCallIds;\n}\n\n/**\n * Check if cutoff separates an AI message from its corresponding tool messages\n */\nfunction cutoffSeparatesToolPair(\n messages: BaseMessage[],\n aiMessageIndex: number,\n cutoffIndex: number,\n toolCallIds: Set<string>\n): boolean {\n for (let j = aiMessageIndex + 1; j < messages.length; j++) {\n const message = messages[j];\n if (isToolMessage(message) && toolCallIds.has(message.tool_call_id)) {\n const aiBeforeCutoff = aiMessageIndex < cutoffIndex;\n const toolBeforeCutoff = j < cutoffIndex;\n if (aiBeforeCutoff !== toolBeforeCutoff) {\n return true;\n }\n }\n }\n return false;\n}\n\n/**\n * Generate summary for the given messages\n */\nasync function createSummary(\n messagesToSummarize: BaseMessage[],\n model: BaseLanguageModel,\n summaryPrompt: string,\n tokenCounter: TokenCounter\n): Promise<string> {\n if (!messagesToSummarize.length) {\n return \"No previous conversation history.\";\n }\n\n const trimmedMessages = await trimMessagesForSummary(\n messagesToSummarize,\n tokenCounter\n );\n\n if (!trimmedMessages.length) {\n return \"Previous conversation was too long to summarize.\";\n }\n\n try {\n const formattedPrompt = summaryPrompt.replace(\n \"{messages}\",\n JSON.stringify(trimmedMessages, null, 2)\n );\n const response = await model.invoke(formattedPrompt);\n const { content } = response;\n return typeof content === \"string\"\n ? 
content.trim()\n : \"Error generating summary: Invalid response format\";\n } catch (e) {\n return `Error generating summary: ${e}`;\n }\n}\n\n/**\n * Trim messages to fit within summary generation limits\n */\nasync function trimMessagesForSummary(\n messages: BaseMessage[],\n tokenCounter: TokenCounter\n): Promise<BaseMessage[]> {\n try {\n return await trimMessages(messages, {\n maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,\n tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),\n strategy: \"last\",\n allowPartial: true,\n includeSystem: true,\n });\n } catch (e) {\n // Fallback to last N messages if trimming fails\n return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);\n }\n}\n"],"mappings":";;;;;;;;AAgBA,MAAM,yBAAyB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;WA0BrB,CAAC;AAEZ,MAAM,iBAAiB;AAEvB,MAAM,2BAA2B;AACjC,MAAM,2BAA2B;AACjC,MAAM,iCAAiC;AACvC,MAAM,8BAA8B;AAIpC,MAAM,gBAAgBA,MAAE,OAAO;CAC7B,OAAOA,MAAE,QAA2B;CACpC,wBAAwBA,MAAE,QAAQ,CAAC,UAAU;CAC7C,gBAAgBA,MAAE,QAAQ,CAAC,QAAQ,yBAAyB;CAC5D,cAAcA,MACX,UAAU,CACV,KAAKA,MAAE,MAAMA,MAAE,KAAK,CAAC,CAAC,CACtB,QAAQA,MAAE,MAAM,CAACA,MAAE,QAAQ,EAAEA,MAAE,QAAQA,MAAE,QAAQ,CAAC,AAAC,EAAC,CAAC,CACrD,UAAU;CACb,eAAeA,MAAE,QAAQ,CAAC,QAAQ,uBAAuB;CACzD,eAAeA,MAAE,QAAQ,CAAC,QAAQ,eAAe;AAClD,EAAC;;;;;;AAOF,SAAgB,yBAAyBC,UAAiC;CACxE,IAAI,aAAa;AACjB,MAAK,MAAM,OAAO,UAAU;EAC1B,IAAIC;AACJ,MAAI,OAAO,IAAI,YAAY,UACzB,cAAc,IAAI;WACT,MAAM,QAAQ,IAAI,QAAQ,EACnC,cAAc,IAAI,QACf,IAAI,CAAC,SAAS;AACb,OAAI,OAAO,SAAS,SAAU,QAAO;AACrC,OAAI,KAAK,SAAS,UAAU,UAAU,KAAM,QAAO,KAAK;AACxD,UAAO;EACR,EAAC,CACD,KAAK,GAAG;OAEX,cAAc;EAEhB,cAAc,YAAY;CAC3B;AAED,QAAO,KAAK,KAAK,aAAa,EAAE;AACjC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+BD,SAAgB,wBACdC,SACA;AACA,QAAOC,oCAAiB;EACtB,MAAM;EACN;EACA,aAAa,OAAO,OAAO,YAAY;GACrC,MAAM,SAAS;IAAE,GAAG,cAAc,MAAM,QAAQ;IAAE,GAAG,QAAQ;GAAS;GACtE,MAAM,EAAE,UAAU,GAAG;GAGrB,iBAAiB,SAAS;GAE1B,MAAM,eAAe,OAAO,gBAAgB;GAC5C,MAAM,cAAc,MAAM,aAAa,SAAS;AAEhD,OACE,OAAO,0BAA0B,QACjC,cAAc,OAAO,uBAErB;GAGF,MAAM,EAAE,eAAe,sBAAsB,GAC3C,mBAAmB,SAAS;GAC9B,MAAM,cAAc,eAClB,sBACA,OAAO,eACR;AAED,OAAI,eAAe,EACjB;GAGF,MAAM,EAAE,qBAAqB,mBAAmB,GAAG,kBACjD,eACA,sBACA,YACD;GAED,MAAM,UAAU,MAAM,cACpB,qBACA,OAAO,OACP,OAAO,eACP,aACD;GAED,MAAM,uBAAuB,0BAC3B,eACA,SACA,OAAO,cACR;AAED,UAAO,EACL,UAAU;IACR,IAAIC,wCAAc,EAAE,IAAIC,0CAAqB;IAC7C;IACA,GAAG;GACJ,EACF;EACF;CACF,EAAC;AACH;;;;AAKD,SAAS,iBAAiBL,UAA+B;AACvD,MAAK,MAAM,OAAO,SAChB,KAAI,CAAC,IAAI,IACP,IAAI,mBAAW;AAGpB;;;;AAKD,SAAS,mBAAmBA,UAG1B;AACA,KAAI,SAAS,SAAS,oDAAqB,SAAS,GAAG,CACrD,QAAO;EACL,eAAe,SAAS;EACxB,sBAAsB,SAAS,MAAM,EAAE;CACxC;AAEH,QAAO;EACL,eAAe;EACf,sBAAsB;CACvB;AACF;;;;AAKD,SAAS,kBACPM,eACAC,sBACAC,aAC0E;CAC1E,MAAM,sBAAsB,qBAAqB,MAAM,GAAG,YAAY;CACtE,MAAM,oBAAoB,qBAAqB,MAAM,YAAY;AAGjE,KAAI,eACF,oBAAoB,QAAQ,cAAc;AAG5C,QAAO;EAAE;EAAqB;CAAmB;AAClD;;;;AAKD,SAAS,0BACPC,uBACAC,SACAC,eACe;CACf,IAAI,kBAAkB;AACtB,KAAI,uBAAuB;EACzB,MAAM,EAAE,oBAAS,GAAG;AACpB,MAAI,OAAOC,cAAY,UACrB,kBAAkBA,UAAQ,MAAM,cAAc,CAAC,GAAG,MAAM;CAE3D;CAED,MAAM,UAAU,kBACZ,GAAG,gBAAgB,EAAE,EAAE,cAAc,EAAE,EAAE,SAAS,GAClD,GAAG,cAAc,EAAE,EAAE,SAAS;AAElC,QAAO,IAAIC,wCAAc;EACvB;EACA,IAAI,uBAAuB,oBAAY;CACxC;AACF;;;;AAKD,SAAS,eACPb,UACAc,gBACQ;AACR,KAAI,SAAS,UAAU,eACrB,QAAO;CAGT,MAAM,eAAe,SAAS,SAAS;AAEvC,MAAK,IAAI,IAAI,cAAc,KAAK,GAAG,IACjC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,kBACPd,UACAQ,aACS;AACT,KAAI,eAAe,SAAS,OAC1B,QAAO;CAGT,MAAM,cAAc,KAAK,IAAI,GAAG,cAAc,4BAA4B;CAC1E,MAAM,YAAY,KAAK,IACrB,SAAS,QACT,cAAc,4BACf;AAED,MAAK,IAAI,IAAI,aAAa,IAAI,WAAW,KAAK;AAC5C,MAAI,CAAC,aAAa,SAAS,GAAG,CAC5B;EAGF,MAAM,cAAc,mBAAmB,SAAS,GAAgB;AAChE,MAAI,wBAAwB,UAAU,GAAG,aAAa,YAAY,CAChE,QAAO;CAEV;A
AED,QAAO;AACR;;;;AAKD,SAAS,aAAaO,SAA+B;AACnD,mDACc,QAAQ,IACpB,gBAAgB,WAChB,MAAM,QAAQ,QAAQ,WAAW,IACjC,QAAQ,WAAW,SAAS;AAE/B;;;;AAKD,SAAS,mBAAmBC,WAAmC;CAC7D,MAAM,8BAAc,IAAI;AACxB,KAAI,UAAU,WACZ,MAAK,MAAM,YAAY,UAAU,YAAY;EAC3C,MAAM,KACJ,OAAO,aAAa,YAAY,QAAQ,WAAW,SAAS,KAAK;AACnE,MAAI,IACF,YAAY,IAAI,GAAG;CAEtB;AAEH,QAAO;AACR;;;;AAKD,SAAS,wBACPhB,UACAiB,gBACAT,aACAU,aACS;AACT,MAAK,IAAI,IAAI,iBAAiB,GAAG,IAAI,SAAS,QAAQ,KAAK;EACzD,MAAM,UAAU,SAAS;AACzB,mDAAkB,QAAQ,IAAI,YAAY,IAAI,QAAQ,aAAa,EAAE;GACnE,MAAM,iBAAiB,iBAAiB;GACxC,MAAM,mBAAmB,IAAI;AAC7B,OAAI,mBAAmB,iBACrB,QAAO;EAEV;CACF;AACD,QAAO;AACR;;;;AAKD,eAAe,cACbC,qBACAC,OACAC,eACAC,cACiB;AACjB,KAAI,CAAC,oBAAoB,OACvB,QAAO;CAGT,MAAM,kBAAkB,MAAM,uBAC5B,qBACA,aACD;AAED,KAAI,CAAC,gBAAgB,OACnB,QAAO;AAGT,KAAI;EACF,MAAM,kBAAkB,cAAc,QACpC,cACA,KAAK,UAAU,iBAAiB,MAAM,EAAE,CACzC;EACD,MAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;EACpD,MAAM,EAAE,SAAS,GAAG;AACpB,SAAO,OAAO,YAAY,WACtB,QAAQ,MAAM,GACd;CACL,SAAQ,GAAG;AACV,SAAO,CAAC,0BAA0B,EAAE,GAAG;CACxC;AACF;;;;AAKD,eAAe,uBACbtB,UACAsB,cACwB;AACxB,KAAI;AACF,SAAO,kDAAmB,UAAU;GAClC,WAAW;GACX,cAAc,OAAO,SAAS,QAAQ,QAAQ,aAAa,KAAK,CAAC;GACjE,UAAU;GACV,cAAc;GACd,eAAe;EAChB,EAAC;CACH,SAAQ,GAAG;AAEV,SAAO,SAAS,MAAM,CAAC,+BAA+B;CACvD;AACF"}
package/dist/agents/middlewareAgent/middlewares/summarization.d.ts.map
@@ -1 +0,0 @@
- {"version":3,"file":"summarization.d.ts","names":["___types_js0","z","BaseMessage","BaseLanguageModel","contextSchema","_langchain_core_language_models_base0","BaseLanguageModelCallOptions","ZodTypeDef","ZodType","ZodNumber","ZodOptional","ZodDefault","ZodAny","ZodArray","ZodUnknown","ZodTuple","ZodPromise","ZodUnion","ZodFunction","ZodString","ZodTypeAny","Promise","ZodObject","countTokensApproximately","summarizationMiddleware","input","AgentMiddleware"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.d.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { BaseMessage } from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\ndeclare const contextSchema: z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>;\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport declare function countTokensApproximately(messages: BaseMessage[]): number;\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport declare function summarizationMiddleware(options: z.input<typeof contextSchema>): import(\"../types.js\").AgentMiddleware<undefined, z.ZodObject<{\n model: z.ZodType<BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>, z.ZodTypeDef, 
BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>>;\n maxTokensBeforeSummary: z.ZodOptional<z.ZodNumber>;\n messagesToKeep: z.ZodDefault<z.ZodNumber>;\n tokenCounter: z.ZodOptional<z.ZodFunction<z.ZodTuple<[z.ZodArray<z.ZodAny, \"many\">], z.ZodUnknown>, z.ZodUnion<[z.ZodNumber, z.ZodPromise<z.ZodNumber>]>>>;\n summaryPrompt: z.ZodDefault<z.ZodString>;\n summaryPrefix: z.ZodDefault<z.ZodString>;\n}, \"strip\", z.ZodTypeAny, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep: number;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt: string;\n summaryPrefix: string;\n}, {\n model: BaseLanguageModel<any, import(\"@langchain/core/language_models/base\").BaseLanguageModelCallOptions>;\n maxTokensBeforeSummary?: number | undefined;\n messagesToKeep?: number | undefined;\n tokenCounter?: ((args_0: any[], ...args: unknown[]) => number | Promise<number>) | undefined;\n summaryPrompt?: string | undefined;\n summaryPrefix?: string | undefined;\n}>, any>;\nexport {};\n"],"mappings":";;;;;;;cAGcI,eAAeH,CAAAA,CAAEqB;SACpBrB,CAAAA,CAAEO,QAAQL,uBAoBnBE,qCAAAA,CApByFC,4BAAAA,GAA+BL,CAAAA,CAAEM,YAAYJ,uBAAFE,qCAAAA,CAAwEC,4BAAAA;0BAClLL,CAAAA,CAAES,YAAYT,CAAAA,CAAEQ;kBACxBR,CAAAA,CAAEU,WAAWV,CAAAA,CAAEQ;EAHrBL,YAAAA,EAIIH,CAAAA,CAAES,WAiBlB,CAjB8BT,CAAAA,CAAEiB,WAiBhC,CAjB4CjB,CAAAA,CAAEc,QAiB9C,CAAA,CAjBwDd,CAAAA,CAAEY,QAiB1D,CAjBmEZ,CAAAA,CAAEW,MAiBrE,EAAA,MAAA,CAAA,CAAA,EAjBuFX,CAAAA,CAAEa,UAiBzF,CAAA,EAjBsGb,CAAAA,CAAEgB,QAiBxG,CAAA,CAjBkHhB,CAAAA,CAAEQ,SAiBpH,EAjB+HR,CAAAA,CAAEe,UAiBjI,CAjB4If,CAAAA,CAAEQ,SAiB9I,CAAA,CAAA,CAAA,CAAA,CAAA;EAAA,aAAA,EAhBiBR,CAAAA,CAAEU,UAgBnB,CAhB8BV,CAAAA,CAAEkB,SAgBhC,CAAA;EAAA,aAAAd,EAfiBJ,CAAAA,CAAEU,UAenBN,CAf8BJ,CAAAA,CAAEkB,SAehCd,CAAAA;CApBqH,EAAA,OAAlGF,EAMTF,CAAAA,CAAEmB,UANOjB,EAAAA;EAAiB,KAAsFI,EAOjHJ,iBAPiHI,CAAAA,GAAAA,EAMpGF,qCAAAA,CACyDC,4BAAAA,CAP2CC;EAAU,sBAAAF,CAAAA,EAAAA,MAAAA,GAAAA,SAAwEC;EAA4B,cAAlGH,EAAAA,MAAAA;EAAiB,YAA5IK,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAUuDa,OAVvDb,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAO,aACwBC,EAAAA,MAAAA;EAAS,aAAvBC,EAAAA,MAAAA;CAAW,EAAA;EACG,KAAtBC,EAYXR,iBAZWQ,CAAAA,GAAAA,EAQqDN,qCAAAA,CAIMC,4BAAAA,CAZ3DK;EAAU,sBACuCC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAM,cAAjBC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,YAAuBC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAcvBO,OAduBP,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAArDC,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAQ,aAA8DN,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;CAAS,CAAA;;;;;;AAC5GR,iBAsBKsB,wBAAAA,CAtBHZ,QAAAA,EAsBsCT,WAtBtCS,EAAAA,CAAAA,EAAAA,MAAAA;;;;;;;;;;;AALmB;AA2BxC;AA8BA;;;;;;;;;;;;;;;;;AAI8CV,iBAJtBuB,uBAAAA,CAIwBT,OAAAA,EAJSd,CAAAA,CAAEwB,KAIXV,CAAAA,OAJwBX,aAIxBW,CAAAA,CAAAA,EAJqG,eAIrGA,CAAAA,SAAAA,EAJ0Fd,CAAAA,CAAEqB,SAI5FP,CAAAA;EAAQ,KAA8DN,EAH3GR,CAAAA,CAAEO,OAGyGC,CAHjGN,iBAGiGM,CAAAA,GAAAA,EAJtDJ,qCAAAA,CAC2BC,4BAAAA,CAG2BG,EAHIR,CAAAA,CAAEM,UAGNE,EAHkBN,iBAGlBM,CAAAA,GAAAA,EAHgBJ,qCAAAA,CAAwEC,4BAAAA,CAGxFG,CAAAA;EAAS,sBAAiBA,EAFpHR,CAAAA,CAAES,WAEkHD,CAFtGR,CAAAA,CAAEQ,SAEoGA,CAAAA;EAAS,cAAtBO,EAD/Gf,CAAAA,CAAEU,UAC6GK,CADlGf,CAAAA,CAAEQ,SACgGO,CAAAA;EAAU,YAAnCC,EAAxFhB,CAAAA,CAAES,WAAsFO,CAA1EhB,CAAAA,CAAEiB,WAAwED,CAA5DhB,CAAAA,CAAEc,QAA0DE,CAAAA,CAAhDhB,CAAAA,CAAEY,QAA8CI,CAArChB,CAAAA,CAAEW,MAAmCK,EAAAA,MAAAA,CAAAA,CAAAA,EAAjB
hB,CAAAA,CAAEa,UAAeG,CAAAA,EAAFhB,CAAAA,CAAEgB,QAAAA,CAAAA,CAAUhB,CAAAA,CAAEQ,SAAZQ,EAAuBhB,CAAAA,CAAEe,UAAzBC,CAAoChB,CAAAA,CAAEQ,SAAtCQ,CAAAA,CAAAA,CAAAA,CAAAA,CAAAA;EAAQ,aAAhFC,EACfjB,CAAAA,CAAEU,UADaO,CACFjB,CAAAA,CAAEkB,SADAD,CAAAA;EAAW,aAAzBR,EAEDT,CAAAA,CAAEU,UAFDD,CAEYT,CAAAA,CAAEkB,SAFdT,CAAAA;CAAW,EAAA,OACGS,EAEtBlB,CAAAA,CAAEmB,UAFoBD,EAAAA;EAAS,KAAtBR,EAGVR,iBAHUQ,CAAAA,GAAAA,EAEGN,qCAAAA,CACyDC,4BAAAA,CAH5DK;EAAU,sBACGQ,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAS,cAAtBR,EAAAA,MAAAA;EAAU,YACjBS,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAIsDC,OAJtDD,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAU,aAAAf,EAAAA,MAAAA;EACqF,aAAlGF,EAAAA,MAAAA;CAAiB,EAAA;EAG+C,KAAAE,EAIhEF,iBAJgEE,CAAAA,GAAAA,EAAAA,qCAAAA,CAIMC,4BAAAA,CAAAA;EAA4B,sBAAlGH,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAiB,cAGwCkB,CAAAA,EAAAA,MAAAA,GAAAA,SAAAA;EAAO,YAlBiEC,CAAAA,EAAAA,CAAAA,CAAAA,MAAAA,EAAAA,GAAAA,EAAAA,EAAAA,GAAAA,IAAAA,EAAAA,OAAAA,EAAAA,EAAAA,GAAAA,MAAAA,GAkBxED,OAlBwEC,CAAAA,MAAAA,CAAAA,CAAAA,GAAAA,SAAAA;EAAS,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA;EAAvB,aAAA,CAAA,EAAA,MAAA,GAAA,SAAA"}
package/dist/agents/middlewareAgent/middlewares/summarization.js.map
@@ -1 +0,0 @@
- {"version":3,"file":"summarization.js","names":["messages: BaseMessage[]","textContent: string","options: z.input<typeof contextSchema>","uuid","systemMessage: SystemMessage | null","conversationMessages: BaseMessage[]","cutoffIndex: number","originalSystemMessage: SystemMessage | null","summary: string","summaryPrefix: string","content","messagesToKeep: number","message: BaseMessage","aiMessage: AIMessage","aiMessageIndex: number","toolCallIds: Set<string>","messagesToSummarize: BaseMessage[]","model: BaseLanguageModel","summaryPrompt: string","tokenCounter: TokenCounter"],"sources":["../../../../src/agents/middlewareAgent/middlewares/summarization.ts"],"sourcesContent":["import { z } from \"zod\";\nimport { v4 as uuid } from \"uuid\";\nimport {\n BaseMessage,\n AIMessage,\n SystemMessage,\n isToolMessage,\n RemoveMessage,\n trimMessages,\n isSystemMessage,\n isAIMessage,\n} from \"@langchain/core/messages\";\nimport { BaseLanguageModel } from \"@langchain/core/language_models/base\";\nimport { REMOVE_ALL_MESSAGES } from \"@langchain/langgraph\";\nimport { createMiddleware } from \"../middleware.js\";\n\nconst DEFAULT_SUMMARY_PROMPT = `<role>\nContext Extraction Assistant\n</role>\n\n<primary_objective>\nYour sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.\n</primary_objective>\n\n<objective_information>\nYou're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.\nThis context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to your overall goal.\n</objective_information>\n\n<instructions>\nThe conversation history below will be replaced with the context you extract in this step. Because of this, you must do your very best to extract and record all of the most important context from the conversation history.\nYou want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.\n</instructions>\n\nThe user will message you with the full message history you'll be extracting context from, to then replace. Carefully read over it all, and think deeply about what information is most important to your overall goal that should be saved:\n\nWith all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.\nRespond ONLY with the extracted context. 
Do not include any additional information, or text before or after the extracted context.\n\n<messages>\nMessages to summarize:\n{messages}\n</messages>`;\n\nconst SUMMARY_PREFIX = \"## Previous conversation summary:\";\n\nconst DEFAULT_MESSAGES_TO_KEEP = 20;\nconst DEFAULT_TRIM_TOKEN_LIMIT = 4000;\nconst DEFAULT_FALLBACK_MESSAGE_COUNT = 15;\nconst SEARCH_RANGE_FOR_TOOL_PAIRS = 5;\n\ntype TokenCounter = (messages: BaseMessage[]) => number | Promise<number>;\n\nconst contextSchema = z.object({\n model: z.custom<BaseLanguageModel>(),\n maxTokensBeforeSummary: z.number().optional(),\n messagesToKeep: z.number().default(DEFAULT_MESSAGES_TO_KEEP),\n tokenCounter: z\n .function()\n .args(z.array(z.any()))\n .returns(z.union([z.number(), z.promise(z.number())]))\n .optional(),\n summaryPrompt: z.string().default(DEFAULT_SUMMARY_PROMPT),\n summaryPrefix: z.string().default(SUMMARY_PREFIX),\n});\n\n/**\n * Default token counter that approximates based on character count\n * @param messages Messages to count tokens for\n * @returns Approximate token count\n */\nexport function countTokensApproximately(messages: BaseMessage[]): number {\n let totalChars = 0;\n for (const msg of messages) {\n let textContent: string;\n if (typeof msg.content === \"string\") {\n textContent = msg.content;\n } else if (Array.isArray(msg.content)) {\n textContent = msg.content\n .map((item) => {\n if (typeof item === \"string\") return item;\n if (item.type === \"text\" && \"text\" in item) return item.text;\n return \"\";\n })\n .join(\"\");\n } else {\n textContent = \"\";\n }\n totalChars += textContent.length;\n }\n // Approximate 1 token = 4 characters\n return Math.ceil(totalChars / 4);\n}\n\n/**\n * Summarization middleware that automatically summarizes conversation history when token limits are approached.\n *\n * This middleware monitors message token counts and automatically summarizes older\n * messages when a threshold is reached, preserving recent messages and maintaining\n * context continuity by ensuring AI/Tool message pairs remain together.\n *\n * @param options Configuration options for the summarization middleware\n * @returns A middleware instance\n *\n * @example\n * ```ts\n * import { summarizationMiddleware } from \"langchain/middleware\";\n * import { createAgent } from \"langchain\";\n *\n * const agent = createAgent({\n * llm: model,\n * tools: [getWeather],\n * middlewares: [\n * summarizationMiddleware({\n * model: new ChatOpenAI({ model: \"gpt-4o\" }),\n * maxTokensBeforeSummary: 4000,\n * messagesToKeep: 20,\n * })\n * ],\n * });\n *\n * ```\n */\nexport function summarizationMiddleware(\n options: z.input<typeof contextSchema>\n) {\n return createMiddleware({\n name: \"SummarizationMiddleware\",\n contextSchema,\n beforeModel: async (state, runtime) => {\n const config = { ...contextSchema.parse(options), ...runtime.context };\n const { messages } = state;\n\n // Ensure all messages have IDs\n ensureMessageIds(messages);\n\n const tokenCounter = config.tokenCounter || countTokensApproximately;\n const totalTokens = await tokenCounter(messages);\n\n if (\n config.maxTokensBeforeSummary == null ||\n totalTokens < config.maxTokensBeforeSummary\n ) {\n return;\n }\n\n const { systemMessage, conversationMessages } =\n splitSystemMessage(messages);\n const cutoffIndex = findSafeCutoff(\n conversationMessages,\n config.messagesToKeep\n );\n\n if (cutoffIndex <= 0) {\n return;\n }\n\n const { messagesToSummarize, preservedMessages } = partitionMessages(\n systemMessage,\n conversationMessages,\n 
cutoffIndex\n );\n\n const summary = await createSummary(\n messagesToSummarize,\n config.model,\n config.summaryPrompt,\n tokenCounter\n );\n\n const updatedSystemMessage = buildUpdatedSystemMessage(\n systemMessage,\n summary,\n config.summaryPrefix\n );\n\n return {\n messages: [\n new RemoveMessage({ id: REMOVE_ALL_MESSAGES }),\n updatedSystemMessage,\n ...preservedMessages,\n ],\n };\n },\n });\n}\n\n/**\n * Ensure all messages have unique IDs\n */\nfunction ensureMessageIds(messages: BaseMessage[]): void {\n for (const msg of messages) {\n if (!msg.id) {\n msg.id = uuid();\n }\n }\n}\n\n/**\n * Separate system message from conversation messages\n */\nfunction splitSystemMessage(messages: BaseMessage[]): {\n systemMessage: SystemMessage | null;\n conversationMessages: BaseMessage[];\n} {\n if (messages.length > 0 && isSystemMessage(messages[0])) {\n return {\n systemMessage: messages[0] as SystemMessage,\n conversationMessages: messages.slice(1),\n };\n }\n return {\n systemMessage: null,\n conversationMessages: messages,\n };\n}\n\n/**\n * Partition messages into those to summarize and those to preserve\n */\nfunction partitionMessages(\n systemMessage: SystemMessage | null,\n conversationMessages: BaseMessage[],\n cutoffIndex: number\n): { messagesToSummarize: BaseMessage[]; preservedMessages: BaseMessage[] } {\n const messagesToSummarize = conversationMessages.slice(0, cutoffIndex);\n const preservedMessages = conversationMessages.slice(cutoffIndex);\n\n // Include system message in messages to summarize to capture previous summaries\n if (systemMessage) {\n messagesToSummarize.unshift(systemMessage);\n }\n\n return { messagesToSummarize, preservedMessages };\n}\n\n/**\n * Build updated system message incorporating the summary\n */\nfunction buildUpdatedSystemMessage(\n originalSystemMessage: SystemMessage | null,\n summary: string,\n summaryPrefix: string\n): SystemMessage {\n let originalContent = \"\";\n if (originalSystemMessage) {\n const { content } = originalSystemMessage;\n if (typeof content === \"string\") {\n originalContent = content.split(summaryPrefix)[0].trim();\n }\n }\n\n const content = originalContent\n ? 
`${originalContent}\\n${summaryPrefix}\\n${summary}`\n : `${summaryPrefix}\\n${summary}`;\n\n return new SystemMessage({\n content,\n id: originalSystemMessage?.id || uuid(),\n });\n}\n\n/**\n * Find safe cutoff point that preserves AI/Tool message pairs\n */\nfunction findSafeCutoff(\n messages: BaseMessage[],\n messagesToKeep: number\n): number {\n if (messages.length <= messagesToKeep) {\n return 0;\n }\n\n const targetCutoff = messages.length - messagesToKeep;\n\n for (let i = targetCutoff; i >= 0; i--) {\n if (isSafeCutoffPoint(messages, i)) {\n return i;\n }\n }\n\n return 0;\n}\n\n/**\n * Check if cutting at index would separate AI/Tool message pairs\n */\nfunction isSafeCutoffPoint(\n messages: BaseMessage[],\n cutoffIndex: number\n): boolean {\n if (cutoffIndex >= messages.length) {\n return true;\n }\n\n const searchStart = Math.max(0, cutoffIndex - SEARCH_RANGE_FOR_TOOL_PAIRS);\n const searchEnd = Math.min(\n messages.length,\n cutoffIndex + SEARCH_RANGE_FOR_TOOL_PAIRS\n );\n\n for (let i = searchStart; i < searchEnd; i++) {\n if (!hasToolCalls(messages[i])) {\n continue;\n }\n\n const toolCallIds = extractToolCallIds(messages[i] as AIMessage);\n if (cutoffSeparatesToolPair(messages, i, cutoffIndex, toolCallIds)) {\n return false;\n }\n }\n\n return true;\n}\n\n/**\n * Check if message is an AI message with tool calls\n */\nfunction hasToolCalls(message: BaseMessage): boolean {\n return (\n isAIMessage(message) &&\n \"tool_calls\" in message &&\n Array.isArray(message.tool_calls) &&\n message.tool_calls.length > 0\n );\n}\n\n/**\n * Extract tool call IDs from an AI message\n */\nfunction extractToolCallIds(aiMessage: AIMessage): Set<string> {\n const toolCallIds = new Set<string>();\n if (aiMessage.tool_calls) {\n for (const toolCall of aiMessage.tool_calls) {\n const id =\n typeof toolCall === \"object\" && \"id\" in toolCall ? toolCall.id : null;\n if (id) {\n toolCallIds.add(id);\n }\n }\n }\n return toolCallIds;\n}\n\n/**\n * Check if cutoff separates an AI message from its corresponding tool messages\n */\nfunction cutoffSeparatesToolPair(\n messages: BaseMessage[],\n aiMessageIndex: number,\n cutoffIndex: number,\n toolCallIds: Set<string>\n): boolean {\n for (let j = aiMessageIndex + 1; j < messages.length; j++) {\n const message = messages[j];\n if (isToolMessage(message) && toolCallIds.has(message.tool_call_id)) {\n const aiBeforeCutoff = aiMessageIndex < cutoffIndex;\n const toolBeforeCutoff = j < cutoffIndex;\n if (aiBeforeCutoff !== toolBeforeCutoff) {\n return true;\n }\n }\n }\n return false;\n}\n\n/**\n * Generate summary for the given messages\n */\nasync function createSummary(\n messagesToSummarize: BaseMessage[],\n model: BaseLanguageModel,\n summaryPrompt: string,\n tokenCounter: TokenCounter\n): Promise<string> {\n if (!messagesToSummarize.length) {\n return \"No previous conversation history.\";\n }\n\n const trimmedMessages = await trimMessagesForSummary(\n messagesToSummarize,\n tokenCounter\n );\n\n if (!trimmedMessages.length) {\n return \"Previous conversation was too long to summarize.\";\n }\n\n try {\n const formattedPrompt = summaryPrompt.replace(\n \"{messages}\",\n JSON.stringify(trimmedMessages, null, 2)\n );\n const response = await model.invoke(formattedPrompt);\n const { content } = response;\n return typeof content === \"string\"\n ? 
content.trim()\n : \"Error generating summary: Invalid response format\";\n } catch (e) {\n return `Error generating summary: ${e}`;\n }\n}\n\n/**\n * Trim messages to fit within summary generation limits\n */\nasync function trimMessagesForSummary(\n messages: BaseMessage[],\n tokenCounter: TokenCounter\n): Promise<BaseMessage[]> {\n try {\n return await trimMessages(messages, {\n maxTokens: DEFAULT_TRIM_TOKEN_LIMIT,\n tokenCounter: async (msgs) => Promise.resolve(tokenCounter(msgs)),\n strategy: \"last\",\n allowPartial: true,\n includeSystem: true,\n });\n } catch (e) {\n // Fallback to last N messages if trimming fails\n return messages.slice(-DEFAULT_FALLBACK_MESSAGE_COUNT);\n }\n}\n"],"mappings":";;;;;;;AAgBA,MAAM,yBAAyB,CAAC;;;;;;;;;;;;;;;;;;;;;;;;;;WA0BrB,CAAC;AAEZ,MAAM,iBAAiB;AAEvB,MAAM,2BAA2B;AACjC,MAAM,2BAA2B;AACjC,MAAM,iCAAiC;AACvC,MAAM,8BAA8B;AAIpC,MAAM,gBAAgB,EAAE,OAAO;CAC7B,OAAO,EAAE,QAA2B;CACpC,wBAAwB,EAAE,QAAQ,CAAC,UAAU;CAC7C,gBAAgB,EAAE,QAAQ,CAAC,QAAQ,yBAAyB;CAC5D,cAAc,EACX,UAAU,CACV,KAAK,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC,CACtB,QAAQ,EAAE,MAAM,CAAC,EAAE,QAAQ,EAAE,EAAE,QAAQ,EAAE,QAAQ,CAAC,AAAC,EAAC,CAAC,CACrD,UAAU;CACb,eAAe,EAAE,QAAQ,CAAC,QAAQ,uBAAuB;CACzD,eAAe,EAAE,QAAQ,CAAC,QAAQ,eAAe;AAClD,EAAC;;;;;;AAOF,SAAgB,yBAAyBA,UAAiC;CACxE,IAAI,aAAa;AACjB,MAAK,MAAM,OAAO,UAAU;EAC1B,IAAIC;AACJ,MAAI,OAAO,IAAI,YAAY,UACzB,cAAc,IAAI;WACT,MAAM,QAAQ,IAAI,QAAQ,EACnC,cAAc,IAAI,QACf,IAAI,CAAC,SAAS;AACb,OAAI,OAAO,SAAS,SAAU,QAAO;AACrC,OAAI,KAAK,SAAS,UAAU,UAAU,KAAM,QAAO,KAAK;AACxD,UAAO;EACR,EAAC,CACD,KAAK,GAAG;OAEX,cAAc;EAEhB,cAAc,YAAY;CAC3B;AAED,QAAO,KAAK,KAAK,aAAa,EAAE;AACjC;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AA+BD,SAAgB,wBACdC,SACA;AACA,QAAO,iBAAiB;EACtB,MAAM;EACN;EACA,aAAa,OAAO,OAAO,YAAY;GACrC,MAAM,SAAS;IAAE,GAAG,cAAc,MAAM,QAAQ;IAAE,GAAG,QAAQ;GAAS;GACtE,MAAM,EAAE,UAAU,GAAG;GAGrB,iBAAiB,SAAS;GAE1B,MAAM,eAAe,OAAO,gBAAgB;GAC5C,MAAM,cAAc,MAAM,aAAa,SAAS;AAEhD,OACE,OAAO,0BAA0B,QACjC,cAAc,OAAO,uBAErB;GAGF,MAAM,EAAE,eAAe,sBAAsB,GAC3C,mBAAmB,SAAS;GAC9B,MAAM,cAAc,eAClB,sBACA,OAAO,eACR;AAED,OAAI,eAAe,EACjB;GAGF,MAAM,EAAE,qBAAqB,mBAAmB,GAAG,kBACjD,eACA,sBACA,YACD;GAED,MAAM,UAAU,MAAM,cACpB,qBACA,OAAO,OACP,OAAO,eACP,aACD;GAED,MAAM,uBAAuB,0BAC3B,eACA,SACA,OAAO,cACR;AAED,UAAO,EACL,UAAU;IACR,IAAI,cAAc,EAAE,IAAI,oBAAqB;IAC7C;IACA,GAAG;GACJ,EACF;EACF;CACF,EAAC;AACH;;;;AAKD,SAAS,iBAAiBF,UAA+B;AACvD,MAAK,MAAM,OAAO,SAChB,KAAI,CAAC,IAAI,IACP,IAAI,KAAKG,IAAM;AAGpB;;;;AAKD,SAAS,mBAAmBH,UAG1B;AACA,KAAI,SAAS,SAAS,KAAK,gBAAgB,SAAS,GAAG,CACrD,QAAO;EACL,eAAe,SAAS;EACxB,sBAAsB,SAAS,MAAM,EAAE;CACxC;AAEH,QAAO;EACL,eAAe;EACf,sBAAsB;CACvB;AACF;;;;AAKD,SAAS,kBACPI,eACAC,sBACAC,aAC0E;CAC1E,MAAM,sBAAsB,qBAAqB,MAAM,GAAG,YAAY;CACtE,MAAM,oBAAoB,qBAAqB,MAAM,YAAY;AAGjE,KAAI,eACF,oBAAoB,QAAQ,cAAc;AAG5C,QAAO;EAAE;EAAqB;CAAmB;AAClD;;;;AAKD,SAAS,0BACPC,uBACAC,SACAC,eACe;CACf,IAAI,kBAAkB;AACtB,KAAI,uBAAuB;EACzB,MAAM,EAAE,oBAAS,GAAG;AACpB,MAAI,OAAOC,cAAY,UACrB,kBAAkBA,UAAQ,MAAM,cAAc,CAAC,GAAG,MAAM;CAE3D;CAED,MAAM,UAAU,kBACZ,GAAG,gBAAgB,EAAE,EAAE,cAAc,EAAE,EAAE,SAAS,GAClD,GAAG,cAAc,EAAE,EAAE,SAAS;AAElC,QAAO,IAAI,cAAc;EACvB;EACA,IAAI,uBAAuB,MAAMP,IAAM;CACxC;AACF;;;;AAKD,SAAS,eACPH,UACAW,gBACQ;AACR,KAAI,SAAS,UAAU,eACrB,QAAO;CAGT,MAAM,eAAe,SAAS,SAAS;AAEvC,MAAK,IAAI,IAAI,cAAc,KAAK,GAAG,IACjC,KAAI,kBAAkB,UAAU,EAAE,CAChC,QAAO;AAIX,QAAO;AACR;;;;AAKD,SAAS,kBACPX,UACAM,aACS;AACT,KAAI,eAAe,SAAS,OAC1B,QAAO;CAGT,MAAM,cAAc,KAAK,IAAI,GAAG,cAAc,4BAA4B;CAC1E,MAAM,YAAY,KAAK,IACrB,SAAS,QACT,cAAc,4BACf;AAED,MAAK,IAAI,IAAI,aAAa,IAAI,WAAW,KAAK;AAC5C,MAAI,CAAC,aAAa,SAAS,GAAG,CAC5B;EAGF,MAAM,cAAc,mBAAmB,SAAS,GAAgB;AAChE,MAAI,wBAAwB,UAAU,GAAG,aAAa,YAAY,CAChE,QAAO;CAEV;AAED,Q
AAO;AACR;;;;AAKD,SAAS,aAAaM,SAA+B;AACnD,QACE,YAAY,QAAQ,IACpB,gBAAgB,WAChB,MAAM,QAAQ,QAAQ,WAAW,IACjC,QAAQ,WAAW,SAAS;AAE/B;;;;AAKD,SAAS,mBAAmBC,WAAmC;CAC7D,MAAM,8BAAc,IAAI;AACxB,KAAI,UAAU,WACZ,MAAK,MAAM,YAAY,UAAU,YAAY;EAC3C,MAAM,KACJ,OAAO,aAAa,YAAY,QAAQ,WAAW,SAAS,KAAK;AACnE,MAAI,IACF,YAAY,IAAI,GAAG;CAEtB;AAEH,QAAO;AACR;;;;AAKD,SAAS,wBACPb,UACAc,gBACAR,aACAS,aACS;AACT,MAAK,IAAI,IAAI,iBAAiB,GAAG,IAAI,SAAS,QAAQ,KAAK;EACzD,MAAM,UAAU,SAAS;AACzB,MAAI,cAAc,QAAQ,IAAI,YAAY,IAAI,QAAQ,aAAa,EAAE;GACnE,MAAM,iBAAiB,iBAAiB;GACxC,MAAM,mBAAmB,IAAI;AAC7B,OAAI,mBAAmB,iBACrB,QAAO;EAEV;CACF;AACD,QAAO;AACR;;;;AAKD,eAAe,cACbC,qBACAC,OACAC,eACAC,cACiB;AACjB,KAAI,CAAC,oBAAoB,OACvB,QAAO;CAGT,MAAM,kBAAkB,MAAM,uBAC5B,qBACA,aACD;AAED,KAAI,CAAC,gBAAgB,OACnB,QAAO;AAGT,KAAI;EACF,MAAM,kBAAkB,cAAc,QACpC,cACA,KAAK,UAAU,iBAAiB,MAAM,EAAE,CACzC;EACD,MAAM,WAAW,MAAM,MAAM,OAAO,gBAAgB;EACpD,MAAM,EAAE,SAAS,GAAG;AACpB,SAAO,OAAO,YAAY,WACtB,QAAQ,MAAM,GACd;CACL,SAAQ,GAAG;AACV,SAAO,CAAC,0BAA0B,EAAE,GAAG;CACxC;AACF;;;;AAKD,eAAe,uBACbnB,UACAmB,cACwB;AACxB,KAAI;AACF,SAAO,MAAM,aAAa,UAAU;GAClC,WAAW;GACX,cAAc,OAAO,SAAS,QAAQ,QAAQ,aAAa,KAAK,CAAC;GACjE,UAAU;GACV,cAAc;GACd,eAAe;EAChB,EAAC;CACH,SAAQ,GAAG;AAEV,SAAO,SAAS,MAAM,CAAC,+BAA+B;CACvD;AACF"}
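The embedded source above shows how the summarization middleware picks its cutoff: it walks backwards from `messages.length - messagesToKeep` until no AI tool call would be separated from its ToolMessage result, and it folds the previous system message into the portion that gets summarized. Below is a self-contained sketch of that pair-preserving cutoff rule; the `SimpleMessage` shape and `safeCutoff` helper are illustrative stand-ins for the `BaseMessage` types and `findSafeCutoff`/`isSafeCutoffPoint` shown in the map, and the shipped implementation additionally limits its scan to a small window around the cutoff.

```typescript
// Illustrative only: a simplified message shape standing in for LangChain's
// BaseMessage/AIMessage/ToolMessage classes used by the middleware above.
type SimpleMessage =
  | { role: "human" | "ai" | "system"; toolCallIds?: string[] }
  | { role: "tool"; toolCallId: string };

// Walk backwards from the target cutoff until no AI tool call is split from
// its tool result, mirroring findSafeCutoff/isSafeCutoffPoint above.
function safeCutoff(messages: SimpleMessage[], keep: number): number {
  if (messages.length <= keep) return 0;
  for (let cut = messages.length - keep; cut >= 0; cut--) {
    const splitsPair = messages.some((m, i) => {
      if (m.role !== "ai" || !m.toolCallIds?.length) return false;
      const ids = m.toolCallIds ?? [];
      // Unsafe if this AI message and any of its tool results end up on
      // opposite sides of the cut.
      return messages.some(
        (t, j) =>
          t.role === "tool" &&
          ids.includes(t.toolCallId) &&
          (i < cut) !== (j < cut)
      );
    });
    if (!splitsPair) return cut;
  }
  return 0;
}

// Keeping 2 messages would naively cut between the AI tool call and its tool
// result, so the cutoff backs up to index 1 instead of 2.
const history: SimpleMessage[] = [
  { role: "human" },
  { role: "ai", toolCallIds: ["call_1"] },
  { role: "tool", toolCallId: "call_1" },
  { role: "ai" },
];
console.log(safeCutoff(history, 2)); // 1
```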
@@ -1,107 +0,0 @@
1
- const require_rolldown_runtime = require('../../_virtual/rolldown_runtime.cjs');
2
- const require_llm_chain = require('../llm_chain.cjs');
3
- const require_openai_functions = require('../../output_parsers/openai_functions.cjs');
4
- const __langchain_core_utils_types = require_rolldown_runtime.__toESM(require("@langchain/core/utils/types"));
5
- const __langchain_core_utils_json_schema = require_rolldown_runtime.__toESM(require("@langchain/core/utils/json_schema"));
6
- const __langchain_core_output_parsers = require_rolldown_runtime.__toESM(require("@langchain/core/output_parsers"));
7
- const __langchain_openai = require_rolldown_runtime.__toESM(require("@langchain/openai"));
8
-
9
- //#region src/chains/openai_functions/structured_output.ts
10
- function isJsonSchema7Type(x) {
11
- return x.jsonSchema === void 0 && x.zodSchema === void 0;
12
- }
13
- /**
14
- * Class that extends the BaseLLMOutputParser class. It provides
15
- * functionality for parsing the structured output based on a JSON schema.
16
- */
17
- var FunctionCallStructuredOutputParser = class extends __langchain_core_output_parsers.BaseLLMOutputParser {
18
- lc_namespace = [
19
- "langchain",
20
- "chains",
21
- "openai_functions"
22
- ];
23
- functionOutputParser = new require_openai_functions.OutputFunctionsParser();
24
- jsonSchemaValidator;
25
- zodSchema;
26
- constructor(fieldsOrSchema) {
27
- let fields;
28
- if (isJsonSchema7Type(fieldsOrSchema)) fields = { jsonSchema: fieldsOrSchema };
29
- else fields = fieldsOrSchema;
30
- if (fields.jsonSchema === void 0 && fields.zodSchema === void 0) throw new Error(`Must provide at least one of "jsonSchema" or "zodSchema".`);
31
- super(fields);
32
- if (fields.jsonSchema !== void 0) this.jsonSchemaValidator = new __langchain_core_utils_json_schema.Validator(fields.jsonSchema, "7");
33
- if (fields.zodSchema !== void 0) this.zodSchema = fields.zodSchema;
34
- }
35
- /**
36
- * Method to parse the result of chat generations. It first parses the
37
- * result using the functionOutputParser, then parses the result against a
38
- * zod schema if the zod schema is available which allows the result to undergo
39
- * Zod preprocessing, then it parses that result against the JSON schema.
40
- * If the result is valid, it returns the parsed result. Otherwise, it throws
41
- * an OutputParserException.
42
- * @param generations Array of ChatGeneration instances to be parsed.
43
- * @returns The parsed result if it is valid according to the JSON schema.
44
- */
45
- async parseResult(generations) {
46
- const initialResult = await this.functionOutputParser.parseResult(generations);
47
- const parsedResult = JSON.parse(initialResult, (_, value) => {
48
- if (value === null) return void 0;
49
- return value;
50
- });
51
- if (this.zodSchema) {
52
- const zodParsedResult = await (0, __langchain_core_utils_types.interopSafeParseAsync)(this.zodSchema, parsedResult);
53
- if (zodParsedResult.success) return zodParsedResult.data;
54
- else throw new __langchain_core_output_parsers.OutputParserException(`Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify(zodParsedResult.error.issues)}`, initialResult);
55
- } else if (this.jsonSchemaValidator !== void 0) {
56
- const result = this.jsonSchemaValidator.validate(parsedResult);
57
- if (result.valid) return parsedResult;
58
- else throw new __langchain_core_output_parsers.OutputParserException(`Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify(result.errors)}`, initialResult);
59
- } else throw new Error("This parser requires an input JSON Schema or an input Zod schema.");
60
- }
61
- };
62
- /**
63
- * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead
64
- * Create a chain that returns output matching a JSON Schema.
65
- * @param input Object that includes all LLMChainInput fields except "outputParser"
66
- * as well as an additional required "outputSchema" JSON Schema object.
67
- * @returns OpenAPIChain
68
- */
69
- function createStructuredOutputChain(input) {
70
- const { outputSchema, llm = new __langchain_openai.ChatOpenAI({
71
- model: "gpt-3.5-turbo-0613",
72
- temperature: 0
73
- }), outputKey = "output", llmKwargs = {}, zodSchema,...rest } = input;
74
- if (outputSchema === void 0 && zodSchema === void 0) throw new Error(`Must provide one of "outputSchema" or "zodSchema".`);
75
- const functionName = "output_formatter";
76
- return new require_llm_chain.LLMChain({
77
- llm,
78
- llmKwargs: {
79
- ...llmKwargs,
80
- functions: [{
81
- name: functionName,
82
- description: `Output formatter. Should always be used to format your response to the user.`,
83
- parameters: outputSchema
84
- }],
85
- function_call: { name: functionName }
86
- },
87
- outputKey,
88
- outputParser: new FunctionCallStructuredOutputParser({
89
- jsonSchema: outputSchema,
90
- zodSchema
91
- }),
92
- ...rest
93
- });
94
- }
95
- /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */
96
- function createStructuredOutputChainFromZod(zodSchema, input) {
97
- return createStructuredOutputChain({
98
- ...input,
99
- outputSchema: (0, __langchain_core_utils_json_schema.toJsonSchema)(zodSchema),
100
- zodSchema
101
- });
102
- }
103
-
104
- //#endregion
105
- exports.createStructuredOutputChain = createStructuredOutputChain;
106
- exports.createStructuredOutputChainFromZod = createStructuredOutputChainFromZod;
107
- //# sourceMappingURL=structured_output.cjs.map
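The deleted parser above validates in a fixed order: parse the function-call JSON (mapping explicit nulls to undefined), prefer the Zod schema when one was supplied, otherwise fall back to the JSON Schema validator, and throw if neither is configured. Below is a minimal dependency-free sketch of that ordering; the `ParseDeps` shape and `validateAgainstJsonSchema` hook are hypothetical stand-ins, not LangChain APIs.

```typescript
// Illustrative only: a stripped-down version of the validation order in the
// deleted parseResult above.
type ParseDeps<T> = {
  zodParse?: (value: unknown) => { success: true; data: T } | { success: false; error: unknown };
  validateAgainstJsonSchema?: (value: unknown) => { valid: boolean; errors?: unknown };
};

function parseFunctionCallOutput<T>(raw: string, deps: ParseDeps<T>): T {
  // Mirror the reviver in the deleted code: drop explicit nulls so optional
  // fields behave like `undefined` for the downstream schema checks.
  const parsed = JSON.parse(raw, (_key, value) => (value === null ? undefined : value));

  if (deps.zodParse) {
    const result = deps.zodParse(parsed);
    if (result.success) return result.data;
    throw new Error(`Failed to parse. Text: "${raw}". Error: ${JSON.stringify(result.error)}`);
  }
  if (deps.validateAgainstJsonSchema) {
    const result = deps.validateAgainstJsonSchema(parsed);
    if (result.valid) return parsed as T;
    throw new Error(`Failed to parse. Text: "${raw}". Error: ${JSON.stringify(result.errors)}`);
  }
  throw new Error("This parser requires an input JSON Schema or an input Zod schema.");
}
```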
@@ -1 +0,0 @@
1
- {"version":3,"file":"structured_output.cjs","names":["x: JsonSchema7Type | FunctionCallStructuredOutputParserFields","BaseLLMOutputParser","OutputFunctionsParser","fieldsOrSchema:\n | JsonSchema7Type\n | FunctionCallStructuredOutputParserFields<T>","Validator","generations: ChatGeneration[]","OutputParserException","input: StructuredOutputChainInput<T>","ChatOpenAI","LLMChain","zodSchema: T","input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">"],"sources":["../../../src/chains/openai_functions/structured_output.ts"],"sourcesContent":["import {\n type JsonSchema7Type,\n Validator,\n toJsonSchema,\n} from \"@langchain/core/utils/json_schema\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport {\n BaseLLMOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport { ChatGeneration } from \"@langchain/core/outputs\";\nimport { AIMessageChunk } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n InferInteropZodOutput,\n interopSafeParseAsync,\n InteropZodObject,\n} from \"@langchain/core/utils/types\";\nimport { LLMChain, type LLMChainInput } from \"../llm_chain.js\";\nimport { OutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing the input for creating a structured output chain. It\n * extends the LLMChainInput type and includes an additional\n * 'outputSchema' field representing the JSON schema for the expected\n * output.\n */\nexport type StructuredOutputChainInput<\n T extends InteropZodObject = InteropZodObject\n> = Omit<LLMChainInput, \"outputParser\" | \"llm\"> & {\n outputSchema?: JsonSchema7Type;\n prompt: BasePromptTemplate;\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n zodSchema?: T;\n};\n\nexport type FunctionCallStructuredOutputParserFields<\n T extends InteropZodObject = InteropZodObject\n> = {\n jsonSchema?: JsonSchema7Type;\n zodSchema?: T;\n};\n\nfunction isJsonSchema7Type(\n x: JsonSchema7Type | FunctionCallStructuredOutputParserFields\n): x is JsonSchema7Type {\n return (\n (x as FunctionCallStructuredOutputParserFields).jsonSchema === undefined &&\n (x as FunctionCallStructuredOutputParserFields).zodSchema === undefined\n );\n}\n\n/**\n * Class that extends the BaseLLMOutputParser class. 
It provides\n * functionality for parsing the structured output based on a JSON schema.\n */\nexport class FunctionCallStructuredOutputParser<\n T extends InteropZodObject\n> extends BaseLLMOutputParser<InferInteropZodOutput<T>> {\n lc_namespace = [\"langchain\", \"chains\", \"openai_functions\"];\n\n protected functionOutputParser = new OutputFunctionsParser();\n\n protected jsonSchemaValidator?: Validator;\n\n protected zodSchema?: T;\n\n constructor(fieldsOrSchema: JsonSchema7Type);\n\n constructor(fieldsOrSchema: FunctionCallStructuredOutputParserFields<T>);\n\n constructor(\n fieldsOrSchema:\n | JsonSchema7Type\n | FunctionCallStructuredOutputParserFields<T>\n ) {\n let fields;\n if (isJsonSchema7Type(fieldsOrSchema)) {\n fields = { jsonSchema: fieldsOrSchema };\n } else {\n fields = fieldsOrSchema;\n }\n if (fields.jsonSchema === undefined && fields.zodSchema === undefined) {\n throw new Error(\n `Must provide at least one of \"jsonSchema\" or \"zodSchema\".`\n );\n }\n super(fields);\n if (fields.jsonSchema !== undefined) {\n this.jsonSchemaValidator = new Validator(\n fields.jsonSchema as Record<string, unknown>,\n \"7\"\n );\n }\n if (fields.zodSchema !== undefined) {\n this.zodSchema = fields.zodSchema;\n }\n }\n\n /**\n * Method to parse the result of chat generations. It first parses the\n * result using the functionOutputParser, then parses the result against a\n * zod schema if the zod schema is available which allows the result to undergo\n * Zod preprocessing, then it parses that result against the JSON schema.\n * If the result is valid, it returns the parsed result. Otherwise, it throws\n * an OutputParserException.\n * @param generations Array of ChatGeneration instances to be parsed.\n * @returns The parsed result if it is valid according to the JSON schema.\n */\n async parseResult(generations: ChatGeneration[]) {\n const initialResult = await this.functionOutputParser.parseResult(\n generations\n );\n const parsedResult = JSON.parse(initialResult, (_, value) => {\n if (value === null) {\n return undefined;\n }\n return value;\n });\n if (this.zodSchema) {\n const zodParsedResult = await interopSafeParseAsync(\n this.zodSchema,\n parsedResult\n );\n if (zodParsedResult.success) {\n return zodParsedResult.data;\n } else {\n throw new OutputParserException(\n `Failed to parse. Text: \"${initialResult}\". Error: ${JSON.stringify(\n zodParsedResult.error.issues\n )}`,\n initialResult\n );\n }\n } else if (this.jsonSchemaValidator !== undefined) {\n const result = this.jsonSchemaValidator.validate(parsedResult);\n if (result.valid) {\n return parsedResult;\n } else {\n throw new OutputParserException(\n `Failed to parse. Text: \"${initialResult}\". 
Error: ${JSON.stringify(\n result.errors\n )}`,\n initialResult\n );\n }\n } else {\n throw new Error(\n \"This parser requires an input JSON Schema or an input Zod schema.\"\n );\n }\n }\n}\n\n/**\n * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead\n * Create a chain that returns output matching a JSON Schema.\n * @param input Object that includes all LLMChainInput fields except \"outputParser\"\n * as well as an additional required \"outputSchema\" JSON Schema object.\n * @returns OpenAPIChain\n */\nexport function createStructuredOutputChain<\n T extends InteropZodObject = InteropZodObject\n>(\n input: StructuredOutputChainInput<T>\n): LLMChain<\n any,\n | BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>\n | ChatOpenAI<BaseFunctionCallOptions>\n> {\n const {\n outputSchema,\n llm = new ChatOpenAI({ model: \"gpt-3.5-turbo-0613\", temperature: 0 }),\n outputKey = \"output\",\n llmKwargs = {},\n zodSchema,\n ...rest\n } = input;\n if (outputSchema === undefined && zodSchema === undefined) {\n throw new Error(`Must provide one of \"outputSchema\" or \"zodSchema\".`);\n }\n const functionName = \"output_formatter\";\n return new LLMChain({\n llm,\n llmKwargs: {\n ...llmKwargs,\n functions: [\n {\n name: functionName,\n description: `Output formatter. Should always be used to format your response to the user.`,\n parameters: outputSchema,\n },\n ],\n function_call: {\n name: functionName,\n },\n },\n outputKey,\n outputParser: new FunctionCallStructuredOutputParser<T>({\n jsonSchema: outputSchema,\n zodSchema,\n }),\n ...rest,\n });\n}\n\n/** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */\nexport function createStructuredOutputChainFromZod<T extends InteropZodObject>(\n zodSchema: T,\n input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">\n): LLMChain<\n any,\n | BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>\n | ChatOpenAI<BaseFunctionCallOptions>\n> {\n return createStructuredOutputChain<T>({\n ...input,\n outputSchema: toJsonSchema(zodSchema),\n zodSchema,\n 
});\n}\n"],"mappings":";;;;;;;;;AA6CA,SAAS,kBACPA,GACsB;AACtB,QACG,EAA+C,eAAe,UAC9D,EAA+C,cAAc;AAEjE;;;;;AAMD,IAAa,qCAAb,cAEUC,oDAA8C;CACtD,eAAe;EAAC;EAAa;EAAU;CAAmB;CAE1D,AAAU,uBAAuB,IAAIC;CAErC,AAAU;CAEV,AAAU;CAMV,YACEC,gBAGA;EACA,IAAI;AACJ,MAAI,kBAAkB,eAAe,EACnC,SAAS,EAAE,YAAY,eAAgB;OAEvC,SAAS;AAEX,MAAI,OAAO,eAAe,UAAa,OAAO,cAAc,OAC1D,OAAM,IAAI,MACR,CAAC,yDAAyD,CAAC;EAG/D,MAAM,OAAO;AACb,MAAI,OAAO,eAAe,QACxB,KAAK,sBAAsB,IAAIC,6CAC7B,OAAO,YACP;AAGJ,MAAI,OAAO,cAAc,QACvB,KAAK,YAAY,OAAO;CAE3B;;;;;;;;;;;CAYD,MAAM,YAAYC,aAA+B;EAC/C,MAAM,gBAAgB,MAAM,KAAK,qBAAqB,YACpD,YACD;EACD,MAAM,eAAe,KAAK,MAAM,eAAe,CAAC,GAAG,UAAU;AAC3D,OAAI,UAAU,KACZ,QAAO;AAET,UAAO;EACR,EAAC;AACF,MAAI,KAAK,WAAW;GAClB,MAAM,kBAAkB,8DACtB,KAAK,WACL,aACD;AACD,OAAI,gBAAgB,QAClB,QAAO,gBAAgB;OAEvB,OAAM,IAAIC,sDACR,CAAC,wBAAwB,EAAE,cAAc,UAAU,EAAE,KAAK,UACxD,gBAAgB,MAAM,OACvB,EAAE,EACH;EAGL,WAAU,KAAK,wBAAwB,QAAW;GACjD,MAAM,SAAS,KAAK,oBAAoB,SAAS,aAAa;AAC9D,OAAI,OAAO,MACT,QAAO;OAEP,OAAM,IAAIA,sDACR,CAAC,wBAAwB,EAAE,cAAc,UAAU,EAAE,KAAK,UACxD,OAAO,OACR,EAAE,EACH;EAGL,MACC,OAAM,IAAI,MACR;CAGL;AACF;;;;;;;;AASD,SAAgB,4BAGdC,OAKA;CACA,MAAM,EACJ,cACA,MAAM,IAAIC,8BAAW;EAAE,OAAO;EAAsB,aAAa;CAAG,IACpE,YAAY,UACZ,YAAY,CAAE,GACd,UACA,GAAG,MACJ,GAAG;AACJ,KAAI,iBAAiB,UAAa,cAAc,OAC9C,OAAM,IAAI,MAAM,CAAC,kDAAkD,CAAC;CAEtE,MAAM,eAAe;AACrB,QAAO,IAAIC,2BAAS;EAClB;EACA,WAAW;GACT,GAAG;GACH,WAAW,CACT;IACE,MAAM;IACN,aAAa,CAAC,4EAA4E,CAAC;IAC3F,YAAY;GACb,CACF;GACD,eAAe,EACb,MAAM,aACP;EACF;EACD;EACA,cAAc,IAAI,mCAAsC;GACtD,YAAY;GACZ;EACD;EACD,GAAG;CACJ;AACF;;AAGD,SAAgB,mCACdC,WACAC,OAKA;AACA,QAAO,4BAA+B;EACpC,GAAG;EACH,mEAA2B,UAAU;EACrC;CACD,EAAC;AACH"}
@@ -1,38 +0,0 @@
1
- import { LLMChain, LLMChainInput } from "../llm_chain.cjs";
2
- import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
3
- import { InferInteropZodOutput, InteropZodObject } from "@langchain/core/utils/types";
4
- import { AIMessageChunk } from "@langchain/core/messages";
5
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
6
- import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
7
- import { BasePromptTemplate } from "@langchain/core/prompts";
8
- import { ChatGeneration } from "@langchain/core/outputs";
9
- import { ChatOpenAI } from "@langchain/openai";
10
- import { JsonSchema7Type } from "@langchain/core/utils/json_schema";
11
-
12
- //#region src/chains/openai_functions/structured_output.d.ts
13
-
14
- /**
15
- * Type representing the input for creating a structured output chain. It
16
- * extends the LLMChainInput type and includes an additional
17
- * 'outputSchema' field representing the JSON schema for the expected
18
- * output.
19
- */
20
- type StructuredOutputChainInput<T extends InteropZodObject = InteropZodObject> = Omit<LLMChainInput, "outputParser" | "llm"> & {
21
- outputSchema?: JsonSchema7Type;
22
- prompt: BasePromptTemplate;
23
- llm?: BaseChatModel<BaseFunctionCallOptions>;
24
- zodSchema?: T;
25
- };
26
- /**
27
- * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead
28
- * Create a chain that returns output matching a JSON Schema.
29
- * @param input Object that includes all LLMChainInput fields except "outputParser"
30
- * as well as an additional required "outputSchema" JSON Schema object.
31
- * @returns OpenAPIChain
32
- */
33
- declare function createStructuredOutputChain<T extends InteropZodObject = InteropZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;
34
- /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */
35
- declare function createStructuredOutputChainFromZod<T extends InteropZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, "outputSchema">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;
36
- //#endregion
37
- export { StructuredOutputChainInput, createStructuredOutputChain, createStructuredOutputChainFromZod };
38
- //# sourceMappingURL=structured_output.d.cts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"structured_output.d.cts","names":["JsonSchema7Type","Validator","ChatOpenAI","BasePromptTemplate","BaseLLMOutputParser","ChatGeneration","AIMessageChunk","BaseChatModel","BaseFunctionCallOptions","InferInteropZodOutput","InteropZodObject","LLMChain","LLMChainInput","OutputFunctionsParser","StructuredOutputChainInput","Omit","T","FunctionCallStructuredOutputParserFields","FunctionCallStructuredOutputParser","Promise","createStructuredOutputChain","createStructuredOutputChainFromZod"],"sources":["../../../src/chains/openai_functions/structured_output.d.ts"],"sourcesContent":["import { type JsonSchema7Type, Validator } from \"@langchain/core/utils/json_schema\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChatGeneration } from \"@langchain/core/outputs\";\nimport { AIMessageChunk } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport { InferInteropZodOutput, InteropZodObject } from \"@langchain/core/utils/types\";\nimport { LLMChain, type LLMChainInput } from \"../llm_chain.js\";\nimport { OutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n/**\n * Type representing the input for creating a structured output chain. It\n * extends the LLMChainInput type and includes an additional\n * 'outputSchema' field representing the JSON schema for the expected\n * output.\n */\nexport type StructuredOutputChainInput<T extends InteropZodObject = InteropZodObject> = Omit<LLMChainInput, \"outputParser\" | \"llm\"> & {\n outputSchema?: JsonSchema7Type;\n prompt: BasePromptTemplate;\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n zodSchema?: T;\n};\nexport type FunctionCallStructuredOutputParserFields<T extends InteropZodObject = InteropZodObject> = {\n jsonSchema?: JsonSchema7Type;\n zodSchema?: T;\n};\n/**\n * Class that extends the BaseLLMOutputParser class. It provides\n * functionality for parsing the structured output based on a JSON schema.\n */\nexport declare class FunctionCallStructuredOutputParser<T extends InteropZodObject> extends BaseLLMOutputParser<InferInteropZodOutput<T>> {\n lc_namespace: string[];\n protected functionOutputParser: OutputFunctionsParser;\n protected jsonSchemaValidator?: Validator;\n protected zodSchema?: T;\n constructor(fieldsOrSchema: JsonSchema7Type);\n constructor(fieldsOrSchema: FunctionCallStructuredOutputParserFields<T>);\n /**\n * Method to parse the result of chat generations. It first parses the\n * result using the functionOutputParser, then parses the result against a\n * zod schema if the zod schema is available which allows the result to undergo\n * Zod preprocessing, then it parses that result against the JSON schema.\n * If the result is valid, it returns the parsed result. 
Otherwise, it throws\n * an OutputParserException.\n * @param generations Array of ChatGeneration instances to be parsed.\n * @returns The parsed result if it is valid according to the JSON schema.\n */\n parseResult(generations: ChatGeneration[]): Promise<any>;\n}\n/**\n * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead\n * Create a chain that returns output matching a JSON Schema.\n * @param input Object that includes all LLMChainInput fields except \"outputParser\"\n * as well as an additional required \"outputSchema\" JSON Schema object.\n * @returns OpenAPIChain\n */\nexport declare function createStructuredOutputChain<T extends InteropZodObject = InteropZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;\n/** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */\nexport declare function createStructuredOutputChainFromZod<T extends InteropZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;\n"],"mappings":";;;;;;;;;;;;;;AAiBA;;;;;AAAwFe,KAA5ED,0BAA4EC,CAAAA,UAAvCL,gBAAuCK,GAApBL,gBAAoBK,CAAAA,GAAAA,IAAAA,CAAKH,aAALG,EAAAA,cAAAA,GAAAA,KAAAA,CAAAA,GAAAA;EAAI,YACzEf,CAAAA,EAAAA,eAAAA;EAAe,MACtBG,EAAAA,kBAAAA;EAAkB,GACNK,CAAAA,EAAdD,aAAcC,CAAAA,uBAAAA,CAAAA;EAAuB,SAArCD,CAAAA,EACMS,CADNT;CAAa;AAuCmJ;;;;;;;iBAFlJa,sCAAsCV,mBAAmBA,yBAAyBI,2BAA2BE,KAAKL,cAAcJ,cAAcC,yBAAyBF,kBAAkBJ,WAAWM;;iBAEpMa,6CAA6CX,6BAA6BM,UAAUD,KAAKD,2BAA2BE,sBAAsBL,cAAcJ,cAAcC,yBAAyBF,kBAAkBJ,WAAWM"}
@@ -1,38 +0,0 @@
1
- import { LLMChain, LLMChainInput } from "../llm_chain.js";
2
- import { AIMessageChunk } from "@langchain/core/messages";
3
- import { BaseChatModel } from "@langchain/core/language_models/chat_models";
4
- import { InferInteropZodOutput, InteropZodObject } from "@langchain/core/utils/types";
5
- import { JsonSchema7Type, Validator } from "@langchain/core/utils/json_schema";
6
- import { BasePromptTemplate } from "@langchain/core/prompts";
7
- import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
8
- import { ChatGeneration } from "@langchain/core/outputs";
9
- import { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
10
- import { ChatOpenAI } from "@langchain/openai";
11
-
12
- //#region src/chains/openai_functions/structured_output.d.ts
13
-
14
- /**
15
- * Type representing the input for creating a structured output chain. It
16
- * extends the LLMChainInput type and includes an additional
17
- * 'outputSchema' field representing the JSON schema for the expected
18
- * output.
19
- */
20
- type StructuredOutputChainInput<T extends InteropZodObject = InteropZodObject> = Omit<LLMChainInput, "outputParser" | "llm"> & {
21
- outputSchema?: JsonSchema7Type;
22
- prompt: BasePromptTemplate;
23
- llm?: BaseChatModel<BaseFunctionCallOptions>;
24
- zodSchema?: T;
25
- };
26
- /**
27
- * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead
28
- * Create a chain that returns output matching a JSON Schema.
29
- * @param input Object that includes all LLMChainInput fields except "outputParser"
30
- * as well as an additional required "outputSchema" JSON Schema object.
31
- * @returns OpenAPIChain
32
- */
33
- declare function createStructuredOutputChain<T extends InteropZodObject = InteropZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;
34
- /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */
35
- declare function createStructuredOutputChainFromZod<T extends InteropZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, "outputSchema">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;
36
- //#endregion
37
- export { StructuredOutputChainInput, createStructuredOutputChain, createStructuredOutputChainFromZod };
38
- //# sourceMappingURL=structured_output.d.ts.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"structured_output.d.ts","names":["JsonSchema7Type","Validator","ChatOpenAI","BasePromptTemplate","BaseLLMOutputParser","ChatGeneration","AIMessageChunk","BaseChatModel","BaseFunctionCallOptions","InferInteropZodOutput","InteropZodObject","LLMChain","LLMChainInput","OutputFunctionsParser","StructuredOutputChainInput","Omit","T","FunctionCallStructuredOutputParserFields","FunctionCallStructuredOutputParser","Promise","createStructuredOutputChain","createStructuredOutputChainFromZod"],"sources":["../../../src/chains/openai_functions/structured_output.d.ts"],"sourcesContent":["import { type JsonSchema7Type, Validator } from \"@langchain/core/utils/json_schema\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport { ChatGeneration } from \"@langchain/core/outputs\";\nimport { AIMessageChunk } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport { InferInteropZodOutput, InteropZodObject } from \"@langchain/core/utils/types\";\nimport { LLMChain, type LLMChainInput } from \"../llm_chain.js\";\nimport { OutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n/**\n * Type representing the input for creating a structured output chain. It\n * extends the LLMChainInput type and includes an additional\n * 'outputSchema' field representing the JSON schema for the expected\n * output.\n */\nexport type StructuredOutputChainInput<T extends InteropZodObject = InteropZodObject> = Omit<LLMChainInput, \"outputParser\" | \"llm\"> & {\n outputSchema?: JsonSchema7Type;\n prompt: BasePromptTemplate;\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n zodSchema?: T;\n};\nexport type FunctionCallStructuredOutputParserFields<T extends InteropZodObject = InteropZodObject> = {\n jsonSchema?: JsonSchema7Type;\n zodSchema?: T;\n};\n/**\n * Class that extends the BaseLLMOutputParser class. It provides\n * functionality for parsing the structured output based on a JSON schema.\n */\nexport declare class FunctionCallStructuredOutputParser<T extends InteropZodObject> extends BaseLLMOutputParser<InferInteropZodOutput<T>> {\n lc_namespace: string[];\n protected functionOutputParser: OutputFunctionsParser;\n protected jsonSchemaValidator?: Validator;\n protected zodSchema?: T;\n constructor(fieldsOrSchema: JsonSchema7Type);\n constructor(fieldsOrSchema: FunctionCallStructuredOutputParserFields<T>);\n /**\n * Method to parse the result of chat generations. It first parses the\n * result using the functionOutputParser, then parses the result against a\n * zod schema if the zod schema is available which allows the result to undergo\n * Zod preprocessing, then it parses that result against the JSON schema.\n * If the result is valid, it returns the parsed result. 
Otherwise, it throws\n * an OutputParserException.\n * @param generations Array of ChatGeneration instances to be parsed.\n * @returns The parsed result if it is valid according to the JSON schema.\n */\n parseResult(generations: ChatGeneration[]): Promise<any>;\n}\n/**\n * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead\n * Create a chain that returns output matching a JSON Schema.\n * @param input Object that includes all LLMChainInput fields except \"outputParser\"\n * as well as an additional required \"outputSchema\" JSON Schema object.\n * @returns OpenAPIChain\n */\nexport declare function createStructuredOutputChain<T extends InteropZodObject = InteropZodObject>(input: StructuredOutputChainInput<T>): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;\n/** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */\nexport declare function createStructuredOutputChainFromZod<T extends InteropZodObject>(zodSchema: T, input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">): LLMChain<any, BaseChatModel<BaseFunctionCallOptions, AIMessageChunk> | ChatOpenAI<BaseFunctionCallOptions>>;\n"],"mappings":";;;;;;;;;;;;;;AAiBA;;;;;AAAwFe,KAA5ED,0BAA4EC,CAAAA,UAAvCL,gBAAuCK,GAApBL,gBAAoBK,CAAAA,GAAAA,IAAAA,CAAKH,aAALG,EAAAA,cAAAA,GAAAA,KAAAA,CAAAA,GAAAA;EAAI,YACzEf,CAAAA,EAAAA,eAAAA;EAAe,MACtBG,EAAAA,kBAAAA;EAAkB,GACNK,CAAAA,EAAdD,aAAcC,CAAAA,uBAAAA,CAAAA;EAAuB,SAArCD,CAAAA,EACMS,CADNT;CAAa;AAuCmJ;;;;;;;iBAFlJa,sCAAsCV,mBAAmBA,yBAAyBI,2BAA2BE,KAAKL,cAAcJ,cAAcC,yBAAyBF,kBAAkBJ,WAAWM;;iBAEpMa,6CAA6CX,6BAA6BM,UAAUD,KAAKD,2BAA2BE,sBAAsBL,cAAcJ,cAAcC,yBAAyBF,kBAAkBJ,WAAWM"}
@@ -1,105 +0,0 @@
1
- import { LLMChain } from "../llm_chain.js";
2
- import { OutputFunctionsParser } from "../../output_parsers/openai_functions.js";
3
- import { interopSafeParseAsync } from "@langchain/core/utils/types";
4
- import { Validator, toJsonSchema } from "@langchain/core/utils/json_schema";
5
- import { BaseLLMOutputParser, OutputParserException } from "@langchain/core/output_parsers";
6
- import { ChatOpenAI } from "@langchain/openai";
7
-
8
- //#region src/chains/openai_functions/structured_output.ts
9
- function isJsonSchema7Type(x) {
10
- return x.jsonSchema === void 0 && x.zodSchema === void 0;
11
- }
12
- /**
13
- * Class that extends the BaseLLMOutputParser class. It provides
14
- * functionality for parsing the structured output based on a JSON schema.
15
- */
16
- var FunctionCallStructuredOutputParser = class extends BaseLLMOutputParser {
17
- lc_namespace = [
18
- "langchain",
19
- "chains",
20
- "openai_functions"
21
- ];
22
- functionOutputParser = new OutputFunctionsParser();
23
- jsonSchemaValidator;
24
- zodSchema;
25
- constructor(fieldsOrSchema) {
26
- let fields;
27
- if (isJsonSchema7Type(fieldsOrSchema)) fields = { jsonSchema: fieldsOrSchema };
28
- else fields = fieldsOrSchema;
29
- if (fields.jsonSchema === void 0 && fields.zodSchema === void 0) throw new Error(`Must provide at least one of "jsonSchema" or "zodSchema".`);
30
- super(fields);
31
- if (fields.jsonSchema !== void 0) this.jsonSchemaValidator = new Validator(fields.jsonSchema, "7");
32
- if (fields.zodSchema !== void 0) this.zodSchema = fields.zodSchema;
33
- }
34
- /**
35
- * Method to parse the result of chat generations. It first parses the
36
- * result using the functionOutputParser, then parses the result against a
37
- * zod schema if the zod schema is available which allows the result to undergo
38
- * Zod preprocessing, then it parses that result against the JSON schema.
39
- * If the result is valid, it returns the parsed result. Otherwise, it throws
40
- * an OutputParserException.
41
- * @param generations Array of ChatGeneration instances to be parsed.
42
- * @returns The parsed result if it is valid according to the JSON schema.
43
- */
44
- async parseResult(generations) {
45
- const initialResult = await this.functionOutputParser.parseResult(generations);
46
- const parsedResult = JSON.parse(initialResult, (_, value) => {
47
- if (value === null) return void 0;
48
- return value;
49
- });
50
- if (this.zodSchema) {
51
- const zodParsedResult = await interopSafeParseAsync(this.zodSchema, parsedResult);
52
- if (zodParsedResult.success) return zodParsedResult.data;
53
- else throw new OutputParserException(`Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify(zodParsedResult.error.issues)}`, initialResult);
54
- } else if (this.jsonSchemaValidator !== void 0) {
55
- const result = this.jsonSchemaValidator.validate(parsedResult);
56
- if (result.valid) return parsedResult;
57
- else throw new OutputParserException(`Failed to parse. Text: "${initialResult}". Error: ${JSON.stringify(result.errors)}`, initialResult);
58
- } else throw new Error("This parser requires an input JSON Schema or an input Zod schema.");
59
- }
60
- };
61
- /**
62
- * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead
63
- * Create a chain that returns output matching a JSON Schema.
64
- * @param input Object that includes all LLMChainInput fields except "outputParser"
65
- * as well as an additional required "outputSchema" JSON Schema object.
66
- * @returns OpenAPIChain
67
- */
68
- function createStructuredOutputChain(input) {
69
- const { outputSchema, llm = new ChatOpenAI({
70
- model: "gpt-3.5-turbo-0613",
71
- temperature: 0
72
- }), outputKey = "output", llmKwargs = {}, zodSchema,...rest } = input;
73
- if (outputSchema === void 0 && zodSchema === void 0) throw new Error(`Must provide one of "outputSchema" or "zodSchema".`);
74
- const functionName = "output_formatter";
75
- return new LLMChain({
76
- llm,
77
- llmKwargs: {
78
- ...llmKwargs,
79
- functions: [{
80
- name: functionName,
81
- description: `Output formatter. Should always be used to format your response to the user.`,
82
- parameters: outputSchema
83
- }],
84
- function_call: { name: functionName }
85
- },
86
- outputKey,
87
- outputParser: new FunctionCallStructuredOutputParser({
88
- jsonSchema: outputSchema,
89
- zodSchema
90
- }),
91
- ...rest
92
- });
93
- }
94
- /** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */
95
- function createStructuredOutputChainFromZod(zodSchema, input) {
96
- return createStructuredOutputChain({
97
- ...input,
98
- outputSchema: toJsonSchema(zodSchema),
99
- zodSchema
100
- });
101
- }
102
-
103
- //#endregion
104
- export { createStructuredOutputChain, createStructuredOutputChainFromZod };
105
- //# sourceMappingURL=structured_output.js.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"structured_output.js","names":["x: JsonSchema7Type | FunctionCallStructuredOutputParserFields","fieldsOrSchema:\n | JsonSchema7Type\n | FunctionCallStructuredOutputParserFields<T>","generations: ChatGeneration[]","input: StructuredOutputChainInput<T>","zodSchema: T","input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">"],"sources":["../../../src/chains/openai_functions/structured_output.ts"],"sourcesContent":["import {\n type JsonSchema7Type,\n Validator,\n toJsonSchema,\n} from \"@langchain/core/utils/json_schema\";\nimport { ChatOpenAI } from \"@langchain/openai\";\nimport { BasePromptTemplate } from \"@langchain/core/prompts\";\nimport {\n BaseLLMOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport { ChatGeneration } from \"@langchain/core/outputs\";\nimport { AIMessageChunk } from \"@langchain/core/messages\";\nimport type { BaseChatModel } from \"@langchain/core/language_models/chat_models\";\nimport type { BaseFunctionCallOptions } from \"@langchain/core/language_models/base\";\nimport {\n InferInteropZodOutput,\n interopSafeParseAsync,\n InteropZodObject,\n} from \"@langchain/core/utils/types\";\nimport { LLMChain, type LLMChainInput } from \"../llm_chain.js\";\nimport { OutputFunctionsParser } from \"../../output_parsers/openai_functions.js\";\n\n/**\n * Type representing the input for creating a structured output chain. It\n * extends the LLMChainInput type and includes an additional\n * 'outputSchema' field representing the JSON schema for the expected\n * output.\n */\nexport type StructuredOutputChainInput<\n T extends InteropZodObject = InteropZodObject\n> = Omit<LLMChainInput, \"outputParser\" | \"llm\"> & {\n outputSchema?: JsonSchema7Type;\n prompt: BasePromptTemplate;\n llm?: BaseChatModel<BaseFunctionCallOptions>;\n zodSchema?: T;\n};\n\nexport type FunctionCallStructuredOutputParserFields<\n T extends InteropZodObject = InteropZodObject\n> = {\n jsonSchema?: JsonSchema7Type;\n zodSchema?: T;\n};\n\nfunction isJsonSchema7Type(\n x: JsonSchema7Type | FunctionCallStructuredOutputParserFields\n): x is JsonSchema7Type {\n return (\n (x as FunctionCallStructuredOutputParserFields).jsonSchema === undefined &&\n (x as FunctionCallStructuredOutputParserFields).zodSchema === undefined\n );\n}\n\n/**\n * Class that extends the BaseLLMOutputParser class. 
It provides\n * functionality for parsing the structured output based on a JSON schema.\n */\nexport class FunctionCallStructuredOutputParser<\n T extends InteropZodObject\n> extends BaseLLMOutputParser<InferInteropZodOutput<T>> {\n lc_namespace = [\"langchain\", \"chains\", \"openai_functions\"];\n\n protected functionOutputParser = new OutputFunctionsParser();\n\n protected jsonSchemaValidator?: Validator;\n\n protected zodSchema?: T;\n\n constructor(fieldsOrSchema: JsonSchema7Type);\n\n constructor(fieldsOrSchema: FunctionCallStructuredOutputParserFields<T>);\n\n constructor(\n fieldsOrSchema:\n | JsonSchema7Type\n | FunctionCallStructuredOutputParserFields<T>\n ) {\n let fields;\n if (isJsonSchema7Type(fieldsOrSchema)) {\n fields = { jsonSchema: fieldsOrSchema };\n } else {\n fields = fieldsOrSchema;\n }\n if (fields.jsonSchema === undefined && fields.zodSchema === undefined) {\n throw new Error(\n `Must provide at least one of \"jsonSchema\" or \"zodSchema\".`\n );\n }\n super(fields);\n if (fields.jsonSchema !== undefined) {\n this.jsonSchemaValidator = new Validator(\n fields.jsonSchema as Record<string, unknown>,\n \"7\"\n );\n }\n if (fields.zodSchema !== undefined) {\n this.zodSchema = fields.zodSchema;\n }\n }\n\n /**\n * Method to parse the result of chat generations. It first parses the\n * result using the functionOutputParser, then parses the result against a\n * zod schema if the zod schema is available which allows the result to undergo\n * Zod preprocessing, then it parses that result against the JSON schema.\n * If the result is valid, it returns the parsed result. Otherwise, it throws\n * an OutputParserException.\n * @param generations Array of ChatGeneration instances to be parsed.\n * @returns The parsed result if it is valid according to the JSON schema.\n */\n async parseResult(generations: ChatGeneration[]) {\n const initialResult = await this.functionOutputParser.parseResult(\n generations\n );\n const parsedResult = JSON.parse(initialResult, (_, value) => {\n if (value === null) {\n return undefined;\n }\n return value;\n });\n if (this.zodSchema) {\n const zodParsedResult = await interopSafeParseAsync(\n this.zodSchema,\n parsedResult\n );\n if (zodParsedResult.success) {\n return zodParsedResult.data;\n } else {\n throw new OutputParserException(\n `Failed to parse. Text: \"${initialResult}\". Error: ${JSON.stringify(\n zodParsedResult.error.issues\n )}`,\n initialResult\n );\n }\n } else if (this.jsonSchemaValidator !== undefined) {\n const result = this.jsonSchemaValidator.validate(parsedResult);\n if (result.valid) {\n return parsedResult;\n } else {\n throw new OutputParserException(\n `Failed to parse. Text: \"${initialResult}\". 
Error: ${JSON.stringify(\n result.errors\n )}`,\n initialResult\n );\n }\n } else {\n throw new Error(\n \"This parser requires an input JSON Schema or an input Zod schema.\"\n );\n }\n }\n}\n\n/**\n * @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead\n * Create a chain that returns output matching a JSON Schema.\n * @param input Object that includes all LLMChainInput fields except \"outputParser\"\n * as well as an additional required \"outputSchema\" JSON Schema object.\n * @returns OpenAPIChain\n */\nexport function createStructuredOutputChain<\n T extends InteropZodObject = InteropZodObject\n>(\n input: StructuredOutputChainInput<T>\n): LLMChain<\n any,\n | BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>\n | ChatOpenAI<BaseFunctionCallOptions>\n> {\n const {\n outputSchema,\n llm = new ChatOpenAI({ model: \"gpt-3.5-turbo-0613\", temperature: 0 }),\n outputKey = \"output\",\n llmKwargs = {},\n zodSchema,\n ...rest\n } = input;\n if (outputSchema === undefined && zodSchema === undefined) {\n throw new Error(`Must provide one of \"outputSchema\" or \"zodSchema\".`);\n }\n const functionName = \"output_formatter\";\n return new LLMChain({\n llm,\n llmKwargs: {\n ...llmKwargs,\n functions: [\n {\n name: functionName,\n description: `Output formatter. Should always be used to format your response to the user.`,\n parameters: outputSchema,\n },\n ],\n function_call: {\n name: functionName,\n },\n },\n outputKey,\n outputParser: new FunctionCallStructuredOutputParser<T>({\n jsonSchema: outputSchema,\n zodSchema,\n }),\n ...rest,\n });\n}\n\n/** @deprecated Use {@link https://api.js.langchain.com/functions/langchain.chains_openai_functions.createStructuredOutputRunnable.html | createStructuredOutputRunnable} instead */\nexport function createStructuredOutputChainFromZod<T extends InteropZodObject>(\n zodSchema: T,\n input: Omit<StructuredOutputChainInput<T>, \"outputSchema\">\n): LLMChain<\n any,\n | BaseChatModel<BaseFunctionCallOptions, AIMessageChunk>\n | ChatOpenAI<BaseFunctionCallOptions>\n> {\n return createStructuredOutputChain<T>({\n ...input,\n outputSchema: toJsonSchema(zodSchema),\n zodSchema,\n 
});\n}\n"],"mappings":";;;;;;;;AA6CA,SAAS,kBACPA,GACsB;AACtB,QACG,EAA+C,eAAe,UAC9D,EAA+C,cAAc;AAEjE;;;;;AAMD,IAAa,qCAAb,cAEU,oBAA8C;CACtD,eAAe;EAAC;EAAa;EAAU;CAAmB;CAE1D,AAAU,uBAAuB,IAAI;CAErC,AAAU;CAEV,AAAU;CAMV,YACEC,gBAGA;EACA,IAAI;AACJ,MAAI,kBAAkB,eAAe,EACnC,SAAS,EAAE,YAAY,eAAgB;OAEvC,SAAS;AAEX,MAAI,OAAO,eAAe,UAAa,OAAO,cAAc,OAC1D,OAAM,IAAI,MACR,CAAC,yDAAyD,CAAC;EAG/D,MAAM,OAAO;AACb,MAAI,OAAO,eAAe,QACxB,KAAK,sBAAsB,IAAI,UAC7B,OAAO,YACP;AAGJ,MAAI,OAAO,cAAc,QACvB,KAAK,YAAY,OAAO;CAE3B;;;;;;;;;;;CAYD,MAAM,YAAYC,aAA+B;EAC/C,MAAM,gBAAgB,MAAM,KAAK,qBAAqB,YACpD,YACD;EACD,MAAM,eAAe,KAAK,MAAM,eAAe,CAAC,GAAG,UAAU;AAC3D,OAAI,UAAU,KACZ,QAAO;AAET,UAAO;EACR,EAAC;AACF,MAAI,KAAK,WAAW;GAClB,MAAM,kBAAkB,MAAM,sBAC5B,KAAK,WACL,aACD;AACD,OAAI,gBAAgB,QAClB,QAAO,gBAAgB;OAEvB,OAAM,IAAI,sBACR,CAAC,wBAAwB,EAAE,cAAc,UAAU,EAAE,KAAK,UACxD,gBAAgB,MAAM,OACvB,EAAE,EACH;EAGL,WAAU,KAAK,wBAAwB,QAAW;GACjD,MAAM,SAAS,KAAK,oBAAoB,SAAS,aAAa;AAC9D,OAAI,OAAO,MACT,QAAO;OAEP,OAAM,IAAI,sBACR,CAAC,wBAAwB,EAAE,cAAc,UAAU,EAAE,KAAK,UACxD,OAAO,OACR,EAAE,EACH;EAGL,MACC,OAAM,IAAI,MACR;CAGL;AACF;;;;;;;;AASD,SAAgB,4BAGdC,OAKA;CACA,MAAM,EACJ,cACA,MAAM,IAAI,WAAW;EAAE,OAAO;EAAsB,aAAa;CAAG,IACpE,YAAY,UACZ,YAAY,CAAE,GACd,UACA,GAAG,MACJ,GAAG;AACJ,KAAI,iBAAiB,UAAa,cAAc,OAC9C,OAAM,IAAI,MAAM,CAAC,kDAAkD,CAAC;CAEtE,MAAM,eAAe;AACrB,QAAO,IAAI,SAAS;EAClB;EACA,WAAW;GACT,GAAG;GACH,WAAW,CACT;IACE,MAAM;IACN,aAAa,CAAC,4EAA4E,CAAC;IAC3F,YAAY;GACb,CACF;GACD,eAAe,EACb,MAAM,aACP;EACF;EACD;EACA,cAAc,IAAI,mCAAsC;GACtD,YAAY;GACZ;EACD;EACD,GAAG;CACJ;AACF;;AAGD,SAAgB,mCACdC,WACAC,OAKA;AACA,QAAO,4BAA+B;EACpC,GAAG;EACH,cAAc,aAAa,UAAU;EACrC;CACD,EAAC;AACH"}
@@ -1,107 +0,0 @@
1
- const require_rolldown_runtime = require('../_virtual/rolldown_runtime.cjs');
2
- const require_base = require('./base.cjs');
3
- const __langchain_openai = require_rolldown_runtime.__toESM(require("@langchain/openai"));
4
- const __langchain_core_utils_async_caller = require_rolldown_runtime.__toESM(require("@langchain/core/utils/async_caller"));
5
- const __langchain_core_utils_env = require_rolldown_runtime.__toESM(require("@langchain/core/utils/env"));
6
-
7
- //#region src/chains/openai_moderation.ts
8
- /**
9
- * Class representing a chain for moderating text using the OpenAI
10
- * Moderation API. It extends the BaseChain class and implements the
11
- * OpenAIModerationChainInput interface.
12
- * @example
13
- * ```typescript
14
- * const moderation = new OpenAIModerationChain({ throwError: true });
15
- *
16
- * const badString = "Bad naughty words from user";
17
- *
18
- * try {
19
- * const { output: moderatedContent, results } = await moderation.call({
20
- * input: badString,
21
- * });
22
- *
23
- * if (results[0].category_scores["harassment/threatening"] > 0.01) {
24
- * throw new Error("Harassment detected!");
25
- * }
26
- *
27
- * const model = new OpenAI({ temperature: 0 });
28
- * const promptTemplate = "Hello, how are you today {person}?";
29
- * const prompt = new PromptTemplate({
30
- * template: promptTemplate,
31
- * inputVariables: ["person"],
32
- * });
33
- * const chain = new LLMChain({ llm: model, prompt });
34
- * const response = await chain.call({ person: moderatedContent });
35
- * console.log({ response });
36
- * } catch (error) {
37
- * console.error("Naughty words detected!");
38
- * }
39
- * ```
40
- */
41
- var OpenAIModerationChain = class extends require_base.BaseChain {
42
- static lc_name() {
43
- return "OpenAIModerationChain";
44
- }
45
- get lc_secrets() {
46
- return { openAIApiKey: "OPENAI_API_KEY" };
47
- }
48
- inputKey = "input";
49
- outputKey = "output";
50
- openAIApiKey;
51
- openAIOrganization;
52
- clientConfig;
53
- client;
54
- throwError;
55
- caller;
56
- constructor(fields) {
57
- super(fields);
58
- this.throwError = fields?.throwError ?? false;
59
- this.openAIApiKey = fields?.apiKey ?? fields?.openAIApiKey ?? (0, __langchain_core_utils_env.getEnvironmentVariable)("OPENAI_API_KEY");
60
- if (!this.openAIApiKey) throw new Error("OpenAI API key not found");
61
- this.openAIOrganization = fields?.openAIOrganization;
62
- this.clientConfig = {
63
- ...fields?.configuration,
64
- apiKey: this.openAIApiKey,
65
- organization: this.openAIOrganization
66
- };
67
- this.client = new __langchain_openai.OpenAIClient(this.clientConfig);
68
- this.caller = new __langchain_core_utils_async_caller.AsyncCaller(fields ?? {});
69
- }
70
- _moderate(text, results) {
71
- if (results.flagged) {
72
- const errorStr = "Text was found that violates OpenAI's content policy.";
73
- if (this.throwError) throw new Error(errorStr);
74
- else return errorStr;
75
- }
76
- return text;
77
- }
78
- async _call(values) {
79
- const text = values[this.inputKey];
80
- const moderationRequest = { input: text };
81
- let mod;
82
- try {
83
- mod = await this.caller.call(() => this.client.moderations.create(moderationRequest));
84
- } catch (error) {
85
- if (error instanceof Error) throw error;
86
- else throw new Error(error);
87
- }
88
- const output = this._moderate(text, mod.results[0]);
89
- return {
90
- [this.outputKey]: output,
91
- results: mod.results
92
- };
93
- }
94
- _chainType() {
95
- return "moderation_chain";
96
- }
97
- get inputKeys() {
98
- return [this.inputKey];
99
- }
100
- get outputKeys() {
101
- return [this.outputKey];
102
- }
103
- };
104
-
105
- //#endregion
106
- exports.OpenAIModerationChain = OpenAIModerationChain;
107
- //# sourceMappingURL=openai_moderation.cjs.map
@@ -1 +0,0 @@
1
- {"version":3,"file":"openai_moderation.cjs","names":["BaseChain","fields?: OpenAIModerationChainInput","OpenAIClient","AsyncCaller","text: string","results: OpenAIClient.Moderation","values: ChainValues","moderationRequest: OpenAIClient.ModerationCreateParams"],"sources":["../../src/chains/openai_moderation.ts"],"sourcesContent":["import { type ClientOptions, OpenAIClient } from \"@langchain/openai\";\nimport { ChainValues } from \"@langchain/core/utils/types\";\nimport {\n AsyncCaller,\n AsyncCallerParams,\n} from \"@langchain/core/utils/async_caller\";\nimport { getEnvironmentVariable } from \"@langchain/core/utils/env\";\nimport { BaseChain, ChainInputs } from \"./base.js\";\n\n/**\n * Interface for the input parameters of the OpenAIModerationChain class.\n */\nexport interface OpenAIModerationChainInput\n extends ChainInputs,\n AsyncCallerParams {\n apiKey?: string;\n /** @deprecated Use \"apiKey\" instead. */\n openAIApiKey?: string;\n openAIOrganization?: string;\n throwError?: boolean;\n configuration?: ClientOptions;\n}\n\n/**\n * Class representing a chain for moderating text using the OpenAI\n * Moderation API. It extends the BaseChain class and implements the\n * OpenAIModerationChainInput interface.\n * @example\n * ```typescript\n * const moderation = new OpenAIModerationChain({ throwError: true });\n *\n * const badString = \"Bad naughty words from user\";\n *\n * try {\n * const { output: moderatedContent, results } = await moderation.call({\n * input: badString,\n * });\n *\n * if (results[0].category_scores[\"harassment/threatening\"] > 0.01) {\n * throw new Error(\"Harassment detected!\");\n * }\n *\n * const model = new OpenAI({ temperature: 0 });\n * const promptTemplate = \"Hello, how are you today {person}?\";\n * const prompt = new PromptTemplate({\n * template: promptTemplate,\n * inputVariables: [\"person\"],\n * });\n * const chain = new LLMChain({ llm: model, prompt });\n * const response = await chain.call({ person: moderatedContent });\n * console.log({ response });\n * } catch (error) {\n * console.error(\"Naughty words detected!\");\n * }\n * ```\n */\nexport class OpenAIModerationChain\n extends BaseChain\n implements OpenAIModerationChainInput\n{\n static lc_name() {\n return \"OpenAIModerationChain\";\n }\n\n get lc_secrets(): { [key: string]: string } | undefined {\n return {\n openAIApiKey: \"OPENAI_API_KEY\",\n };\n }\n\n inputKey = \"input\";\n\n outputKey = \"output\";\n\n openAIApiKey?: string;\n\n openAIOrganization?: string;\n\n clientConfig: ClientOptions;\n\n client: OpenAIClient;\n\n throwError: boolean;\n\n caller: AsyncCaller;\n\n constructor(fields?: OpenAIModerationChainInput) {\n super(fields);\n this.throwError = fields?.throwError ?? false;\n this.openAIApiKey =\n fields?.apiKey ??\n fields?.openAIApiKey ??\n getEnvironmentVariable(\"OPENAI_API_KEY\");\n\n if (!this.openAIApiKey) {\n throw new Error(\"OpenAI API key not found\");\n }\n\n this.openAIOrganization = fields?.openAIOrganization;\n\n this.clientConfig = {\n ...fields?.configuration,\n apiKey: this.openAIApiKey,\n organization: this.openAIOrganization,\n };\n\n this.client = new OpenAIClient(this.clientConfig);\n\n this.caller = new AsyncCaller(fields ?? 
{});\n }\n\n _moderate(text: string, results: OpenAIClient.Moderation): string {\n if (results.flagged) {\n const errorStr = \"Text was found that violates OpenAI's content policy.\";\n if (this.throwError) {\n throw new Error(errorStr);\n } else {\n return errorStr;\n }\n }\n return text;\n }\n\n async _call(values: ChainValues): Promise<ChainValues> {\n const text = values[this.inputKey];\n const moderationRequest: OpenAIClient.ModerationCreateParams = {\n input: text,\n };\n let mod;\n try {\n mod = await this.caller.call(() =>\n this.client.moderations.create(moderationRequest)\n );\n } catch (error) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (error instanceof Error) {\n throw error;\n } else {\n throw new Error(error as string);\n }\n }\n const output = this._moderate(text, mod.results[0]);\n return {\n [this.outputKey]: output,\n results: mod.results,\n };\n }\n\n _chainType() {\n return \"moderation_chain\";\n }\n\n get inputKeys(): string[] {\n return [this.inputKey];\n }\n\n get outputKeys(): string[] {\n return [this.outputKey];\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAwDA,IAAa,wBAAb,cACUA,uBAEV;CACE,OAAO,UAAU;AACf,SAAO;CACR;CAED,IAAI,aAAoD;AACtD,SAAO,EACL,cAAc,iBACf;CACF;CAED,WAAW;CAEX,YAAY;CAEZ;CAEA;CAEA;CAEA;CAEA;CAEA;CAEA,YAAYC,QAAqC;EAC/C,MAAM,OAAO;EACb,KAAK,aAAa,QAAQ,cAAc;EACxC,KAAK,eACH,QAAQ,UACR,QAAQ,uEACe,iBAAiB;AAE1C,MAAI,CAAC,KAAK,aACR,OAAM,IAAI,MAAM;EAGlB,KAAK,qBAAqB,QAAQ;EAElC,KAAK,eAAe;GAClB,GAAG,QAAQ;GACX,QAAQ,KAAK;GACb,cAAc,KAAK;EACpB;EAED,KAAK,SAAS,IAAIC,gCAAa,KAAK;EAEpC,KAAK,SAAS,IAAIC,gDAAY,UAAU,CAAE;CAC3C;CAED,UAAUC,MAAcC,SAA0C;AAChE,MAAI,QAAQ,SAAS;GACnB,MAAM,WAAW;AACjB,OAAI,KAAK,WACP,OAAM,IAAI,MAAM;OAEhB,QAAO;EAEV;AACD,SAAO;CACR;CAED,MAAM,MAAMC,QAA2C;EACrD,MAAM,OAAO,OAAO,KAAK;EACzB,MAAMC,oBAAyD,EAC7D,OAAO,KACR;EACD,IAAI;AACJ,MAAI;GACF,MAAM,MAAM,KAAK,OAAO,KAAK,MAC3B,KAAK,OAAO,YAAY,OAAO,kBAAkB,CAClD;EACF,SAAQ,OAAO;AAEd,OAAI,iBAAiB,MACnB,OAAM;OAEN,OAAM,IAAI,MAAM;EAEnB;EACD,MAAM,SAAS,KAAK,UAAU,MAAM,IAAI,QAAQ,GAAG;AACnD,SAAO;IACJ,KAAK,YAAY;GAClB,SAAS,IAAI;EACd;CACF;CAED,aAAa;AACX,SAAO;CACR;CAED,IAAI,YAAsB;AACxB,SAAO,CAAC,KAAK,QAAS;CACvB;CAED,IAAI,aAAuB;AACzB,SAAO,CAAC,KAAK,SAAU;CACxB;AACF"}
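The deleted chain above is a thin wrapper: `_call` sends the input text to the OpenAI moderation endpoint through an `AsyncCaller`, and `_moderate` either throws or substitutes a policy notice when the first result is flagged, depending on `throwError`. Below is a rough sketch of that moderate-then-gate pattern written against the `OpenAIClient` that `@langchain/openai` re-exports; the standalone `moderate` helper and its options are illustrative, not a replacement shipped by the package.

```typescript
// Illustrative only: the moderate-then-gate behaviour of the deleted
// OpenAIModerationChain, without the BaseChain plumbing.
import { OpenAIClient } from "@langchain/openai";

async function moderate(
  text: string,
  options: { apiKey?: string; throwError?: boolean } = {}
): Promise<string> {
  const client = new OpenAIClient({ apiKey: options.apiKey ?? process.env.OPENAI_API_KEY });
  const mod = await client.moderations.create({ input: text });

  if (mod.results[0]?.flagged) {
    const errorStr = "Text was found that violates OpenAI's content policy.";
    if (options.throwError) throw new Error(errorStr);
    return errorStr; // same fallback behaviour as the deleted _moderate()
  }
  return text;
}

// const safe = await moderate("Bad naughty words from user", { throwError: false });
```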