@langchain/classic 1.0.27 → 1.0.28

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298)
  1. package/CHANGELOG.md +13 -0
  2. package/dist/agents/agent.cjs.map +1 -1
  3. package/dist/agents/agent.js.map +1 -1
  4. package/dist/agents/executor.cjs.map +1 -1
  5. package/dist/agents/executor.js.map +1 -1
  6. package/dist/agents/openai_functions/index.cjs.map +1 -1
  7. package/dist/agents/openai_functions/index.d.cts.map +1 -1
  8. package/dist/agents/openai_functions/index.d.ts.map +1 -1
  9. package/dist/agents/openai_functions/index.js.map +1 -1
  10. package/dist/agents/openai_tools/index.cjs.map +1 -1
  11. package/dist/agents/openai_tools/index.d.cts +2 -2
  12. package/dist/agents/openai_tools/index.d.cts.map +1 -1
  13. package/dist/agents/openai_tools/index.d.ts +2 -2
  14. package/dist/agents/openai_tools/index.d.ts.map +1 -1
  15. package/dist/agents/openai_tools/index.js.map +1 -1
  16. package/dist/agents/react/index.d.cts +2 -2
  17. package/dist/agents/react/index.d.cts.map +1 -1
  18. package/dist/agents/react/index.d.ts +2 -2
  19. package/dist/agents/react/index.d.ts.map +1 -1
  20. package/dist/agents/structured_chat/index.d.cts +2 -2
  21. package/dist/agents/structured_chat/index.d.cts.map +1 -1
  22. package/dist/agents/structured_chat/index.d.ts +2 -2
  23. package/dist/agents/structured_chat/index.d.ts.map +1 -1
  24. package/dist/agents/tool_calling/index.d.cts +2 -2
  25. package/dist/agents/tool_calling/index.d.cts.map +1 -1
  26. package/dist/agents/tool_calling/index.d.ts +2 -2
  27. package/dist/agents/tool_calling/index.d.ts.map +1 -1
  28. package/dist/agents/tool_calling/output_parser.cjs.map +1 -1
  29. package/dist/agents/tool_calling/output_parser.d.cts.map +1 -1
  30. package/dist/agents/tool_calling/output_parser.js.map +1 -1
  31. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts +2 -2
  32. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.cts.map +1 -1
  33. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts +2 -2
  34. package/dist/agents/toolkits/conversational_retrieval/token_buffer_memory.d.ts.map +1 -1
  35. package/dist/agents/xml/index.d.cts.map +1 -1
  36. package/dist/agents/xml/index.d.ts.map +1 -1
  37. package/dist/cache/file_system.cjs.map +1 -1
  38. package/dist/cache/file_system.js.map +1 -1
  39. package/dist/chains/analyze_documents_chain.cjs.map +1 -1
  40. package/dist/chains/analyze_documents_chain.js.map +1 -1
  41. package/dist/chains/base.cjs.map +1 -1
  42. package/dist/chains/base.d.cts +2 -2
  43. package/dist/chains/base.d.cts.map +1 -1
  44. package/dist/chains/base.d.ts +2 -2
  45. package/dist/chains/base.d.ts.map +1 -1
  46. package/dist/chains/base.js.map +1 -1
  47. package/dist/chains/chat_vector_db_chain.cjs.map +1 -1
  48. package/dist/chains/chat_vector_db_chain.js.map +1 -1
  49. package/dist/chains/combine_docs_chain.d.cts +2 -2
  50. package/dist/chains/combine_docs_chain.d.cts.map +1 -1
  51. package/dist/chains/combine_docs_chain.d.ts +2 -2
  52. package/dist/chains/combine_docs_chain.d.ts.map +1 -1
  53. package/dist/chains/combine_documents/reduce.cjs.map +1 -1
  54. package/dist/chains/combine_documents/reduce.js.map +1 -1
  55. package/dist/chains/conversational_retrieval_chain.cjs.map +1 -1
  56. package/dist/chains/conversational_retrieval_chain.js.map +1 -1
  57. package/dist/chains/graph_qa/cypher.cjs.map +1 -1
  58. package/dist/chains/graph_qa/cypher.js.map +1 -1
  59. package/dist/chains/llm_chain.cjs.map +1 -1
  60. package/dist/chains/llm_chain.js.map +1 -1
  61. package/dist/chains/load.d.cts +1 -2
  62. package/dist/chains/load.d.cts.map +1 -1
  63. package/dist/chains/load.d.ts +1 -2
  64. package/dist/chains/load.d.ts.map +1 -1
  65. package/dist/chains/openai_functions/base.cjs.map +1 -1
  66. package/dist/chains/openai_functions/base.js.map +1 -1
  67. package/dist/chains/openai_functions/openapi.cjs.map +1 -1
  68. package/dist/chains/openai_functions/openapi.js.map +1 -1
  69. package/dist/chains/openai_moderation.cjs.map +1 -1
  70. package/dist/chains/openai_moderation.js.map +1 -1
  71. package/dist/chains/query_constructor/index.cjs.map +1 -1
  72. package/dist/chains/query_constructor/index.js.map +1 -1
  73. package/dist/chains/question_answering/load.d.cts +1 -2
  74. package/dist/chains/question_answering/load.d.cts.map +1 -1
  75. package/dist/chains/question_answering/load.d.ts +1 -2
  76. package/dist/chains/question_answering/load.d.ts.map +1 -1
  77. package/dist/chains/retrieval.cjs.map +1 -1
  78. package/dist/chains/retrieval.js.map +1 -1
  79. package/dist/chains/retrieval_qa.cjs.map +1 -1
  80. package/dist/chains/retrieval_qa.js.map +1 -1
  81. package/dist/chains/router/utils.cjs.map +1 -1
  82. package/dist/chains/router/utils.js.map +1 -1
  83. package/dist/chains/summarization/load.d.cts +1 -2
  84. package/dist/chains/summarization/load.d.cts.map +1 -1
  85. package/dist/chains/summarization/load.d.ts +1 -2
  86. package/dist/chains/summarization/load.d.ts.map +1 -1
  87. package/dist/chains/vector_db_qa.cjs.map +1 -1
  88. package/dist/chains/vector_db_qa.js.map +1 -1
  89. package/dist/chat_models/universal.cjs +8 -10
  90. package/dist/chat_models/universal.cjs.map +1 -1
  91. package/dist/chat_models/universal.d.cts +3 -5
  92. package/dist/chat_models/universal.d.cts.map +1 -1
  93. package/dist/chat_models/universal.d.ts +3 -5
  94. package/dist/chat_models/universal.d.ts.map +1 -1
  95. package/dist/chat_models/universal.js +8 -10
  96. package/dist/chat_models/universal.js.map +1 -1
  97. package/dist/document_loaders/fs/directory.d.cts.map +1 -1
  98. package/dist/document_loaders/fs/directory.d.ts.map +1 -1
  99. package/dist/document_loaders/fs/json.cjs.map +1 -1
  100. package/dist/document_loaders/fs/json.js.map +1 -1
  101. package/dist/document_transformers/openai_functions.d.cts.map +1 -1
  102. package/dist/evaluation/agents/index.d.cts +1 -0
  103. package/dist/evaluation/agents/trajectory.d.cts +2 -2
  104. package/dist/evaluation/agents/trajectory.d.cts.map +1 -1
  105. package/dist/evaluation/agents/trajectory.d.ts +2 -2
  106. package/dist/evaluation/agents/trajectory.d.ts.map +1 -1
  107. package/dist/evaluation/base.cjs.map +1 -1
  108. package/dist/evaluation/base.d.cts.map +1 -1
  109. package/dist/evaluation/base.js.map +1 -1
  110. package/dist/evaluation/comparison/index.d.cts +1 -0
  111. package/dist/evaluation/comparison/pairwise.cjs.map +1 -1
  112. package/dist/evaluation/comparison/pairwise.d.cts +3 -3
  113. package/dist/evaluation/comparison/pairwise.d.cts.map +1 -1
  114. package/dist/evaluation/comparison/pairwise.d.ts +3 -3
  115. package/dist/evaluation/comparison/pairwise.d.ts.map +1 -1
  116. package/dist/evaluation/comparison/pairwise.js.map +1 -1
  117. package/dist/evaluation/criteria/criteria.cjs.map +1 -1
  118. package/dist/evaluation/criteria/criteria.d.cts +3 -3
  119. package/dist/evaluation/criteria/criteria.d.cts.map +1 -1
  120. package/dist/evaluation/criteria/criteria.d.ts +3 -3
  121. package/dist/evaluation/criteria/criteria.d.ts.map +1 -1
  122. package/dist/evaluation/criteria/criteria.js.map +1 -1
  123. package/dist/evaluation/criteria/index.d.cts +1 -0
  124. package/dist/evaluation/embedding_distance/index.d.cts +1 -0
  125. package/dist/evaluation/loader.cjs.map +1 -1
  126. package/dist/evaluation/loader.d.cts.map +1 -1
  127. package/dist/evaluation/loader.js.map +1 -1
  128. package/dist/evaluation/qa/index.d.cts +1 -0
  129. package/dist/experimental/autogpt/prompt.d.cts +2 -2
  130. package/dist/experimental/autogpt/prompt.d.cts.map +1 -1
  131. package/dist/experimental/autogpt/prompt.d.ts +2 -2
  132. package/dist/experimental/autogpt/prompt.d.ts.map +1 -1
  133. package/dist/experimental/autogpt/prompt_generator.cjs.map +1 -1
  134. package/dist/experimental/autogpt/prompt_generator.js.map +1 -1
  135. package/dist/experimental/autogpt/schema.cjs.map +1 -1
  136. package/dist/experimental/autogpt/schema.js.map +1 -1
  137. package/dist/experimental/generative_agents/generative_agent_memory.cjs.map +1 -1
  138. package/dist/experimental/generative_agents/generative_agent_memory.js.map +1 -1
  139. package/dist/experimental/masking/parser.cjs.map +1 -1
  140. package/dist/experimental/masking/parser.js.map +1 -1
  141. package/dist/experimental/masking/regex_masking_transformer.cjs.map +1 -1
  142. package/dist/experimental/masking/regex_masking_transformer.js.map +1 -1
  143. package/dist/experimental/openai_assistant/index.cjs.map +1 -1
  144. package/dist/experimental/openai_assistant/index.d.cts +3 -3
  145. package/dist/experimental/openai_assistant/index.d.ts +3 -3
  146. package/dist/experimental/openai_assistant/index.js.map +1 -1
  147. package/dist/experimental/openai_files/index.d.cts +4 -4
  148. package/dist/experimental/openai_files/index.d.ts +4 -4
  149. package/dist/experimental/plan_and_execute/agent_executor.cjs.map +1 -1
  150. package/dist/experimental/plan_and_execute/agent_executor.js.map +1 -1
  151. package/dist/experimental/prompts/custom_format.cjs.map +1 -1
  152. package/dist/experimental/prompts/custom_format.js.map +1 -1
  153. package/dist/experimental/prompts/handlebars.cjs.map +1 -1
  154. package/dist/experimental/prompts/handlebars.d.cts +2 -2
  155. package/dist/experimental/prompts/handlebars.d.cts.map +1 -1
  156. package/dist/experimental/prompts/handlebars.d.ts +2 -2
  157. package/dist/experimental/prompts/handlebars.d.ts.map +1 -1
  158. package/dist/experimental/prompts/handlebars.js.map +1 -1
  159. package/dist/hub/base.cjs.map +1 -1
  160. package/dist/hub/base.d.cts +0 -1
  161. package/dist/hub/base.d.cts.map +1 -1
  162. package/dist/hub/base.d.ts +0 -1
  163. package/dist/hub/base.d.ts.map +1 -1
  164. package/dist/hub/base.js.map +1 -1
  165. package/dist/hub/index.cjs.map +1 -1
  166. package/dist/hub/index.js.map +1 -1
  167. package/dist/load/index.cjs.map +1 -1
  168. package/dist/load/index.js.map +1 -1
  169. package/dist/output_parsers/combining.cjs.map +1 -1
  170. package/dist/output_parsers/combining.js.map +1 -1
  171. package/dist/output_parsers/expression_type_handlers/base.cjs.map +1 -1
  172. package/dist/output_parsers/expression_type_handlers/base.d.cts.map +1 -1
  173. package/dist/output_parsers/expression_type_handlers/base.d.ts.map +1 -1
  174. package/dist/output_parsers/expression_type_handlers/base.js.map +1 -1
  175. package/dist/output_parsers/fix.cjs.map +1 -1
  176. package/dist/output_parsers/fix.js.map +1 -1
  177. package/dist/output_parsers/openai_functions.d.cts.map +1 -1
  178. package/dist/output_parsers/openai_functions.d.ts.map +1 -1
  179. package/dist/output_parsers/openai_tools.cjs.map +1 -1
  180. package/dist/output_parsers/openai_tools.js.map +1 -1
  181. package/dist/output_parsers/regex.cjs.map +1 -1
  182. package/dist/output_parsers/regex.js.map +1 -1
  183. package/dist/output_parsers/structured.d.cts +2 -2
  184. package/dist/output_parsers/structured.d.cts.map +1 -1
  185. package/dist/output_parsers/structured.d.ts +2 -2
  186. package/dist/output_parsers/structured.d.ts.map +1 -1
  187. package/dist/retrievers/document_compressors/index.cjs.map +1 -1
  188. package/dist/retrievers/document_compressors/index.js.map +1 -1
  189. package/dist/retrievers/matryoshka_retriever.cjs.map +1 -1
  190. package/dist/retrievers/matryoshka_retriever.d.cts +1 -1
  191. package/dist/retrievers/matryoshka_retriever.d.cts.map +1 -1
  192. package/dist/retrievers/matryoshka_retriever.d.ts +1 -1
  193. package/dist/retrievers/matryoshka_retriever.d.ts.map +1 -1
  194. package/dist/retrievers/matryoshka_retriever.js.map +1 -1
  195. package/dist/retrievers/multi_query.cjs.map +1 -1
  196. package/dist/retrievers/multi_query.js.map +1 -1
  197. package/dist/retrievers/parent_document.cjs.map +1 -1
  198. package/dist/retrievers/parent_document.js.map +1 -1
  199. package/dist/schema/prompt_template.d.cts +1 -2
  200. package/dist/schema/prompt_template.d.cts.map +1 -1
  201. package/dist/schema/prompt_template.d.ts +1 -2
  202. package/dist/schema/prompt_template.d.ts.map +1 -1
  203. package/dist/smith/config.cjs.map +1 -1
  204. package/dist/smith/config.d.cts +1 -1
  205. package/dist/smith/config.d.ts +1 -1
  206. package/dist/smith/config.d.ts.map +1 -1
  207. package/dist/smith/config.js.map +1 -1
  208. package/dist/smith/runner_utils.cjs.map +1 -1
  209. package/dist/smith/runner_utils.d.cts +1 -1
  210. package/dist/smith/runner_utils.d.cts.map +1 -1
  211. package/dist/smith/runner_utils.d.ts +1 -1
  212. package/dist/smith/runner_utils.d.ts.map +1 -1
  213. package/dist/smith/runner_utils.js.map +1 -1
  214. package/dist/sql_db.d.cts +2 -2
  215. package/dist/sql_db.d.cts.map +1 -1
  216. package/dist/sql_db.d.ts +2 -2
  217. package/dist/sql_db.d.ts.map +1 -1
  218. package/dist/storage/encoder_backed.cjs.map +1 -1
  219. package/dist/storage/encoder_backed.js.map +1 -1
  220. package/dist/storage/file_system.cjs.map +1 -1
  221. package/dist/storage/file_system.js.map +1 -1
  222. package/dist/stores/doc/in_memory.cjs.map +1 -1
  223. package/dist/stores/doc/in_memory.js.map +1 -1
  224. package/dist/tools/json.cjs.map +1 -1
  225. package/dist/tools/json.js.map +1 -1
  226. package/dist/tools/webbrowser.cjs.map +1 -1
  227. package/dist/tools/webbrowser.d.cts.map +1 -1
  228. package/dist/tools/webbrowser.d.ts.map +1 -1
  229. package/dist/tools/webbrowser.js.map +1 -1
  230. package/dist/util/document.d.cts +1 -1
  231. package/dist/util/document.d.cts.map +1 -1
  232. package/dist/util/document.d.ts +1 -1
  233. package/dist/util/document.d.ts.map +1 -1
  234. package/dist/util/is-network-error/index.cjs.map +1 -1
  235. package/dist/util/is-network-error/index.js.map +1 -1
  236. package/dist/util/load.cjs.map +1 -1
  237. package/dist/util/load.js.map +1 -1
  238. package/dist/util/openapi.cjs.map +1 -1
  239. package/dist/util/openapi.d.cts +32 -32
  240. package/dist/util/openapi.d.cts.map +1 -1
  241. package/dist/util/openapi.d.ts +32 -32
  242. package/dist/util/openapi.d.ts.map +1 -1
  243. package/dist/util/openapi.js.map +1 -1
  244. package/dist/util/p-retry/index.cjs.map +1 -1
  245. package/dist/util/p-retry/index.js.map +1 -1
  246. package/dist/vectorstores/memory.cjs.map +1 -1
  247. package/dist/vectorstores/memory.js.map +1 -1
  248. package/package.json +25 -48
  249. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/client.d.cts +0 -1494
  250. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/client.d.cts.map +0 -1
  251. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/client.d.ts +0 -1494
  252. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/client.d.ts.map +0 -1
  253. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/_runner.d.cts +0 -1
  254. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/_runner.d.ts +0 -1
  255. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluate_comparative.d.cts +0 -1
  256. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluate_comparative.d.ts +0 -1
  257. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluator.d.cts +0 -66
  258. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluator.d.cts.map +0 -1
  259. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluator.d.ts +0 -66
  260. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/evaluator.d.ts.map +0 -1
  261. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/index.d.cts +0 -1
  262. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/index.d.ts +0 -1
  263. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/string_evaluator.d.cts +0 -1
  264. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/evaluation/string_evaluator.d.ts +0 -1
  265. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/index.d.cts +0 -4
  266. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/index.d.ts +0 -4
  267. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/run_trees.d.cts +0 -145
  268. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/run_trees.d.cts.map +0 -1
  269. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/run_trees.d.ts +0 -145
  270. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/run_trees.d.ts.map +0 -1
  271. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/schemas.d.cts +0 -437
  272. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/schemas.d.cts.map +0 -1
  273. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/schemas.d.ts +0 -437
  274. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/schemas.d.ts.map +0 -1
  275. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/traceable.d.cts +0 -7
  276. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/traceable.d.cts.map +0 -1
  277. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/traceable.d.ts +0 -7
  278. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/traceable.d.ts.map +0 -1
  279. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/types.d.cts +0 -38
  280. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/types.d.cts.map +0 -1
  281. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/types.d.ts +0 -38
  282. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/singletons/types.d.ts.map +0 -1
  283. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/async_caller.d.cts +0 -25
  284. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/async_caller.d.cts.map +0 -1
  285. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/async_caller.d.ts +0 -25
  286. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/async_caller.d.ts.map +0 -1
  287. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/p-queue.d.cts +0 -1
  288. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/p-queue.d.ts +0 -1
  289. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/prompt_cache/index.d.cts +0 -129
  290. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/prompt_cache/index.d.cts.map +0 -1
  291. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/prompt_cache/index.d.ts +0 -129
  292. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/dist/utils/prompt_cache/index.d.ts.map +0 -1
  293. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/evaluation.d.cts +0 -1
  294. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/evaluation.d.ts +0 -1
  295. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/singletons/traceable.d.cts +0 -2
  296. package/dist/node_modules/.pnpm/langsmith@0.5.9_@opentelemetry_api@1.9.0_openai@6.22.0_ws@8.20.0_bufferutil@4.1.0__zod@4.3.6_/node_modules/langsmith/singletons/traceable.d.ts +0 -2
  297. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.d.cts +0 -1
  298. package/dist/node_modules/.pnpm/p-queue@6.6.2/node_modules/p-queue/dist/index.d.ts +0 -1
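The hunks below illustrate the bulk of this release. Each changed .map file is a single-line JSON source map, and the only difference inside its embedded sourcesContent is that lint-suppression comments are renamed from ESLint directives to their Oxlint equivalents, consistent with a lint-tooling migration. A minimal before/after sketch of the rename (a representative comment, not a specific line from the package):

// 1.0.27
// eslint-disable-next-line @typescript-eslint/no-explicit-any
args: Record<string, any>;

// 1.0.28
// oxlint-disable-next-line @typescript-eslint/no-explicit-any
args: Record<string, any>;

In the hunks below only comments change, so the mappings strings are identical on both sides. Elsewhere in the file list, the wholesale deletion of the vendored langsmith type declarations under dist/node_modules and the large package.json change (+25 -48) suggest the release also stops bundling those files, though those hunks are not shown here.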
package/dist/output_parsers/openai_tools.cjs.map
@@ -1 +1 @@
- {"version":3,"file":"openai_tools.cjs","names":["BaseLLMOutputParser"],"sources":["../../src/output_parsers/openai_tools.ts"],"sourcesContent":["import { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport type { ChatGeneration } from \"@langchain/core/outputs\";\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type ParsedToolCall = {\n id?: string;\n\n type: string;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n args: Record<string, any>;\n\n /** @deprecated Use `type` instead. Will be removed in 0.2.0. */\n name: string;\n\n /** @deprecated Use `args` instead. Will be removed in 0.2.0. */\n arguments: Record<string, any>; // eslint-disable-line @typescript-eslint/no-explicit-any\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type JsonOutputToolsParserParams = {\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport class JsonOutputToolsParser extends BaseLLMOutputParser<\n ParsedToolCall[]\n> {\n static lc_name() {\n return \"JsonOutputToolsParser\";\n }\n\n returnId = false;\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n constructor(fields?: JsonOutputToolsParserParams) {\n super(fields);\n this.returnId = fields?.returnId ?? this.returnId;\n }\n\n /**\n * Parses the output and returns a JSON object. If `argsOnly` is true,\n * only the arguments of the function call are returned.\n * @param generations The output of the LLM to parse.\n * @returns A JSON object representation of the function call or its arguments.\n */\n async parseResult(generations: ChatGeneration[]): Promise<ParsedToolCall[]> {\n const toolCalls = generations[0].message.additional_kwargs.tool_calls;\n if (!toolCalls) {\n throw new Error(\n `No tools_call in message ${JSON.stringify(generations)}`\n );\n }\n const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));\n const parsedToolCalls = [];\n for (const toolCall of clonedToolCalls) {\n if (toolCall.function !== undefined) {\n // @ts-expect-error name and arguemnts are defined by Object.defineProperty\n const parsedToolCall: ParsedToolCall = {\n type: toolCall.function.name,\n args: JSON.parse(toolCall.function.arguments),\n };\n\n if (this.returnId) {\n parsedToolCall.id = toolCall.id;\n }\n\n // backward-compatibility with previous\n // versions of Langchain JS, which uses `name` and `arguments`\n Object.defineProperty(parsedToolCall, \"name\", {\n get() {\n return this.type;\n },\n });\n\n Object.defineProperty(parsedToolCall, \"arguments\", {\n get() {\n return this.args;\n },\n });\n\n parsedToolCalls.push(parsedToolCall);\n }\n }\n return parsedToolCalls;\n }\n}\n\nexport type JsonOutputKeyToolsParserParams = {\n keyName: string;\n returnSingle?: boolean;\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport class JsonOutputKeyToolsParser extends BaseLLMOutputParser<any> {\n static lc_name() {\n return \"JsonOutputKeyToolsParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n returnId = false;\n\n /** The type of tool calls to return. */\n keyName: string;\n\n /** Whether to return only the first tool call. */\n returnSingle = false;\n\n initialParser: JsonOutputToolsParser;\n\n constructor(params: JsonOutputKeyToolsParserParams) {\n super(params);\n this.keyName = params.keyName;\n this.returnSingle = params.returnSingle ?? this.returnSingle;\n this.initialParser = new JsonOutputToolsParser(params);\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n async parseResult(generations: ChatGeneration[]): Promise<any> {\n const results = await this.initialParser.parseResult(generations);\n const matchingResults = results.filter(\n (result) => result.type === this.keyName\n );\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let returnedValues: ParsedToolCall[] | Record<string, any>[] =\n matchingResults;\n if (!this.returnId) {\n returnedValues = matchingResults.map((result) => result.args);\n }\n if (this.returnSingle) {\n return returnedValues[0];\n }\n return returnedValues;\n }\n}\n"],"mappings":";;;;;;AAgCA,IAAa,wBAAb,cAA2CA,+BAAAA,oBAEzC;CACA,OAAO,UAAU;AACf,SAAO;;CAGT,WAAW;CAEX,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,YAAY,QAAsC;AAChD,QAAM,OAAO;AACb,OAAK,WAAW,QAAQ,YAAY,KAAK;;;;;;;;CAS3C,MAAM,YAAY,aAA0D;EAC1E,MAAM,YAAY,YAAY,GAAG,QAAQ,kBAAkB;AAC3D,MAAI,CAAC,UACH,OAAM,IAAI,MACR,4BAA4B,KAAK,UAAU,YAAY,GACxD;EAEH,MAAM,kBAAkB,KAAK,MAAM,KAAK,UAAU,UAAU,CAAC;EAC7D,MAAM,kBAAkB,EAAE;AAC1B,OAAK,MAAM,YAAY,gBACrB,KAAI,SAAS,aAAa,KAAA,GAAW;GAEnC,MAAM,iBAAiC;IACrC,MAAM,SAAS,SAAS;IACxB,MAAM,KAAK,MAAM,SAAS,SAAS,UAAU;IAC9C;AAED,OAAI,KAAK,SACP,gBAAe,KAAK,SAAS;AAK/B,UAAO,eAAe,gBAAgB,QAAQ,EAC5C,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,UAAO,eAAe,gBAAgB,aAAa,EACjD,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,mBAAgB,KAAK,eAAe;;AAGxC,SAAO;;;;;;AAeX,IAAa,2BAAb,cAA8CA,+BAAAA,oBAAyB;CACrE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,WAAW;;CAGX;;CAGA,eAAe;CAEf;CAEA,YAAY,QAAwC;AAClD,QAAM,OAAO;AACb,OAAK,UAAU,OAAO;AACtB,OAAK,eAAe,OAAO,gBAAgB,KAAK;AAChD,OAAK,gBAAgB,IAAI,sBAAsB,OAAO;;CAIxD,MAAM,YAAY,aAA6C;EAE7D,MAAM,mBADU,MAAM,KAAK,cAAc,YAAY,YAAY,EACjC,QAC7B,WAAW,OAAO,SAAS,KAAK,QAClC;EAED,IAAI,iBACF;AACF,MAAI,CAAC,KAAK,SACR,kBAAiB,gBAAgB,KAAK,WAAW,OAAO,KAAK;AAE/D,MAAI,KAAK,aACP,QAAO,eAAe;AAExB,SAAO"}
+ {"version":3,"file":"openai_tools.cjs","names":["BaseLLMOutputParser"],"sources":["../../src/output_parsers/openai_tools.ts"],"sourcesContent":["import { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport type { ChatGeneration } from \"@langchain/core/outputs\";\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type ParsedToolCall = {\n id?: string;\n\n type: string;\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n args: Record<string, any>;\n\n /** @deprecated Use `type` instead. Will be removed in 0.2.0. */\n name: string;\n\n /** @deprecated Use `args` instead. Will be removed in 0.2.0. */\n arguments: Record<string, any>; // oxlint-disable-line @typescript-eslint/no-explicit-any\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type JsonOutputToolsParserParams = {\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport class JsonOutputToolsParser extends BaseLLMOutputParser<\n ParsedToolCall[]\n> {\n static lc_name() {\n return \"JsonOutputToolsParser\";\n }\n\n returnId = false;\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n constructor(fields?: JsonOutputToolsParserParams) {\n super(fields);\n this.returnId = fields?.returnId ?? this.returnId;\n }\n\n /**\n * Parses the output and returns a JSON object. If `argsOnly` is true,\n * only the arguments of the function call are returned.\n * @param generations The output of the LLM to parse.\n * @returns A JSON object representation of the function call or its arguments.\n */\n async parseResult(generations: ChatGeneration[]): Promise<ParsedToolCall[]> {\n const toolCalls = generations[0].message.additional_kwargs.tool_calls;\n if (!toolCalls) {\n throw new Error(\n `No tools_call in message ${JSON.stringify(generations)}`\n );\n }\n const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));\n const parsedToolCalls = [];\n for (const toolCall of clonedToolCalls) {\n if (toolCall.function !== undefined) {\n // @ts-expect-error name and arguemnts are defined by Object.defineProperty\n const parsedToolCall: ParsedToolCall = {\n type: toolCall.function.name,\n args: JSON.parse(toolCall.function.arguments),\n };\n\n if (this.returnId) {\n parsedToolCall.id = toolCall.id;\n }\n\n // backward-compatibility with previous\n // versions of Langchain JS, which uses `name` and `arguments`\n Object.defineProperty(parsedToolCall, \"name\", {\n get() {\n return this.type;\n },\n });\n\n Object.defineProperty(parsedToolCall, \"arguments\", {\n get() {\n return this.args;\n },\n });\n\n parsedToolCalls.push(parsedToolCall);\n }\n }\n return parsedToolCalls;\n }\n}\n\nexport type JsonOutputKeyToolsParserParams = {\n keyName: string;\n returnSingle?: boolean;\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport class JsonOutputKeyToolsParser extends BaseLLMOutputParser<any> {\n static lc_name() {\n return \"JsonOutputKeyToolsParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n returnId = false;\n\n /** The type of tool calls to return. */\n keyName: string;\n\n /** Whether to return only the first tool call. */\n returnSingle = false;\n\n initialParser: JsonOutputToolsParser;\n\n constructor(params: JsonOutputKeyToolsParserParams) {\n super(params);\n this.keyName = params.keyName;\n this.returnSingle = params.returnSingle ?? this.returnSingle;\n this.initialParser = new JsonOutputToolsParser(params);\n }\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n async parseResult(generations: ChatGeneration[]): Promise<any> {\n const results = await this.initialParser.parseResult(generations);\n const matchingResults = results.filter(\n (result) => result.type === this.keyName\n );\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n let returnedValues: ParsedToolCall[] | Record<string, any>[] =\n matchingResults;\n if (!this.returnId) {\n returnedValues = matchingResults.map((result) => result.args);\n }\n if (this.returnSingle) {\n return returnedValues[0];\n }\n return returnedValues;\n }\n}\n"],"mappings":";;;;;;AAgCA,IAAa,wBAAb,cAA2CA,+BAAAA,oBAEzC;CACA,OAAO,UAAU;AACf,SAAO;;CAGT,WAAW;CAEX,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,YAAY,QAAsC;AAChD,QAAM,OAAO;AACb,OAAK,WAAW,QAAQ,YAAY,KAAK;;;;;;;;CAS3C,MAAM,YAAY,aAA0D;EAC1E,MAAM,YAAY,YAAY,GAAG,QAAQ,kBAAkB;AAC3D,MAAI,CAAC,UACH,OAAM,IAAI,MACR,4BAA4B,KAAK,UAAU,YAAY,GACxD;EAEH,MAAM,kBAAkB,KAAK,MAAM,KAAK,UAAU,UAAU,CAAC;EAC7D,MAAM,kBAAkB,EAAE;AAC1B,OAAK,MAAM,YAAY,gBACrB,KAAI,SAAS,aAAa,KAAA,GAAW;GAEnC,MAAM,iBAAiC;IACrC,MAAM,SAAS,SAAS;IACxB,MAAM,KAAK,MAAM,SAAS,SAAS,UAAU;IAC9C;AAED,OAAI,KAAK,SACP,gBAAe,KAAK,SAAS;AAK/B,UAAO,eAAe,gBAAgB,QAAQ,EAC5C,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,UAAO,eAAe,gBAAgB,aAAa,EACjD,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,mBAAgB,KAAK,eAAe;;AAGxC,SAAO;;;;;;AAeX,IAAa,2BAAb,cAA8CA,+BAAAA,oBAAyB;CACrE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,WAAW;;CAGX;;CAGA,eAAe;CAEf;CAEA,YAAY,QAAwC;AAClD,QAAM,OAAO;AACb,OAAK,UAAU,OAAO;AACtB,OAAK,eAAe,OAAO,gBAAgB,KAAK;AAChD,OAAK,gBAAgB,IAAI,sBAAsB,OAAO;;CAIxD,MAAM,YAAY,aAA6C;EAE7D,MAAM,mBADU,MAAM,KAAK,cAAc,YAAY,YAAY,EACjC,QAC7B,WAAW,OAAO,SAAS,KAAK,QAClC;EAED,IAAI,iBACF;AACF,MAAI,CAAC,KAAK,SACR,kBAAiB,gBAAgB,KAAK,WAAW,OAAO,KAAK;AAE/D,MAAI,KAAK,aACP,QAAO,eAAe;AAExB,SAAO"}
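One detail worth calling out in the source embedded above: JsonOutputToolsParser.parseResult keeps the deprecated name and arguments fields working by defining them as getters that alias type and args. A standalone sketch of that backward-compatibility pattern (the sample values are hypothetical):

// Legacy `name`/`arguments` aliases via getters, as in parseResult above
const parsedToolCall: { type: string; args: Record<string, unknown> } = {
  type: "get_weather", // hypothetical tool name
  args: { city: "Paris" }, // hypothetical parsed arguments
};
Object.defineProperty(parsedToolCall, "name", {
  get() {
    return this.type;
  },
});
Object.defineProperty(parsedToolCall, "arguments", {
  get() {
    return this.args;
  },
});
// Both aliases now mirror the canonical fields:
console.log((parsedToolCall as any).name); // "get_weather"
console.log((parsedToolCall as any).arguments); // { city: "Paris" }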
package/dist/output_parsers/openai_tools.js.map
@@ -1 +1 @@
- {"version":3,"file":"openai_tools.js","names":[],"sources":["../../src/output_parsers/openai_tools.ts"],"sourcesContent":["import { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport type { ChatGeneration } from \"@langchain/core/outputs\";\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type ParsedToolCall = {\n id?: string;\n\n type: string;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n args: Record<string, any>;\n\n /** @deprecated Use `type` instead. Will be removed in 0.2.0. */\n name: string;\n\n /** @deprecated Use `args` instead. Will be removed in 0.2.0. */\n arguments: Record<string, any>; // eslint-disable-line @typescript-eslint/no-explicit-any\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type JsonOutputToolsParserParams = {\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport class JsonOutputToolsParser extends BaseLLMOutputParser<\n ParsedToolCall[]\n> {\n static lc_name() {\n return \"JsonOutputToolsParser\";\n }\n\n returnId = false;\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n constructor(fields?: JsonOutputToolsParserParams) {\n super(fields);\n this.returnId = fields?.returnId ?? this.returnId;\n }\n\n /**\n * Parses the output and returns a JSON object. If `argsOnly` is true,\n * only the arguments of the function call are returned.\n * @param generations The output of the LLM to parse.\n * @returns A JSON object representation of the function call or its arguments.\n */\n async parseResult(generations: ChatGeneration[]): Promise<ParsedToolCall[]> {\n const toolCalls = generations[0].message.additional_kwargs.tool_calls;\n if (!toolCalls) {\n throw new Error(\n `No tools_call in message ${JSON.stringify(generations)}`\n );\n }\n const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));\n const parsedToolCalls = [];\n for (const toolCall of clonedToolCalls) {\n if (toolCall.function !== undefined) {\n // @ts-expect-error name and arguemnts are defined by Object.defineProperty\n const parsedToolCall: ParsedToolCall = {\n type: toolCall.function.name,\n args: JSON.parse(toolCall.function.arguments),\n };\n\n if (this.returnId) {\n parsedToolCall.id = toolCall.id;\n }\n\n // backward-compatibility with previous\n // versions of Langchain JS, which uses `name` and `arguments`\n Object.defineProperty(parsedToolCall, \"name\", {\n get() {\n return this.type;\n },\n });\n\n Object.defineProperty(parsedToolCall, \"arguments\", {\n get() {\n return this.args;\n },\n });\n\n parsedToolCalls.push(parsedToolCall);\n }\n }\n return parsedToolCalls;\n }\n}\n\nexport type JsonOutputKeyToolsParserParams = {\n keyName: string;\n returnSingle?: boolean;\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport class JsonOutputKeyToolsParser extends BaseLLMOutputParser<any> {\n static lc_name() {\n return \"JsonOutputKeyToolsParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n returnId = false;\n\n /** The type of tool calls to return. */\n keyName: string;\n\n /** Whether to return only the first tool call. */\n returnSingle = false;\n\n initialParser: JsonOutputToolsParser;\n\n constructor(params: JsonOutputKeyToolsParserParams) {\n super(params);\n this.keyName = params.keyName;\n this.returnSingle = params.returnSingle ?? this.returnSingle;\n this.initialParser = new JsonOutputToolsParser(params);\n }\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n async parseResult(generations: ChatGeneration[]): Promise<any> {\n const results = await this.initialParser.parseResult(generations);\n const matchingResults = results.filter(\n (result) => result.type === this.keyName\n );\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n let returnedValues: ParsedToolCall[] | Record<string, any>[] =\n matchingResults;\n if (!this.returnId) {\n returnedValues = matchingResults.map((result) => result.args);\n }\n if (this.returnSingle) {\n return returnedValues[0];\n }\n return returnedValues;\n }\n}\n"],"mappings":";;;;;AAgCA,IAAa,wBAAb,cAA2C,oBAEzC;CACA,OAAO,UAAU;AACf,SAAO;;CAGT,WAAW;CAEX,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,YAAY,QAAsC;AAChD,QAAM,OAAO;AACb,OAAK,WAAW,QAAQ,YAAY,KAAK;;;;;;;;CAS3C,MAAM,YAAY,aAA0D;EAC1E,MAAM,YAAY,YAAY,GAAG,QAAQ,kBAAkB;AAC3D,MAAI,CAAC,UACH,OAAM,IAAI,MACR,4BAA4B,KAAK,UAAU,YAAY,GACxD;EAEH,MAAM,kBAAkB,KAAK,MAAM,KAAK,UAAU,UAAU,CAAC;EAC7D,MAAM,kBAAkB,EAAE;AAC1B,OAAK,MAAM,YAAY,gBACrB,KAAI,SAAS,aAAa,KAAA,GAAW;GAEnC,MAAM,iBAAiC;IACrC,MAAM,SAAS,SAAS;IACxB,MAAM,KAAK,MAAM,SAAS,SAAS,UAAU;IAC9C;AAED,OAAI,KAAK,SACP,gBAAe,KAAK,SAAS;AAK/B,UAAO,eAAe,gBAAgB,QAAQ,EAC5C,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,UAAO,eAAe,gBAAgB,aAAa,EACjD,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,mBAAgB,KAAK,eAAe;;AAGxC,SAAO;;;;;;AAeX,IAAa,2BAAb,cAA8C,oBAAyB;CACrE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,WAAW;;CAGX;;CAGA,eAAe;CAEf;CAEA,YAAY,QAAwC;AAClD,QAAM,OAAO;AACb,OAAK,UAAU,OAAO;AACtB,OAAK,eAAe,OAAO,gBAAgB,KAAK;AAChD,OAAK,gBAAgB,IAAI,sBAAsB,OAAO;;CAIxD,MAAM,YAAY,aAA6C;EAE7D,MAAM,mBADU,MAAM,KAAK,cAAc,YAAY,YAAY,EACjC,QAC7B,WAAW,OAAO,SAAS,KAAK,QAClC;EAED,IAAI,iBACF;AACF,MAAI,CAAC,KAAK,SACR,kBAAiB,gBAAgB,KAAK,WAAW,OAAO,KAAK;AAE/D,MAAI,KAAK,aACP,QAAO,eAAe;AAExB,SAAO"}
+ {"version":3,"file":"openai_tools.js","names":[],"sources":["../../src/output_parsers/openai_tools.ts"],"sourcesContent":["import { BaseLLMOutputParser } from \"@langchain/core/output_parsers\";\nimport type { ChatGeneration } from \"@langchain/core/outputs\";\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type ParsedToolCall = {\n id?: string;\n\n type: string;\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n args: Record<string, any>;\n\n /** @deprecated Use `type` instead. Will be removed in 0.2.0. */\n name: string;\n\n /** @deprecated Use `args` instead. Will be removed in 0.2.0. */\n arguments: Record<string, any>; // oxlint-disable-line @typescript-eslint/no-explicit-any\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport type JsonOutputToolsParserParams = {\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\nexport class JsonOutputToolsParser extends BaseLLMOutputParser<\n ParsedToolCall[]\n> {\n static lc_name() {\n return \"JsonOutputToolsParser\";\n }\n\n returnId = false;\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n constructor(fields?: JsonOutputToolsParserParams) {\n super(fields);\n this.returnId = fields?.returnId ?? this.returnId;\n }\n\n /**\n * Parses the output and returns a JSON object. If `argsOnly` is true,\n * only the arguments of the function call are returned.\n * @param generations The output of the LLM to parse.\n * @returns A JSON object representation of the function call or its arguments.\n */\n async parseResult(generations: ChatGeneration[]): Promise<ParsedToolCall[]> {\n const toolCalls = generations[0].message.additional_kwargs.tool_calls;\n if (!toolCalls) {\n throw new Error(\n `No tools_call in message ${JSON.stringify(generations)}`\n );\n }\n const clonedToolCalls = JSON.parse(JSON.stringify(toolCalls));\n const parsedToolCalls = [];\n for (const toolCall of clonedToolCalls) {\n if (toolCall.function !== undefined) {\n // @ts-expect-error name and arguemnts are defined by Object.defineProperty\n const parsedToolCall: ParsedToolCall = {\n type: toolCall.function.name,\n args: JSON.parse(toolCall.function.arguments),\n };\n\n if (this.returnId) {\n parsedToolCall.id = toolCall.id;\n }\n\n // backward-compatibility with previous\n // versions of Langchain JS, which uses `name` and `arguments`\n Object.defineProperty(parsedToolCall, \"name\", {\n get() {\n return this.type;\n },\n });\n\n Object.defineProperty(parsedToolCall, \"arguments\", {\n get() {\n return this.args;\n },\n });\n\n parsedToolCalls.push(parsedToolCall);\n }\n }\n return parsedToolCalls;\n }\n}\n\nexport type JsonOutputKeyToolsParserParams = {\n keyName: string;\n returnSingle?: boolean;\n /** Whether to return the tool call id. */\n returnId?: boolean;\n};\n\n/**\n * @deprecated Import from \"@langchain/core/output_parsers/openai_tools\"\n */\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport class JsonOutputKeyToolsParser extends BaseLLMOutputParser<any> {\n static lc_name() {\n return \"JsonOutputKeyToolsParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"openai_tools\"];\n\n lc_serializable = true;\n\n returnId = false;\n\n /** The type of tool calls to return. */\n keyName: string;\n\n /** Whether to return only the first tool call. */\n returnSingle = false;\n\n initialParser: JsonOutputToolsParser;\n\n constructor(params: JsonOutputKeyToolsParserParams) {\n super(params);\n this.keyName = params.keyName;\n this.returnSingle = params.returnSingle ?? this.returnSingle;\n this.initialParser = new JsonOutputToolsParser(params);\n }\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n async parseResult(generations: ChatGeneration[]): Promise<any> {\n const results = await this.initialParser.parseResult(generations);\n const matchingResults = results.filter(\n (result) => result.type === this.keyName\n );\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n let returnedValues: ParsedToolCall[] | Record<string, any>[] =\n matchingResults;\n if (!this.returnId) {\n returnedValues = matchingResults.map((result) => result.args);\n }\n if (this.returnSingle) {\n return returnedValues[0];\n }\n return returnedValues;\n }\n}\n"],"mappings":";;;;;AAgCA,IAAa,wBAAb,cAA2C,oBAEzC;CACA,OAAO,UAAU;AACf,SAAO;;CAGT,WAAW;CAEX,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,YAAY,QAAsC;AAChD,QAAM,OAAO;AACb,OAAK,WAAW,QAAQ,YAAY,KAAK;;;;;;;;CAS3C,MAAM,YAAY,aAA0D;EAC1E,MAAM,YAAY,YAAY,GAAG,QAAQ,kBAAkB;AAC3D,MAAI,CAAC,UACH,OAAM,IAAI,MACR,4BAA4B,KAAK,UAAU,YAAY,GACxD;EAEH,MAAM,kBAAkB,KAAK,MAAM,KAAK,UAAU,UAAU,CAAC;EAC7D,MAAM,kBAAkB,EAAE;AAC1B,OAAK,MAAM,YAAY,gBACrB,KAAI,SAAS,aAAa,KAAA,GAAW;GAEnC,MAAM,iBAAiC;IACrC,MAAM,SAAS,SAAS;IACxB,MAAM,KAAK,MAAM,SAAS,SAAS,UAAU;IAC9C;AAED,OAAI,KAAK,SACP,gBAAe,KAAK,SAAS;AAK/B,UAAO,eAAe,gBAAgB,QAAQ,EAC5C,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,UAAO,eAAe,gBAAgB,aAAa,EACjD,MAAM;AACJ,WAAO,KAAK;MAEf,CAAC;AAEF,mBAAgB,KAAK,eAAe;;AAGxC,SAAO;;;;;;AAeX,IAAa,2BAAb,cAA8C,oBAAyB;CACrE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAe;CAE9D,kBAAkB;CAElB,WAAW;;CAGX;;CAGA,eAAe;CAEf;CAEA,YAAY,QAAwC;AAClD,QAAM,OAAO;AACb,OAAK,UAAU,OAAO;AACtB,OAAK,eAAe,OAAO,gBAAgB,KAAK;AAChD,OAAK,gBAAgB,IAAI,sBAAsB,OAAO;;CAIxD,MAAM,YAAY,aAA6C;EAE7D,MAAM,mBADU,MAAM,KAAK,cAAc,YAAY,YAAY,EACjC,QAC7B,WAAW,OAAO,SAAS,KAAK,QAClC;EAED,IAAI,iBACF;AACF,MAAI,CAAC,KAAK,SACR,kBAAiB,gBAAgB,KAAK,WAAW,OAAO,KAAK;AAE/D,MAAI,KAAK,aACP,QAAO,eAAe;AAExB,SAAO"}
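For readers who have not used these deprecated parsers, here is a hedged usage sketch of JsonOutputKeyToolsParser as defined in the sources above. The import path and the tool-call payload are assumptions for illustration; the class's own @deprecated note points to "@langchain/core/output_parsers/openai_tools" as the maintained home.

import { AIMessage } from "@langchain/core/messages";
import type { ChatGeneration } from "@langchain/core/outputs";
// Import path assumed for illustration
import { JsonOutputKeyToolsParser } from "@langchain/classic/output_parsers";

// A generation shaped like an OpenAI tool-call response (hypothetical values)
const generations: ChatGeneration[] = [
  {
    text: "",
    message: new AIMessage({
      content: "",
      additional_kwargs: {
        tool_calls: [
          {
            id: "call_123",
            type: "function",
            function: { name: "get_weather", arguments: '{"city":"Paris"}' },
          },
        ],
      },
    }),
  },
];

const parser = new JsonOutputKeyToolsParser({
  keyName: "get_weather", // keep only tool calls whose parsed `type` matches
  returnSingle: true, // unwrap the array to the first match
});
// Per parseResult above: filter by keyName, drop ids (returnId defaults to
// false), then return the first args object: { city: "Paris" }
const args = await parser.parseResult(generations);
console.log(args);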
package/dist/output_parsers/regex.cjs.map
@@ -1 +1 @@
- {"version":3,"file":"regex.cjs","names":["BaseOutputParser","OutputParserException"],"sources":["../../src/output_parsers/regex.ts"],"sourcesContent":["import {\n BaseOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport type { SerializedFields } from \"../load/map_keys.js\";\n\nexport interface RegExpFields {\n pattern: string;\n flags?: string;\n}\n\n/**\n * Interface for the fields required to create a RegexParser instance.\n */\nexport interface RegexParserFields {\n regex: string | RegExp | RegExpFields;\n outputKeys: string[];\n defaultOutputKey?: string;\n}\n\n/**\n * Class to parse the output of an LLM call into a dictionary.\n * @augments BaseOutputParser\n */\nexport class RegexParser extends BaseOutputParser<Record<string, string>> {\n static lc_name() {\n return \"RegexParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"regex\"];\n\n lc_serializable = true;\n\n get lc_attributes(): SerializedFields | undefined {\n return {\n regex: this.lc_kwargs.regex,\n };\n }\n\n regex: string | RegExp;\n\n outputKeys: string[];\n\n defaultOutputKey?: string;\n\n constructor(fields: RegexParserFields);\n\n constructor(\n regex: string | RegExp,\n outputKeys: string[],\n defaultOutputKey?: string\n );\n\n constructor(\n fields: string | RegExp | RegexParserFields,\n outputKeys?: string[],\n defaultOutputKey?: string\n ) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (typeof fields === \"string\" || fields instanceof RegExp) {\n // eslint-disable-next-line no-param-reassign\n fields = { regex: fields, outputKeys: outputKeys!, defaultOutputKey };\n }\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (fields.regex instanceof RegExp) {\n fields.regex = {\n pattern: fields.regex.source,\n flags: fields.regex.flags,\n };\n }\n super(fields);\n this.regex =\n typeof fields.regex === \"string\"\n ? new RegExp(fields.regex)\n : \"pattern\" in fields.regex\n ? new RegExp(fields.regex.pattern, fields.regex.flags)\n : fields.regex;\n this.outputKeys = fields.outputKeys;\n this.defaultOutputKey = fields.defaultOutputKey;\n }\n\n _type() {\n return \"regex_parser\";\n }\n\n /**\n * Parses the given text using the regex pattern and returns a dictionary\n * with the parsed output. If the regex pattern does not match the text\n * and no defaultOutputKey is provided, throws an OutputParserException.\n * @param text The text to be parsed.\n * @returns A dictionary with the parsed output.\n */\n async parse(text: string): Promise<Record<string, string>> {\n const match = text.match(this.regex);\n if (match) {\n return this.outputKeys.reduce(\n (acc, key, index) => {\n acc[key] = match[index + 1];\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n if (this.defaultOutputKey === undefined) {\n throw new OutputParserException(`Could not parse output: ${text}`, text);\n }\n\n return this.outputKeys.reduce(\n (acc, key) => {\n acc[key] = key === this.defaultOutputKey ? text : \"\";\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n /**\n * Returns a string with instructions on how the LLM output should be\n * formatted to match the regex pattern.\n * @returns A string with formatting instructions.\n */\n getFormatInstructions(): string {\n return `Your response should match the following regex: ${this.regex}`;\n }\n}\n"],"mappings":";;;;;;;AAwBA,IAAa,cAAb,cAAiCA,+BAAAA,iBAAyC;CACxE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAQ;CAEvD,kBAAkB;CAElB,IAAI,gBAA8C;AAChD,SAAO,EACL,OAAO,KAAK,UAAU,OACvB;;CAGH;CAEA;CAEA;CAUA,YACE,QACA,YACA,kBACA;AAEA,MAAI,OAAO,WAAW,YAAY,kBAAkB,OAElD,UAAS;GAAE,OAAO;GAAoB;GAAa;GAAkB;AAGvE,MAAI,OAAO,iBAAiB,OAC1B,QAAO,QAAQ;GACb,SAAS,OAAO,MAAM;GACtB,OAAO,OAAO,MAAM;GACrB;AAEH,QAAM,OAAO;AACb,OAAK,QACH,OAAO,OAAO,UAAU,WACpB,IAAI,OAAO,OAAO,MAAM,GACxB,aAAa,OAAO,QAClB,IAAI,OAAO,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,GACpD,OAAO;AACf,OAAK,aAAa,OAAO;AACzB,OAAK,mBAAmB,OAAO;;CAGjC,QAAQ;AACN,SAAO;;;;;;;;;CAUT,MAAM,MAAM,MAA+C;EACzD,MAAM,QAAQ,KAAK,MAAM,KAAK,MAAM;AACpC,MAAI,MACF,QAAO,KAAK,WAAW,QACpB,KAAK,KAAK,UAAU;AACnB,OAAI,OAAO,MAAM,QAAQ;AACzB,UAAO;KAET,EAAE,CACH;AAGH,MAAI,KAAK,qBAAqB,KAAA,EAC5B,OAAM,IAAIC,+BAAAA,sBAAsB,2BAA2B,QAAQ,KAAK;AAG1E,SAAO,KAAK,WAAW,QACpB,KAAK,QAAQ;AACZ,OAAI,OAAO,QAAQ,KAAK,mBAAmB,OAAO;AAClD,UAAO;KAET,EAAE,CACH;;;;;;;CAQH,wBAAgC;AAC9B,SAAO,mDAAmD,KAAK"}
+ {"version":3,"file":"regex.cjs","names":["BaseOutputParser","OutputParserException"],"sources":["../../src/output_parsers/regex.ts"],"sourcesContent":["import {\n BaseOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport type { SerializedFields } from \"../load/map_keys.js\";\n\nexport interface RegExpFields {\n pattern: string;\n flags?: string;\n}\n\n/**\n * Interface for the fields required to create a RegexParser instance.\n */\nexport interface RegexParserFields {\n regex: string | RegExp | RegExpFields;\n outputKeys: string[];\n defaultOutputKey?: string;\n}\n\n/**\n * Class to parse the output of an LLM call into a dictionary.\n * @augments BaseOutputParser\n */\nexport class RegexParser extends BaseOutputParser<Record<string, string>> {\n static lc_name() {\n return \"RegexParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"regex\"];\n\n lc_serializable = true;\n\n get lc_attributes(): SerializedFields | undefined {\n return {\n regex: this.lc_kwargs.regex,\n };\n }\n\n regex: string | RegExp;\n\n outputKeys: string[];\n\n defaultOutputKey?: string;\n\n constructor(fields: RegexParserFields);\n\n constructor(\n regex: string | RegExp,\n outputKeys: string[],\n defaultOutputKey?: string\n );\n\n constructor(\n fields: string | RegExp | RegexParserFields,\n outputKeys?: string[],\n defaultOutputKey?: string\n ) {\n // oxlint-disable-next-line no-instanceof/no-instanceof\n if (typeof fields === \"string\" || fields instanceof RegExp) {\n // oxlint-disable-next-line no-param-reassign\n fields = { regex: fields, outputKeys: outputKeys!, defaultOutputKey };\n }\n // oxlint-disable-next-line no-instanceof/no-instanceof\n if (fields.regex instanceof RegExp) {\n fields.regex = {\n pattern: fields.regex.source,\n flags: fields.regex.flags,\n };\n }\n super(fields);\n this.regex =\n typeof fields.regex === \"string\"\n ? new RegExp(fields.regex)\n : \"pattern\" in fields.regex\n ? new RegExp(fields.regex.pattern, fields.regex.flags)\n : fields.regex;\n this.outputKeys = fields.outputKeys;\n this.defaultOutputKey = fields.defaultOutputKey;\n }\n\n _type() {\n return \"regex_parser\";\n }\n\n /**\n * Parses the given text using the regex pattern and returns a dictionary\n * with the parsed output. If the regex pattern does not match the text\n * and no defaultOutputKey is provided, throws an OutputParserException.\n * @param text The text to be parsed.\n * @returns A dictionary with the parsed output.\n */\n async parse(text: string): Promise<Record<string, string>> {\n const match = text.match(this.regex);\n if (match) {\n return this.outputKeys.reduce(\n (acc, key, index) => {\n acc[key] = match[index + 1];\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n if (this.defaultOutputKey === undefined) {\n throw new OutputParserException(`Could not parse output: ${text}`, text);\n }\n\n return this.outputKeys.reduce(\n (acc, key) => {\n acc[key] = key === this.defaultOutputKey ? text : \"\";\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n /**\n * Returns a string with instructions on how the LLM output should be\n * formatted to match the regex pattern.\n * @returns A string with formatting instructions.\n */\n getFormatInstructions(): string {\n return `Your response should match the following regex: ${this.regex}`;\n }\n}\n"],"mappings":";;;;;;;AAwBA,IAAa,cAAb,cAAiCA,+BAAAA,iBAAyC;CACxE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAQ;CAEvD,kBAAkB;CAElB,IAAI,gBAA8C;AAChD,SAAO,EACL,OAAO,KAAK,UAAU,OACvB;;CAGH;CAEA;CAEA;CAUA,YACE,QACA,YACA,kBACA;AAEA,MAAI,OAAO,WAAW,YAAY,kBAAkB,OAElD,UAAS;GAAE,OAAO;GAAoB;GAAa;GAAkB;AAGvE,MAAI,OAAO,iBAAiB,OAC1B,QAAO,QAAQ;GACb,SAAS,OAAO,MAAM;GACtB,OAAO,OAAO,MAAM;GACrB;AAEH,QAAM,OAAO;AACb,OAAK,QACH,OAAO,OAAO,UAAU,WACpB,IAAI,OAAO,OAAO,MAAM,GACxB,aAAa,OAAO,QAClB,IAAI,OAAO,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,GACpD,OAAO;AACf,OAAK,aAAa,OAAO;AACzB,OAAK,mBAAmB,OAAO;;CAGjC,QAAQ;AACN,SAAO;;;;;;;;;CAUT,MAAM,MAAM,MAA+C;EACzD,MAAM,QAAQ,KAAK,MAAM,KAAK,MAAM;AACpC,MAAI,MACF,QAAO,KAAK,WAAW,QACpB,KAAK,KAAK,UAAU;AACnB,OAAI,OAAO,MAAM,QAAQ;AACzB,UAAO;KAET,EAAE,CACH;AAGH,MAAI,KAAK,qBAAqB,KAAA,EAC5B,OAAM,IAAIC,+BAAAA,sBAAsB,2BAA2B,QAAQ,KAAK;AAG1E,SAAO,KAAK,WAAW,QACpB,KAAK,QAAQ;AACZ,OAAI,OAAO,QAAQ,KAAK,mBAAmB,OAAO;AAClD,UAAO;KAET,EAAE,CACH;;;;;;;CAQH,wBAAgC;AAC9B,SAAO,mDAAmD,KAAK"}
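Similarly, a brief sketch of RegexParser from the regex sources above (import path assumed, sample strings hypothetical). Capture groups map positionally onto outputKeys, and defaultOutputKey controls the fallback when the pattern does not match:

// Import path assumed for illustration
import { RegexParser } from "@langchain/classic/output_parsers";

const parser = new RegexParser(
  /Answer: (.*?)\nConfidence: (.*)/, // two capture groups...
  ["answer", "confidence"], // ...mapped onto these keys by position
  "answer" // defaultOutputKey, used when the regex misses
);

const hit = await parser.parse("Answer: 42\nConfidence: high");
// -> { answer: "42", confidence: "high" }

const miss = await parser.parse("unstructured text");
// No match: the defaultOutputKey receives the whole text, other keys get ""
// -> { answer: "unstructured text", confidence: "" }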
package/dist/output_parsers/regex.js.map
@@ -1 +1 @@
- {"version":3,"file":"regex.js","names":[],"sources":["../../src/output_parsers/regex.ts"],"sourcesContent":["import {\n BaseOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport type { SerializedFields } from \"../load/map_keys.js\";\n\nexport interface RegExpFields {\n pattern: string;\n flags?: string;\n}\n\n/**\n * Interface for the fields required to create a RegexParser instance.\n */\nexport interface RegexParserFields {\n regex: string | RegExp | RegExpFields;\n outputKeys: string[];\n defaultOutputKey?: string;\n}\n\n/**\n * Class to parse the output of an LLM call into a dictionary.\n * @augments BaseOutputParser\n */\nexport class RegexParser extends BaseOutputParser<Record<string, string>> {\n static lc_name() {\n return \"RegexParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"regex\"];\n\n lc_serializable = true;\n\n get lc_attributes(): SerializedFields | undefined {\n return {\n regex: this.lc_kwargs.regex,\n };\n }\n\n regex: string | RegExp;\n\n outputKeys: string[];\n\n defaultOutputKey?: string;\n\n constructor(fields: RegexParserFields);\n\n constructor(\n regex: string | RegExp,\n outputKeys: string[],\n defaultOutputKey?: string\n );\n\n constructor(\n fields: string | RegExp | RegexParserFields,\n outputKeys?: string[],\n defaultOutputKey?: string\n ) {\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (typeof fields === \"string\" || fields instanceof RegExp) {\n // eslint-disable-next-line no-param-reassign\n fields = { regex: fields, outputKeys: outputKeys!, defaultOutputKey };\n }\n // eslint-disable-next-line no-instanceof/no-instanceof\n if (fields.regex instanceof RegExp) {\n fields.regex = {\n pattern: fields.regex.source,\n flags: fields.regex.flags,\n };\n }\n super(fields);\n this.regex =\n typeof fields.regex === \"string\"\n ? new RegExp(fields.regex)\n : \"pattern\" in fields.regex\n ? new RegExp(fields.regex.pattern, fields.regex.flags)\n : fields.regex;\n this.outputKeys = fields.outputKeys;\n this.defaultOutputKey = fields.defaultOutputKey;\n }\n\n _type() {\n return \"regex_parser\";\n }\n\n /**\n * Parses the given text using the regex pattern and returns a dictionary\n * with the parsed output. If the regex pattern does not match the text\n * and no defaultOutputKey is provided, throws an OutputParserException.\n * @param text The text to be parsed.\n * @returns A dictionary with the parsed output.\n */\n async parse(text: string): Promise<Record<string, string>> {\n const match = text.match(this.regex);\n if (match) {\n return this.outputKeys.reduce(\n (acc, key, index) => {\n acc[key] = match[index + 1];\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n if (this.defaultOutputKey === undefined) {\n throw new OutputParserException(`Could not parse output: ${text}`, text);\n }\n\n return this.outputKeys.reduce(\n (acc, key) => {\n acc[key] = key === this.defaultOutputKey ? text : \"\";\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n /**\n * Returns a string with instructions on how the LLM output should be\n * formatted to match the regex pattern.\n * @returns A string with formatting instructions.\n */\n getFormatInstructions(): string {\n return `Your response should match the following regex: ${this.regex}`;\n }\n}\n"],"mappings":";;;;;;AAwBA,IAAa,cAAb,cAAiC,iBAAyC;CACxE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAQ;CAEvD,kBAAkB;CAElB,IAAI,gBAA8C;AAChD,SAAO,EACL,OAAO,KAAK,UAAU,OACvB;;CAGH;CAEA;CAEA;CAUA,YACE,QACA,YACA,kBACA;AAEA,MAAI,OAAO,WAAW,YAAY,kBAAkB,OAElD,UAAS;GAAE,OAAO;GAAoB;GAAa;GAAkB;AAGvE,MAAI,OAAO,iBAAiB,OAC1B,QAAO,QAAQ;GACb,SAAS,OAAO,MAAM;GACtB,OAAO,OAAO,MAAM;GACrB;AAEH,QAAM,OAAO;AACb,OAAK,QACH,OAAO,OAAO,UAAU,WACpB,IAAI,OAAO,OAAO,MAAM,GACxB,aAAa,OAAO,QAClB,IAAI,OAAO,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,GACpD,OAAO;AACf,OAAK,aAAa,OAAO;AACzB,OAAK,mBAAmB,OAAO;;CAGjC,QAAQ;AACN,SAAO;;;;;;;;;CAUT,MAAM,MAAM,MAA+C;EACzD,MAAM,QAAQ,KAAK,MAAM,KAAK,MAAM;AACpC,MAAI,MACF,QAAO,KAAK,WAAW,QACpB,KAAK,KAAK,UAAU;AACnB,OAAI,OAAO,MAAM,QAAQ;AACzB,UAAO;KAET,EAAE,CACH;AAGH,MAAI,KAAK,qBAAqB,KAAA,EAC5B,OAAM,IAAI,sBAAsB,2BAA2B,QAAQ,KAAK;AAG1E,SAAO,KAAK,WAAW,QACpB,KAAK,QAAQ;AACZ,OAAI,OAAO,QAAQ,KAAK,mBAAmB,OAAO;AAClD,UAAO;KAET,EAAE,CACH;;;;;;;CAQH,wBAAgC;AAC9B,SAAO,mDAAmD,KAAK"}
+ {"version":3,"file":"regex.js","names":[],"sources":["../../src/output_parsers/regex.ts"],"sourcesContent":["import {\n BaseOutputParser,\n OutputParserException,\n} from \"@langchain/core/output_parsers\";\nimport type { SerializedFields } from \"../load/map_keys.js\";\n\nexport interface RegExpFields {\n pattern: string;\n flags?: string;\n}\n\n/**\n * Interface for the fields required to create a RegexParser instance.\n */\nexport interface RegexParserFields {\n regex: string | RegExp | RegExpFields;\n outputKeys: string[];\n defaultOutputKey?: string;\n}\n\n/**\n * Class to parse the output of an LLM call into a dictionary.\n * @augments BaseOutputParser\n */\nexport class RegexParser extends BaseOutputParser<Record<string, string>> {\n static lc_name() {\n return \"RegexParser\";\n }\n\n lc_namespace = [\"langchain\", \"output_parsers\", \"regex\"];\n\n lc_serializable = true;\n\n get lc_attributes(): SerializedFields | undefined {\n return {\n regex: this.lc_kwargs.regex,\n };\n }\n\n regex: string | RegExp;\n\n outputKeys: string[];\n\n defaultOutputKey?: string;\n\n constructor(fields: RegexParserFields);\n\n constructor(\n regex: string | RegExp,\n outputKeys: string[],\n defaultOutputKey?: string\n );\n\n constructor(\n fields: string | RegExp | RegexParserFields,\n outputKeys?: string[],\n defaultOutputKey?: string\n ) {\n // oxlint-disable-next-line no-instanceof/no-instanceof\n if (typeof fields === \"string\" || fields instanceof RegExp) {\n // oxlint-disable-next-line no-param-reassign\n fields = { regex: fields, outputKeys: outputKeys!, defaultOutputKey };\n }\n // oxlint-disable-next-line no-instanceof/no-instanceof\n if (fields.regex instanceof RegExp) {\n fields.regex = {\n pattern: fields.regex.source,\n flags: fields.regex.flags,\n };\n }\n super(fields);\n this.regex =\n typeof fields.regex === \"string\"\n ? new RegExp(fields.regex)\n : \"pattern\" in fields.regex\n ? new RegExp(fields.regex.pattern, fields.regex.flags)\n : fields.regex;\n this.outputKeys = fields.outputKeys;\n this.defaultOutputKey = fields.defaultOutputKey;\n }\n\n _type() {\n return \"regex_parser\";\n }\n\n /**\n * Parses the given text using the regex pattern and returns a dictionary\n * with the parsed output. If the regex pattern does not match the text\n * and no defaultOutputKey is provided, throws an OutputParserException.\n * @param text The text to be parsed.\n * @returns A dictionary with the parsed output.\n */\n async parse(text: string): Promise<Record<string, string>> {\n const match = text.match(this.regex);\n if (match) {\n return this.outputKeys.reduce(\n (acc, key, index) => {\n acc[key] = match[index + 1];\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n if (this.defaultOutputKey === undefined) {\n throw new OutputParserException(`Could not parse output: ${text}`, text);\n }\n\n return this.outputKeys.reduce(\n (acc, key) => {\n acc[key] = key === this.defaultOutputKey ? 
text : \"\";\n return acc;\n },\n {} as Record<string, string>\n );\n }\n\n /**\n * Returns a string with instructions on how the LLM output should be\n * formatted to match the regex pattern.\n * @returns A string with formatting instructions.\n */\n getFormatInstructions(): string {\n return `Your response should match the following regex: ${this.regex}`;\n }\n}\n"],"mappings":";;;;;;AAwBA,IAAa,cAAb,cAAiC,iBAAyC;CACxE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAkB;EAAQ;CAEvD,kBAAkB;CAElB,IAAI,gBAA8C;AAChD,SAAO,EACL,OAAO,KAAK,UAAU,OACvB;;CAGH;CAEA;CAEA;CAUA,YACE,QACA,YACA,kBACA;AAEA,MAAI,OAAO,WAAW,YAAY,kBAAkB,OAElD,UAAS;GAAE,OAAO;GAAoB;GAAa;GAAkB;AAGvE,MAAI,OAAO,iBAAiB,OAC1B,QAAO,QAAQ;GACb,SAAS,OAAO,MAAM;GACtB,OAAO,OAAO,MAAM;GACrB;AAEH,QAAM,OAAO;AACb,OAAK,QACH,OAAO,OAAO,UAAU,WACpB,IAAI,OAAO,OAAO,MAAM,GACxB,aAAa,OAAO,QAClB,IAAI,OAAO,OAAO,MAAM,SAAS,OAAO,MAAM,MAAM,GACpD,OAAO;AACf,OAAK,aAAa,OAAO;AACzB,OAAK,mBAAmB,OAAO;;CAGjC,QAAQ;AACN,SAAO;;;;;;;;;CAUT,MAAM,MAAM,MAA+C;EACzD,MAAM,QAAQ,KAAK,MAAM,KAAK,MAAM;AACpC,MAAI,MACF,QAAO,KAAK,WAAW,QACpB,KAAK,KAAK,UAAU;AACnB,OAAI,OAAO,MAAM,QAAQ;AACzB,UAAO;KAET,EAAE,CACH;AAGH,MAAI,KAAK,qBAAqB,KAAA,EAC5B,OAAM,IAAI,sBAAsB,2BAA2B,QAAQ,KAAK;AAG1E,SAAO,KAAK,WAAW,QACpB,KAAK,QAAQ;AACZ,OAAI,OAAO,QAAQ,KAAK,mBAAmB,OAAO;AAClD,UAAO;KAET,EAAE,CACH;;;;;;;CAQH,wBAAgC;AAC9B,SAAO,mDAAmD,KAAK"}
@@ -1,6 +1,6 @@
  import { BaseOutputParser, FormatInstructionsOptions } from "@langchain/core/output_parsers";
  import { InferInteropZodOutput, InteropZodType } from "@langchain/core/utils/types";
- import * as _langchain_core_load_serializable0 from "@langchain/core/load/serializable";
+ import * as _$_langchain_core_load_serializable0 from "@langchain/core/load/serializable";
  import { z } from "zod/v3";
 
  //#region src/output_parsers/structured.d.ts
@@ -14,7 +14,7 @@ declare class StructuredOutputParser<T extends InteropZodType> extends BaseOutpu
  schema: T;
  static lc_name(): string;
  lc_namespace: string[];
- toJSON(): _langchain_core_load_serializable0.SerializedNotImplemented;
+ toJSON(): _$_langchain_core_load_serializable0.SerializedNotImplemented;
  constructor(schema: T);
  /**
  * Creates a new StructuredOutputParser from a Zod schema.
@@ -1 +1 @@
- {"version":3,"file":"structured.d.cts","names":[],"sources":["../../src/output_parsers/structured.ts"],"mappings":";;;;;;KAqBY,uCAAA;EACV,kBAAA;AAAA;AAAA,UAGe,qCAAA,SAA8C,yBAAA;EAC7D,kBAAA;AAAA;AAAA,cAGW,sBAAA,WACD,cAAA,UACF,gBAAA,CAAiB,qBAAA,CAAsB,CAAA;EAW5B,MAAA,EAAQ,CAAA;EAAA,OAVpB,OAAA,CAAA;EAIP,YAAA;EAEA,MAAA,CAAA,GAI4B,kCAAA,CAJtB,wBAAA;EAIN,WAAA,CAAmB,MAAA,EAAQ,CAAA;EAhBT;AAGpB;;;;EAHoB,OAyBX,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,sBAAA,CAAA,CAAA;EApB/B;;;;;;EAAA,OA8BlB,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,sBAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;EA2Ce;;;;;;EAvB3B,qBAAA,CAAA;EAnDyB;;;;;EA0EnB,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,qBAAA,CAAsB,CAAA;AAAA;;;;;cAuB9C,kCAAA,WACD,cAAA,UACF,sBAAA,CAAuB,CAAA;EAAA,OACxB,OAAA,CAAA;EAIP,qBAAA,CACE,OAAA,GAAU,qCAAA;EAAA,QAcJ,oBAAA;EAAA,OAmED,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,kCAAA,CAAA,CAAA;EAAA,OAIjD,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,kCAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;;UAeG,sCAAA,WACL,cAAA;EAEV,WAAA,EAAa,CAAA;AAAA;;;;;uBAOO,gCAAA,WACV,cAAA,uBAEF,gBAAA,CAAiB,CAAA;EAAA,QACjB,qBAAA;EAER,WAAA,CAAA;IAAc;EAAA,GAAe,sCAAA,CAAuC,CAAA;EApJzC;;;;;AAuB7B;EAvB6B,SAiKlB,eAAA,CAAgB,KAAA,EAAO,qBAAA,CAAsB,CAAA,IAAK,OAAA,CAAQ,CAAA;EAE7D,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,CAAA;EAcnC,qBAAA,CAAA;AAAA"}
+ {"version":3,"file":"structured.d.cts","names":[],"sources":["../../src/output_parsers/structured.ts"],"mappings":";;;;;;KAqBY,uCAAA;EACV,kBAAA;AAAA;AAAA,UAGe,qCAAA,SAA8C,yBAAA;EAC7D,kBAAA;AAAA;AAAA,cAGW,sBAAA,WACD,cAAA,UACF,gBAAA,CAAiB,qBAAA,CAAsB,CAAA;EAW5B,MAAA,EAAQ,CAAA;EAAA,OAVpB,OAAA,CAAA;EAIP,YAAA;EAEA,MAAA,CAAA,GAI4B,oCAAA,CAJtB,wBAAA;EAIN,WAAA,CAAmB,MAAA,EAAQ,CAAA;EAhBT;AAGpB;;;;EAHoB,OAyBX,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,sBAAA,CAAA,CAAA;EApB/B;;;;;;EAAA,OA8BlB,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,sBAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;EA2Ce;;;;;;EAvB3B,qBAAA,CAAA;EAnDyB;;;;;EA0EnB,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,qBAAA,CAAsB,CAAA;AAAA;;;;;cAuB9C,kCAAA,WACD,cAAA,UACF,sBAAA,CAAuB,CAAA;EAAA,OACxB,OAAA,CAAA;EAIP,qBAAA,CACE,OAAA,GAAU,qCAAA;EAAA,QAcJ,oBAAA;EAAA,OAmED,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,kCAAA,CAAA,CAAA;EAAA,OAIjD,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,kCAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;;UAeG,sCAAA,WACL,cAAA;EAEV,WAAA,EAAa,CAAA;AAAA;;;;;uBAOO,gCAAA,WACV,cAAA,uBAEF,gBAAA,CAAiB,CAAA;EAAA,QACjB,qBAAA;EAER,WAAA,CAAA;IAAc;EAAA,GAAe,sCAAA,CAAuC,CAAA;EApJzC;;;;;AAuB7B;EAvB6B,SAiKlB,eAAA,CAAgB,KAAA,EAAO,qBAAA,CAAsB,CAAA,IAAK,OAAA,CAAQ,CAAA;EAE7D,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,CAAA;EAcnC,qBAAA,CAAA;AAAA"}
@@ -1,4 +1,4 @@
- import * as _langchain_core_load_serializable0 from "@langchain/core/load/serializable";
+ import * as _$_langchain_core_load_serializable0 from "@langchain/core/load/serializable";
  import { BaseOutputParser, FormatInstructionsOptions } from "@langchain/core/output_parsers";
  import { InferInteropZodOutput, InteropZodType } from "@langchain/core/utils/types";
  import { z } from "zod/v3";
@@ -14,7 +14,7 @@ declare class StructuredOutputParser<T extends InteropZodType> extends BaseOutpu
  schema: T;
  static lc_name(): string;
  lc_namespace: string[];
- toJSON(): _langchain_core_load_serializable0.SerializedNotImplemented;
+ toJSON(): _$_langchain_core_load_serializable0.SerializedNotImplemented;
  constructor(schema: T);
  /**
  * Creates a new StructuredOutputParser from a Zod schema.
@@ -1 +1 @@
- {"version":3,"file":"structured.d.ts","names":[],"sources":["../../src/output_parsers/structured.ts"],"mappings":";;;;;;KAqBY,uCAAA;EACV,kBAAA;AAAA;AAAA,UAGe,qCAAA,SAA8C,yBAAA;EAC7D,kBAAA;AAAA;AAAA,cAGW,sBAAA,WACD,cAAA,UACF,gBAAA,CAAiB,qBAAA,CAAsB,CAAA;EAW5B,MAAA,EAAQ,CAAA;EAAA,OAVpB,OAAA,CAAA;EAIP,YAAA;EAEA,MAAA,CAAA,GAI4B,kCAAA,CAJtB,wBAAA;EAIN,WAAA,CAAmB,MAAA,EAAQ,CAAA;EAhBT;AAGpB;;;;EAHoB,OAyBX,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,sBAAA,CAAA,CAAA;EApB/B;;;;;;EAAA,OA8BlB,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,sBAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;EA2Ce;;;;;;EAvB3B,qBAAA,CAAA;EAnDyB;;;;;EA0EnB,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,qBAAA,CAAsB,CAAA;AAAA;;;;;cAuB9C,kCAAA,WACD,cAAA,UACF,sBAAA,CAAuB,CAAA;EAAA,OACxB,OAAA,CAAA;EAIP,qBAAA,CACE,OAAA,GAAU,qCAAA;EAAA,QAcJ,oBAAA;EAAA,OAmED,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,kCAAA,CAAA,CAAA;EAAA,OAIjD,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,kCAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;;UAeG,sCAAA,WACL,cAAA;EAEV,WAAA,EAAa,CAAA;AAAA;;;;;uBAOO,gCAAA,WACV,cAAA,uBAEF,gBAAA,CAAiB,CAAA;EAAA,QACjB,qBAAA;EAER,WAAA,CAAA;IAAc;EAAA,GAAe,sCAAA,CAAuC,CAAA;EApJzC;;;;;AAuB7B;EAvB6B,SAiKlB,eAAA,CAAgB,KAAA,EAAO,qBAAA,CAAsB,CAAA,IAAK,OAAA,CAAQ,CAAA;EAE7D,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,CAAA;EAcnC,qBAAA,CAAA;AAAA"}
+ {"version":3,"file":"structured.d.ts","names":[],"sources":["../../src/output_parsers/structured.ts"],"mappings":";;;;;;KAqBY,uCAAA;EACV,kBAAA;AAAA;AAAA,UAGe,qCAAA,SAA8C,yBAAA;EAC7D,kBAAA;AAAA;AAAA,cAGW,sBAAA,WACD,cAAA,UACF,gBAAA,CAAiB,qBAAA,CAAsB,CAAA;EAW5B,MAAA,EAAQ,CAAA;EAAA,OAVpB,OAAA,CAAA;EAIP,YAAA;EAEA,MAAA,CAAA,GAI4B,oCAAA,CAJtB,wBAAA;EAIN,WAAA,CAAmB,MAAA,EAAQ,CAAA;EAhBT;AAGpB;;;;EAHoB,OAyBX,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,sBAAA,CAAA,CAAA;EApB/B;;;;;;EAAA,OA8BlB,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,sBAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;EA2Ce;;;;;;EAvB3B,qBAAA,CAAA;EAnDyB;;;;;EA0EnB,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,qBAAA,CAAsB,CAAA;AAAA;;;;;cAuB9C,kCAAA,WACD,cAAA,UACF,sBAAA,CAAuB,CAAA;EAAA,OACxB,OAAA,CAAA;EAIP,qBAAA,CACE,OAAA,GAAU,qCAAA;EAAA,QAcJ,oBAAA;EAAA,OAmED,aAAA,WAAwB,cAAA,CAAA,CAAgB,MAAA,EAAQ,CAAA,GAAC,kCAAA,CAAA,CAAA;EAAA,OAIjD,wBAAA;IAAA,CAAsC,GAAA;EAAA,EAAA,CAC3C,OAAA,EAAS,CAAA,GAAC,kCAAA,CAAA,CAAA,CAAA,SAAA;IAAA;;;;;;;UAeG,sCAAA,WACL,cAAA;EAEV,WAAA,EAAa,CAAA;AAAA;;;;;uBAOO,gCAAA,WACV,cAAA,uBAEF,gBAAA,CAAiB,CAAA;EAAA,QACjB,qBAAA;EAER,WAAA,CAAA;IAAc;EAAA,GAAe,sCAAA,CAAuC,CAAA;EApJzC;;;;;AAuB7B;EAvB6B,SAiKlB,eAAA,CAAgB,KAAA,EAAO,qBAAA,CAAsB,CAAA,IAAK,OAAA,CAAQ,CAAA;EAE7D,KAAA,CAAM,IAAA,WAAe,OAAA,CAAQ,CAAA;EAcnC,qBAAA,CAAA;AAAA"}
@@ -1 +1 @@
- {"version":3,"file":"index.cjs","names":[],"sources":["../../../src/retrievers/document_compressors/index.ts"],"sourcesContent":["import type { DocumentInterface } from \"@langchain/core/documents\";\nimport { BaseDocumentTransformer } from \"@langchain/core/documents\";\nimport { Callbacks } from \"@langchain/core/callbacks/manager\";\n\n/**\n * Base Document Compression class. All compressors should extend this class.\n */\nexport abstract class BaseDocumentCompressor {\n /**\n * Abstract method that must be implemented by any class that extends\n * `BaseDocumentCompressor`. This method takes an array of `Document`\n * objects and a query string as parameters and returns a Promise that\n * resolves with an array of compressed `Document` objects.\n * @param documents An array of `Document` objects to be compressed.\n * @param query A query string.\n * @returns A Promise that resolves with an array of compressed `Document` objects.\n */\n abstract compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]>;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor {\n return x?.compressDocuments !== undefined;\n }\n}\n\n/**\n * Document compressor that uses a pipeline of Transformers.\n * @example\n * ```typescript\n * const compressorPipeline = new DocumentCompressorPipeline({\n * transformers: [\n * new RecursiveCharacterTextSplitter({\n * chunkSize: 200,\n * chunkOverlap: 0,\n * }),\n * new EmbeddingsFilter({\n * embeddings: new OpenAIEmbeddings(),\n * similarityThreshold: 0.8,\n * k: 5,\n * }),\n * ],\n * });\n * const retriever = new ContextualCompressionRetriever({\n * baseCompressor: compressorPipeline,\n * baseRetriever: new TavilySearchAPIRetriever({\n * includeRawContent: true,\n * }),\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\",\n * );\n * console.log({ retrievedDocs });\n * ```\n */\nexport class DocumentCompressorPipeline extends BaseDocumentCompressor {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n\n constructor(fields: {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n }) {\n super();\n this.transformers = fields.transformers;\n }\n\n async compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]> {\n let transformedDocuments = documents;\n for (const transformer of this.transformers) {\n if (BaseDocumentCompressor.isBaseDocumentCompressor(transformer)) {\n transformedDocuments = await transformer.compressDocuments(\n transformedDocuments,\n query,\n callbacks\n );\n } else {\n transformedDocuments =\n await transformer.transformDocuments(transformedDocuments);\n }\n }\n return transformedDocuments;\n }\n}\n"],"mappings":";;;;;;;;;AAOA,IAAsB,yBAAtB,MAA6C;CAiB3C,OAAO,yBAAyB,GAAqC;AACnE,SAAO,GAAG,sBAAsB,KAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiCpC,IAAa,6BAAb,cAAgD,uBAAuB;CACrE;CAEA,YAAY,QAET;AACD,SAAO;AACP,OAAK,eAAe,OAAO;;CAG7B,MAAM,kBACJ,WACA,OACA,WAC8B;EAC9B,IAAI,uBAAuB;AAC3B,OAAK,MAAM,eAAe,KAAK,aAC7B,KAAI,uBAAuB,yBAAyB,YAAY,CAC9D,wBAAuB,MAAM,YAAY,kBACvC,sBACA,OACA,UACD;MAED,wBACE,MAAM,YAAY,mBAAmB,qBAAqB;AAGhE,SAAO"}
+ {"version":3,"file":"index.cjs","names":[],"sources":["../../../src/retrievers/document_compressors/index.ts"],"sourcesContent":["import type { DocumentInterface } from \"@langchain/core/documents\";\nimport { BaseDocumentTransformer } from \"@langchain/core/documents\";\nimport { Callbacks } from \"@langchain/core/callbacks/manager\";\n\n/**\n * Base Document Compression class. All compressors should extend this class.\n */\nexport abstract class BaseDocumentCompressor {\n /**\n * Abstract method that must be implemented by any class that extends\n * `BaseDocumentCompressor`. This method takes an array of `Document`\n * objects and a query string as parameters and returns a Promise that\n * resolves with an array of compressed `Document` objects.\n * @param documents An array of `Document` objects to be compressed.\n * @param query A query string.\n * @returns A Promise that resolves with an array of compressed `Document` objects.\n */\n abstract compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]>;\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor {\n return x?.compressDocuments !== undefined;\n }\n}\n\n/**\n * Document compressor that uses a pipeline of Transformers.\n * @example\n * ```typescript\n * const compressorPipeline = new DocumentCompressorPipeline({\n * transformers: [\n * new RecursiveCharacterTextSplitter({\n * chunkSize: 200,\n * chunkOverlap: 0,\n * }),\n * new EmbeddingsFilter({\n * embeddings: new OpenAIEmbeddings(),\n * similarityThreshold: 0.8,\n * k: 5,\n * }),\n * ],\n * });\n * const retriever = new ContextualCompressionRetriever({\n * baseCompressor: compressorPipeline,\n * baseRetriever: new TavilySearchAPIRetriever({\n * includeRawContent: true,\n * }),\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\",\n * );\n * console.log({ retrievedDocs });\n * ```\n */\nexport class DocumentCompressorPipeline extends BaseDocumentCompressor {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n\n constructor(fields: {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n }) {\n super();\n this.transformers = fields.transformers;\n }\n\n async compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]> {\n let transformedDocuments = documents;\n for (const transformer of this.transformers) {\n if (BaseDocumentCompressor.isBaseDocumentCompressor(transformer)) {\n transformedDocuments = await transformer.compressDocuments(\n transformedDocuments,\n query,\n callbacks\n );\n } else {\n transformedDocuments =\n await transformer.transformDocuments(transformedDocuments);\n }\n }\n return transformedDocuments;\n }\n}\n"],"mappings":";;;;;;;;;AAOA,IAAsB,yBAAtB,MAA6C;CAiB3C,OAAO,yBAAyB,GAAqC;AACnE,SAAO,GAAG,sBAAsB,KAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiCpC,IAAa,6BAAb,cAAgD,uBAAuB;CACrE;CAEA,YAAY,QAET;AACD,SAAO;AACP,OAAK,eAAe,OAAO;;CAG7B,MAAM,kBACJ,WACA,OACA,WAC8B;EAC9B,IAAI,uBAAuB;AAC3B,OAAK,MAAM,eAAe,KAAK,aAC7B,KAAI,uBAAuB,yBAAyB,YAAY,CAC9D,wBAAuB,MAAM,YAAY,kBACvC,sBACA,OACA,UACD;MAED,wBACE,MAAM,YAAY,mBAAmB,qBAAqB;AAGhE,SAAO"}
@@ -1 +1 @@
- {"version":3,"file":"index.js","names":[],"sources":["../../../src/retrievers/document_compressors/index.ts"],"sourcesContent":["import type { DocumentInterface } from \"@langchain/core/documents\";\nimport { BaseDocumentTransformer } from \"@langchain/core/documents\";\nimport { Callbacks } from \"@langchain/core/callbacks/manager\";\n\n/**\n * Base Document Compression class. All compressors should extend this class.\n */\nexport abstract class BaseDocumentCompressor {\n /**\n * Abstract method that must be implemented by any class that extends\n * `BaseDocumentCompressor`. This method takes an array of `Document`\n * objects and a query string as parameters and returns a Promise that\n * resolves with an array of compressed `Document` objects.\n * @param documents An array of `Document` objects to be compressed.\n * @param query A query string.\n * @returns A Promise that resolves with an array of compressed `Document` objects.\n */\n abstract compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]>;\n\n // eslint-disable-next-line @typescript-eslint/no-explicit-any\n static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor {\n return x?.compressDocuments !== undefined;\n }\n}\n\n/**\n * Document compressor that uses a pipeline of Transformers.\n * @example\n * ```typescript\n * const compressorPipeline = new DocumentCompressorPipeline({\n * transformers: [\n * new RecursiveCharacterTextSplitter({\n * chunkSize: 200,\n * chunkOverlap: 0,\n * }),\n * new EmbeddingsFilter({\n * embeddings: new OpenAIEmbeddings(),\n * similarityThreshold: 0.8,\n * k: 5,\n * }),\n * ],\n * });\n * const retriever = new ContextualCompressionRetriever({\n * baseCompressor: compressorPipeline,\n * baseRetriever: new TavilySearchAPIRetriever({\n * includeRawContent: true,\n * }),\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\",\n * );\n * console.log({ retrievedDocs });\n * ```\n */\nexport class DocumentCompressorPipeline extends BaseDocumentCompressor {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n\n constructor(fields: {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n }) {\n super();\n this.transformers = fields.transformers;\n }\n\n async compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]> {\n let transformedDocuments = documents;\n for (const transformer of this.transformers) {\n if (BaseDocumentCompressor.isBaseDocumentCompressor(transformer)) {\n transformedDocuments = await transformer.compressDocuments(\n transformedDocuments,\n query,\n callbacks\n );\n } else {\n transformedDocuments =\n await transformer.transformDocuments(transformedDocuments);\n }\n }\n return transformedDocuments;\n }\n}\n"],"mappings":";;;;;;;;;AAOA,IAAsB,yBAAtB,MAA6C;CAiB3C,OAAO,yBAAyB,GAAqC;AACnE,SAAO,GAAG,sBAAsB,KAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiCpC,IAAa,6BAAb,cAAgD,uBAAuB;CACrE;CAEA,YAAY,QAET;AACD,SAAO;AACP,OAAK,eAAe,OAAO;;CAG7B,MAAM,kBACJ,WACA,OACA,WAC8B;EAC9B,IAAI,uBAAuB;AAC3B,OAAK,MAAM,eAAe,KAAK,aAC7B,KAAI,uBAAuB,yBAAyB,YAAY,CAC9D,wBAAuB,MAAM,YAAY,kBACvC,sBACA,OACA,UACD;MAED,wBACE,MAAM,YAAY,mBAAmB,qBAAqB;AAGhE,SAAO"}
+ {"version":3,"file":"index.js","names":[],"sources":["../../../src/retrievers/document_compressors/index.ts"],"sourcesContent":["import type { DocumentInterface } from \"@langchain/core/documents\";\nimport { BaseDocumentTransformer } from \"@langchain/core/documents\";\nimport { Callbacks } from \"@langchain/core/callbacks/manager\";\n\n/**\n * Base Document Compression class. All compressors should extend this class.\n */\nexport abstract class BaseDocumentCompressor {\n /**\n * Abstract method that must be implemented by any class that extends\n * `BaseDocumentCompressor`. This method takes an array of `Document`\n * objects and a query string as parameters and returns a Promise that\n * resolves with an array of compressed `Document` objects.\n * @param documents An array of `Document` objects to be compressed.\n * @param query A query string.\n * @returns A Promise that resolves with an array of compressed `Document` objects.\n */\n abstract compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]>;\n\n // oxlint-disable-next-line @typescript-eslint/no-explicit-any\n static isBaseDocumentCompressor(x: any): x is BaseDocumentCompressor {\n return x?.compressDocuments !== undefined;\n }\n}\n\n/**\n * Document compressor that uses a pipeline of Transformers.\n * @example\n * ```typescript\n * const compressorPipeline = new DocumentCompressorPipeline({\n * transformers: [\n * new RecursiveCharacterTextSplitter({\n * chunkSize: 200,\n * chunkOverlap: 0,\n * }),\n * new EmbeddingsFilter({\n * embeddings: new OpenAIEmbeddings(),\n * similarityThreshold: 0.8,\n * k: 5,\n * }),\n * ],\n * });\n * const retriever = new ContextualCompressionRetriever({\n * baseCompressor: compressorPipeline,\n * baseRetriever: new TavilySearchAPIRetriever({\n * includeRawContent: true,\n * }),\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What did the speaker say about Justice Breyer in the 2022 State of the Union?\",\n * );\n * console.log({ retrievedDocs });\n * ```\n */\nexport class DocumentCompressorPipeline extends BaseDocumentCompressor {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n\n constructor(fields: {\n transformers: (BaseDocumentTransformer | BaseDocumentCompressor)[];\n }) {\n super();\n this.transformers = fields.transformers;\n }\n\n async compressDocuments(\n documents: DocumentInterface[],\n query: string,\n callbacks?: Callbacks\n ): Promise<DocumentInterface[]> {\n let transformedDocuments = documents;\n for (const transformer of this.transformers) {\n if (BaseDocumentCompressor.isBaseDocumentCompressor(transformer)) {\n transformedDocuments = await transformer.compressDocuments(\n transformedDocuments,\n query,\n callbacks\n );\n } else {\n transformedDocuments =\n await transformer.transformDocuments(transformedDocuments);\n }\n }\n return transformedDocuments;\n }\n}\n"],"mappings":";;;;;;;;;AAOA,IAAsB,yBAAtB,MAA6C;CAiB3C,OAAO,yBAAyB,GAAqC;AACnE,SAAO,GAAG,sBAAsB,KAAA;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAiCpC,IAAa,6BAAb,cAAgD,uBAAuB;CACrE;CAEA,YAAY,QAET;AACD,SAAO;AACP,OAAK,eAAe,OAAO;;CAG7B,MAAM,kBACJ,WACA,OACA,WAC8B;EAC9B,IAAI,uBAAuB;AAC3B,OAAK,MAAM,eAAe,KAAK,aAC7B,KAAI,uBAAuB,yBAAyB,YAAY,CAC9D,wBAAuB,MAAM,YAAY,kBACvC,sBACA,OACA,UACD;MAED,wBACE,MAAM,YAAY,mBAAmB,qBAAqB;AAGhE,SAAO"}
@@ -1 +1 @@
- {"version":3,"file":"matryoshka_retriever.cjs","names":["VectorStoreRetriever"],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"sourcesContent":["import { DocumentInterface } from \"@langchain/core/documents\";\nimport { Embeddings } from \"@langchain/core/embeddings\";\nimport {\n cosineSimilarity,\n euclideanDistance,\n innerProduct,\n} from \"@langchain/core/utils/math\";\nimport {\n VectorStore,\n VectorStoreRetriever,\n VectorStoreRetrieverInput,\n} from \"@langchain/core/vectorstores\";\n\n/**\n * Type for options when adding a document to the VectorStore.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype AddDocumentOptions = Record<string, any>;\n\nexport interface MatryoshkaRetrieverFields {\n /**\n * The number of documents to retrieve from the small store.\n * @default 50\n */\n smallK?: number;\n /**\n * The number of documents to retrieve from the large store.\n * @default 8\n */\n largeK?: number;\n /**\n * The metadata key to store the larger embeddings.\n * @default \"lc_large_embedding\"\n */\n largeEmbeddingKey?: string;\n /**\n * The embedding model to use when generating the large\n * embeddings.\n */\n largeEmbeddingModel: Embeddings;\n /**\n * The type of search to perform using the large embeddings.\n * @default \"cosine\"\n */\n searchType?: \"cosine\" | \"innerProduct\" | \"euclidean\";\n}\n\n/**\n * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based\n * off of the \"Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval\"\n * blog post {@link https://supabase.com/blog/matryoshka-embeddings}.\n *\n *\n * This class performs \"Adaptive Retrieval\" for searching text embeddings efficiently using the\n * Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query\n * embedding in two steps:\n *\n * First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,\n * but less accurate search.\n *\n * Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional\n * embedding for higher accuracy.\n *\n *\n * This code implements MRL embeddings for efficient vector search by combining faster,\n * lower-dimensional initial search with accurate, high-dimensional re-ranking.\n */\nexport class MatryoshkaRetriever<\n Store extends VectorStore = VectorStore,\n> extends VectorStoreRetriever<Store> {\n smallK = 50;\n\n largeK = 8;\n\n largeEmbeddingKey = \"lc_large_embedding\";\n\n largeEmbeddingModel: Embeddings;\n\n searchType: \"cosine\" | \"innerProduct\" | \"euclidean\" = \"cosine\";\n\n constructor(\n fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>\n ) {\n super(fields);\n this.smallK = fields.smallK ?? this.smallK;\n this.largeK = fields.largeK ?? this.largeK;\n this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;\n this.largeEmbeddingModel = fields.largeEmbeddingModel;\n this.searchType = fields.searchType ?? this.searchType;\n }\n\n /**\n * Ranks documents based on their similarity to a query embedding using larger embeddings.\n *\n * This method takes a query embedding and a list of documents (smallResults) as input. Each document\n * in the smallResults array has previously been associated with a large embedding stored in its metadata.\n * Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores\n * between the query embedding and each document's large embedding. 
It then ranks the documents based on\n * these similarity scores, from the most similar to the least similar.\n *\n * The method returns a promise that resolves to an array of the top `largeK` documents, where `largeK`\n * is a class property defining the number of documents to return. This subset of documents is determined\n * by sorting the entire list of documents based on their similarity scores and then selecting the top\n * `largeK` documents.\n *\n * @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.\n * @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.\n * @returns {Promise<DocumentInterface[]>} A promise that resolves to an array of the top `largeK` ranked documents based on their similarity to the query embedding.\n */\n private _rankByLargeEmbeddings(\n embeddedQuery: number[],\n smallResults: DocumentInterface[]\n ): DocumentInterface[] {\n const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>\n JSON.parse(doc.metadata[this.largeEmbeddingKey])\n );\n let func: () => Array<number[]>;\n\n switch (this.searchType) {\n case \"cosine\":\n func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);\n break;\n case \"innerProduct\":\n func = () => innerProduct([embeddedQuery], largeEmbeddings);\n break;\n case \"euclidean\":\n func = () => euclideanDistance([embeddedQuery], largeEmbeddings);\n break;\n default:\n throw new Error(`Unknown search type: ${this.searchType}`);\n }\n\n // Calculate the similarity scores between the query embedding and the large embeddings\n const [similarityScores] = func();\n\n // Create an array of indices from 0 to N-1, where N is the number of documents\n let indices = Array.from(\n { length: smallResults.length },\n (_, index) => index\n );\n\n indices = indices\n .map((v, i) => [similarityScores[i], v])\n .sort(([a], [b]) => b - a)\n .slice(0, this.largeK)\n .map(([, i]) => i);\n\n return indices.map((i) => smallResults[i]);\n }\n\n async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {\n const [embeddedQuery, smallResults] = await Promise.all([\n this.largeEmbeddingModel.embedQuery(query),\n this.vectorStore.similaritySearch(query, this.smallK, this.filter),\n ]);\n\n return this._rankByLargeEmbeddings(embeddedQuery, smallResults);\n }\n\n /**\n * Override the default `addDocuments` method to embed the documents twice,\n * once using the larger embeddings model, and then again using the default\n * embedding model linked to the vector store.\n *\n * @param {DocumentInterface[]} documents - An array of documents to add to the vector store.\n * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.\n * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.\n */\n override addDocuments = async (\n documents: DocumentInterface[],\n options?: AddDocumentOptions\n ): Promise<string[] | void> => {\n // Insure documents metadata does not contain the large embedding key\n if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {\n throw new Error(\n `All documents must not contain the large embedding key: ${this.largeEmbeddingKey} in their metadata.`\n );\n }\n\n const allDocPageContent = documents.map((doc) => doc.pageContent);\n const allDocLargeEmbeddings =\n await this.largeEmbeddingModel.embedDocuments(allDocPageContent);\n\n const 
newDocuments: Array<DocumentInterface> = documents.map(\n (doc, idx) => ({\n ...doc,\n metadata: {\n ...doc.metadata,\n [this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),\n },\n })\n );\n\n return this.vectorStore.addDocuments(newDocuments, options);\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;AAmEA,IAAa,sBAAb,cAEUA,6BAAAA,qBAA4B;CACpC,SAAS;CAET,SAAS;CAET,oBAAoB;CAEpB;CAEA,aAAsD;CAEtD,YACE,QACA;AACA,QAAM,OAAO;AACb,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,oBAAoB,OAAO,qBAAqB,KAAK;AAC1D,OAAK,sBAAsB,OAAO;AAClC,OAAK,aAAa,OAAO,cAAc,KAAK;;;;;;;;;;;;;;;;;;;;CAqB9C,uBACE,eACA,cACqB;EACrB,MAAM,kBAAmC,aAAa,KAAK,QACzD,KAAK,MAAM,IAAI,SAAS,KAAK,mBAAmB,CACjD;EACD,IAAI;AAEJ,UAAQ,KAAK,YAAb;GACE,KAAK;AACH,kBAAA,GAAA,2BAAA,kBAA8B,CAAC,cAAc,EAAE,gBAAgB;AAC/D;GACF,KAAK;AACH,kBAAA,GAAA,2BAAA,cAA0B,CAAC,cAAc,EAAE,gBAAgB;AAC3D;GACF,KAAK;AACH,kBAAA,GAAA,2BAAA,mBAA+B,CAAC,cAAc,EAAE,gBAAgB;AAChE;GACF,QACE,OAAM,IAAI,MAAM,wBAAwB,KAAK,aAAa;;EAI9D,MAAM,CAAC,oBAAoB,MAAM;EAGjC,IAAI,UAAU,MAAM,KAClB,EAAE,QAAQ,aAAa,QAAQ,GAC9B,GAAG,UAAU,MACf;AAED,YAAU,QACP,KAAK,GAAG,MAAM,CAAC,iBAAiB,IAAI,EAAE,CAAC,CACvC,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE,CACzB,MAAM,GAAG,KAAK,OAAO,CACrB,KAAK,GAAG,OAAO,EAAE;AAEpB,SAAO,QAAQ,KAAK,MAAM,aAAa,GAAG;;CAG5C,MAAM,sBAAsB,OAA6C;EACvE,MAAM,CAAC,eAAe,gBAAgB,MAAM,QAAQ,IAAI,CACtD,KAAK,oBAAoB,WAAW,MAAM,EAC1C,KAAK,YAAY,iBAAiB,OAAO,KAAK,QAAQ,KAAK,OAAO,CACnE,CAAC;AAEF,SAAO,KAAK,uBAAuB,eAAe,aAAa;;;;;;;;;;;CAYjE,eAAwB,OACtB,WACA,YAC6B;AAE7B,MAAI,UAAU,MAAM,QAAQ,KAAK,qBAAqB,IAAI,SAAS,CACjE,OAAM,IAAI,MACR,2DAA2D,KAAK,kBAAkB,qBACnF;EAGH,MAAM,oBAAoB,UAAU,KAAK,QAAQ,IAAI,YAAY;EACjE,MAAM,wBACJ,MAAM,KAAK,oBAAoB,eAAe,kBAAkB;EAElE,MAAM,eAAyC,UAAU,KACtD,KAAK,SAAS;GACb,GAAG;GACH,UAAU;IACR,GAAG,IAAI;KACN,KAAK,oBAAoB,KAAK,UAAU,sBAAsB,KAAK;IACrE;GACF,EACF;AAED,SAAO,KAAK,YAAY,aAAa,cAAc,QAAQ"}
+ {"version":3,"file":"matryoshka_retriever.cjs","names":["VectorStoreRetriever"],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"sourcesContent":["import { DocumentInterface } from \"@langchain/core/documents\";\nimport { Embeddings } from \"@langchain/core/embeddings\";\nimport {\n cosineSimilarity,\n euclideanDistance,\n innerProduct,\n} from \"@langchain/core/utils/math\";\nimport {\n VectorStore,\n VectorStoreRetriever,\n VectorStoreRetrieverInput,\n} from \"@langchain/core/vectorstores\";\n\n/**\n * Type for options when adding a document to the VectorStore.\n */\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\ntype AddDocumentOptions = Record<string, any>;\n\nexport interface MatryoshkaRetrieverFields {\n /**\n * The number of documents to retrieve from the small store.\n * @default 50\n */\n smallK?: number;\n /**\n * The number of documents to retrieve from the large store.\n * @default 8\n */\n largeK?: number;\n /**\n * The metadata key to store the larger embeddings.\n * @default \"lc_large_embedding\"\n */\n largeEmbeddingKey?: string;\n /**\n * The embedding model to use when generating the large\n * embeddings.\n */\n largeEmbeddingModel: Embeddings;\n /**\n * The type of search to perform using the large embeddings.\n * @default \"cosine\"\n */\n searchType?: \"cosine\" | \"innerProduct\" | \"euclidean\";\n}\n\n/**\n * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based\n * off of the \"Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval\"\n * blog post {@link https://supabase.com/blog/matryoshka-embeddings}.\n *\n *\n * This class performs \"Adaptive Retrieval\" for searching text embeddings efficiently using the\n * Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query\n * embedding in two steps:\n *\n * First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,\n * but less accurate search.\n *\n * Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional\n * embedding for higher accuracy.\n *\n *\n * This code implements MRL embeddings for efficient vector search by combining faster,\n * lower-dimensional initial search with accurate, high-dimensional re-ranking.\n */\nexport class MatryoshkaRetriever<\n Store extends VectorStore = VectorStore,\n> extends VectorStoreRetriever<Store> {\n smallK = 50;\n\n largeK = 8;\n\n largeEmbeddingKey = \"lc_large_embedding\";\n\n largeEmbeddingModel: Embeddings;\n\n searchType: \"cosine\" | \"innerProduct\" | \"euclidean\" = \"cosine\";\n\n constructor(\n fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>\n ) {\n super(fields);\n this.smallK = fields.smallK ?? this.smallK;\n this.largeK = fields.largeK ?? this.largeK;\n this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;\n this.largeEmbeddingModel = fields.largeEmbeddingModel;\n this.searchType = fields.searchType ?? this.searchType;\n }\n\n /**\n * Ranks documents based on their similarity to a query embedding using larger embeddings.\n *\n * This method takes a query embedding and a list of documents (smallResults) as input. Each document\n * in the smallResults array has previously been associated with a large embedding stored in its metadata.\n * Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores\n * between the query embedding and each document's large embedding. 
It then ranks the documents based on\n * these similarity scores, from the most similar to the least similar.\n *\n * The method returns a promise that resolves to an array of the top `largeK` documents, where `largeK`\n * is a class property defining the number of documents to return. This subset of documents is determined\n * by sorting the entire list of documents based on their similarity scores and then selecting the top\n * `largeK` documents.\n *\n * @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.\n * @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.\n * @returns {Promise<DocumentInterface[]>} A promise that resolves to an array of the top `largeK` ranked documents based on their similarity to the query embedding.\n */\n private _rankByLargeEmbeddings(\n embeddedQuery: number[],\n smallResults: DocumentInterface[]\n ): DocumentInterface[] {\n const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>\n JSON.parse(doc.metadata[this.largeEmbeddingKey])\n );\n let func: () => Array<number[]>;\n\n switch (this.searchType) {\n case \"cosine\":\n func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);\n break;\n case \"innerProduct\":\n func = () => innerProduct([embeddedQuery], largeEmbeddings);\n break;\n case \"euclidean\":\n func = () => euclideanDistance([embeddedQuery], largeEmbeddings);\n break;\n default:\n throw new Error(`Unknown search type: ${this.searchType}`);\n }\n\n // Calculate the similarity scores between the query embedding and the large embeddings\n const [similarityScores] = func();\n\n // Create an array of indices from 0 to N-1, where N is the number of documents\n let indices = Array.from(\n { length: smallResults.length },\n (_, index) => index\n );\n\n indices = indices\n .map((v, i) => [similarityScores[i], v])\n .sort(([a], [b]) => b - a)\n .slice(0, this.largeK)\n .map(([, i]) => i);\n\n return indices.map((i) => smallResults[i]);\n }\n\n async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {\n const [embeddedQuery, smallResults] = await Promise.all([\n this.largeEmbeddingModel.embedQuery(query),\n this.vectorStore.similaritySearch(query, this.smallK, this.filter),\n ]);\n\n return this._rankByLargeEmbeddings(embeddedQuery, smallResults);\n }\n\n /**\n * Override the default `addDocuments` method to embed the documents twice,\n * once using the larger embeddings model, and then again using the default\n * embedding model linked to the vector store.\n *\n * @param {DocumentInterface[]} documents - An array of documents to add to the vector store.\n * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.\n * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.\n */\n override addDocuments = async (\n documents: DocumentInterface[],\n options?: AddDocumentOptions\n ): Promise<string[] | void> => {\n // Insure documents metadata does not contain the large embedding key\n if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {\n throw new Error(\n `All documents must not contain the large embedding key: ${this.largeEmbeddingKey} in their metadata.`\n );\n }\n\n const allDocPageContent = documents.map((doc) => doc.pageContent);\n const allDocLargeEmbeddings =\n await this.largeEmbeddingModel.embedDocuments(allDocPageContent);\n\n const 
newDocuments: Array<DocumentInterface> = documents.map(\n (doc, idx) => ({\n ...doc,\n metadata: {\n ...doc.metadata,\n [this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),\n },\n })\n );\n\n return this.vectorStore.addDocuments(newDocuments, options);\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;AAmEA,IAAa,sBAAb,cAEUA,6BAAAA,qBAA4B;CACpC,SAAS;CAET,SAAS;CAET,oBAAoB;CAEpB;CAEA,aAAsD;CAEtD,YACE,QACA;AACA,QAAM,OAAO;AACb,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,oBAAoB,OAAO,qBAAqB,KAAK;AAC1D,OAAK,sBAAsB,OAAO;AAClC,OAAK,aAAa,OAAO,cAAc,KAAK;;;;;;;;;;;;;;;;;;;;CAqB9C,uBACE,eACA,cACqB;EACrB,MAAM,kBAAmC,aAAa,KAAK,QACzD,KAAK,MAAM,IAAI,SAAS,KAAK,mBAAmB,CACjD;EACD,IAAI;AAEJ,UAAQ,KAAK,YAAb;GACE,KAAK;AACH,kBAAA,GAAA,2BAAA,kBAA8B,CAAC,cAAc,EAAE,gBAAgB;AAC/D;GACF,KAAK;AACH,kBAAA,GAAA,2BAAA,cAA0B,CAAC,cAAc,EAAE,gBAAgB;AAC3D;GACF,KAAK;AACH,kBAAA,GAAA,2BAAA,mBAA+B,CAAC,cAAc,EAAE,gBAAgB;AAChE;GACF,QACE,OAAM,IAAI,MAAM,wBAAwB,KAAK,aAAa;;EAI9D,MAAM,CAAC,oBAAoB,MAAM;EAGjC,IAAI,UAAU,MAAM,KAClB,EAAE,QAAQ,aAAa,QAAQ,GAC9B,GAAG,UAAU,MACf;AAED,YAAU,QACP,KAAK,GAAG,MAAM,CAAC,iBAAiB,IAAI,EAAE,CAAC,CACvC,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE,CACzB,MAAM,GAAG,KAAK,OAAO,CACrB,KAAK,GAAG,OAAO,EAAE;AAEpB,SAAO,QAAQ,KAAK,MAAM,aAAa,GAAG;;CAG5C,MAAM,sBAAsB,OAA6C;EACvE,MAAM,CAAC,eAAe,gBAAgB,MAAM,QAAQ,IAAI,CACtD,KAAK,oBAAoB,WAAW,MAAM,EAC1C,KAAK,YAAY,iBAAiB,OAAO,KAAK,QAAQ,KAAK,OAAO,CACnE,CAAC;AAEF,SAAO,KAAK,uBAAuB,eAAe,aAAa;;;;;;;;;;;CAYjE,eAAwB,OACtB,WACA,YAC6B;AAE7B,MAAI,UAAU,MAAM,QAAQ,KAAK,qBAAqB,IAAI,SAAS,CACjE,OAAM,IAAI,MACR,2DAA2D,KAAK,kBAAkB,qBACnF;EAGH,MAAM,oBAAoB,UAAU,KAAK,QAAQ,IAAI,YAAY;EACjE,MAAM,wBACJ,MAAM,KAAK,oBAAoB,eAAe,kBAAkB;EAElE,MAAM,eAAyC,UAAU,KACtD,KAAK,SAAS;GACb,GAAG;GACH,UAAU;IACR,GAAG,IAAI;KACN,KAAK,oBAAoB,KAAK,UAAU,sBAAsB,KAAK;IACrE;GACF,EACF;AAED,SAAO,KAAK,YAAY,aAAa,cAAc,QAAQ"}
@@ -90,7 +90,7 @@ declare class MatryoshkaRetriever<Store extends VectorStore = VectorStore> exten
  * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.
  * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.
  */
- addDocuments: (documents: DocumentInterface<Record<string, any>>[], options?: AddDocumentOptions | undefined) => Promise<void | string[]>;
+ addDocuments: (documents: DocumentInterface[], options?: AddDocumentOptions) => Promise<string[] | void>;
  }
  //#endregion
  export { MatryoshkaRetriever, MatryoshkaRetrieverFields };
@@ -1 +1 @@
- {"version":3,"file":"matryoshka_retriever.d.cts","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"mappings":";;;;;;;AAWsC;KAMjC,kBAAA,GAAqB,MAAA;AAAA,UAET,yBAAA;EAFS;;AAE1B;;EAKE,MAAA;EAe+B;;;;EAV/B,MAAA;EAUqB;;;;EALrB,iBAAA;EAiC8B;;;;EA5B9B,mBAAA,EAAqB,UAAA;EAqCA;;;;EAhCrB,UAAA;AAAA;;;;;;;;;;;;;;;;;;;;;cAuBW,mBAAA,eACG,WAAA,GAAc,WAAA,UACpB,oBAAA,CAAqB,KAAA;EAC7B,MAAA;EAEA,MAAA;EAEA,iBAAA;EAEA,mBAAA,EAAqB,UAAA;EAErB,UAAA;EAEA,WAAA,CACE,MAAA,EAAQ,yBAAA,GAA4B,yBAAA,CAA0B,KAAA;EAqEZ;;;;;;;;;;;;;;;;;;EAAA,QAzC5C,sBAAA;EAyCF,qBAAA,CAAsB,KAAA,WAAgB,OAAA,CAAQ,iBAAA;;;;;;;;;;EAkB3C,YAAA,GAAY,SAAA,EAAA,iBAAA,CAAA,MAAA,kBAAA,OAAA,GAAA,kBAAA,iBAAA,OAAA;AAAA"}
+ {"version":3,"file":"matryoshka_retriever.d.cts","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"mappings":";;;;;;;AAWsC;KAMjC,kBAAA,GAAqB,MAAA;AAAA,UAET,yBAAA;EAFS;;AAE1B;;EAKE,MAAA;EAe+B;;;;EAV/B,MAAA;EAUqB;;;;EALrB,iBAAA;EAiC8B;;;;EA5B9B,mBAAA,EAAqB,UAAA;EAqCA;;;;EAhCrB,UAAA;AAAA;;;;;;;;;;;;;;;;;;;;;cAuBW,mBAAA,eACG,WAAA,GAAc,WAAA,UACpB,oBAAA,CAAqB,KAAA;EAC7B,MAAA;EAEA,MAAA;EAEA,iBAAA;EAEA,mBAAA,EAAqB,UAAA;EAErB,UAAA;EAEA,WAAA,CACE,MAAA,EAAQ,yBAAA,GAA4B,yBAAA,CAA0B,KAAA;EAuFvD;;;;;;;;;;;;;;;;;;EAAA,QA3DD,sBAAA;EAyCF,qBAAA,CAAsB,KAAA,WAAgB,OAAA,CAAQ,iBAAA;;;;;;;;;;EAkB3C,YAAA,GAAY,SAAA,EACR,iBAAA,IAAmB,OAAA,GACpB,kBAAA,KACT,OAAA;AAAA"}
@@ -90,7 +90,7 @@ declare class MatryoshkaRetriever<Store extends VectorStore = VectorStore> exten
  * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.
  * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.
  */
- addDocuments: (documents: DocumentInterface<Record<string, any>>[], options?: AddDocumentOptions | undefined) => Promise<void | string[]>;
+ addDocuments: (documents: DocumentInterface[], options?: AddDocumentOptions) => Promise<string[] | void>;
  }
  //#endregion
  export { MatryoshkaRetriever, MatryoshkaRetrieverFields };
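> Note: the substantive declaration change for `MatryoshkaRetriever` is the relaxed `addDocuments` signature — `DocumentInterface<Record<string, any>>[]` becomes plain `DocumentInterface[]`, the redundant `| undefined` on `options` is dropped, and `Promise<void | string[]>` is reordered to `Promise<string[] | void>`. A usage sketch against the new signature; model names, dimensions, and import paths are illustrative assumptions:

```typescript
import { OpenAIEmbeddings } from "@langchain/openai";
// Vector store and retriever import paths assumed from the package layout.
import { MemoryVectorStore } from "@langchain/classic/vectorstores/memory";
import { MatryoshkaRetriever } from "@langchain/classic/retrievers/matryoshka_retriever";

// Assumed models/dimensions: a truncated MRL embedding for the fast first
// pass, the full-width embedding for second-pass re-ranking.
const smallEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-large",
  dimensions: 512,
});
const largeEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-large",
  dimensions: 3072,
});

const retriever = new MatryoshkaRetriever({
  vectorStore: new MemoryVectorStore(smallEmbeddings),
  largeEmbeddingModel: largeEmbeddings,
  smallK: 50, // first-pass candidates (class default)
  largeK: 8,  // re-ranked results (class default)
});

// The relaxed signature accepts plain DocumentInterface[]:
await retriever.addDocuments([
  { pageContent: "Matryoshka embeddings support adaptive retrieval.", metadata: {} },
]);
const docs = await retriever.invoke("adaptive retrieval");
```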
@@ -1 +1 @@
- {"version":3,"file":"matryoshka_retriever.d.ts","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"mappings":";;;;;;;AAWsC;KAMjC,kBAAA,GAAqB,MAAA;AAAA,UAET,yBAAA;EAFS;;AAE1B;;EAKE,MAAA;EAe+B;;;;EAV/B,MAAA;EAUqB;;;;EALrB,iBAAA;EAiC8B;;;;EA5B9B,mBAAA,EAAqB,UAAA;EAqCA;;;;EAhCrB,UAAA;AAAA;;;;;;;;;;;;;;;;;;;;;cAuBW,mBAAA,eACG,WAAA,GAAc,WAAA,UACpB,oBAAA,CAAqB,KAAA;EAC7B,MAAA;EAEA,MAAA;EAEA,iBAAA;EAEA,mBAAA,EAAqB,UAAA;EAErB,UAAA;EAEA,WAAA,CACE,MAAA,EAAQ,yBAAA,GAA4B,yBAAA,CAA0B,KAAA;EAqEZ;;;;;;;;;;;;;;;;;;EAAA,QAzC5C,sBAAA;EAyCF,qBAAA,CAAsB,KAAA,WAAgB,OAAA,CAAQ,iBAAA;;;;;;;;;;EAkB3C,YAAA,GAAY,SAAA,EAAA,iBAAA,CAAA,MAAA,kBAAA,OAAA,GAAA,kBAAA,iBAAA,OAAA;AAAA"}
+ {"version":3,"file":"matryoshka_retriever.d.ts","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"mappings":";;;;;;;AAWsC;KAMjC,kBAAA,GAAqB,MAAA;AAAA,UAET,yBAAA;EAFS;;AAE1B;;EAKE,MAAA;EAe+B;;;;EAV/B,MAAA;EAUqB;;;;EALrB,iBAAA;EAiC8B;;;;EA5B9B,mBAAA,EAAqB,UAAA;EAqCA;;;;EAhCrB,UAAA;AAAA;;;;;;;;;;;;;;;;;;;;;cAuBW,mBAAA,eACG,WAAA,GAAc,WAAA,UACpB,oBAAA,CAAqB,KAAA;EAC7B,MAAA;EAEA,MAAA;EAEA,iBAAA;EAEA,mBAAA,EAAqB,UAAA;EAErB,UAAA;EAEA,WAAA,CACE,MAAA,EAAQ,yBAAA,GAA4B,yBAAA,CAA0B,KAAA;EAuFvD;;;;;;;;;;;;;;;;;;EAAA,QA3DD,sBAAA;EAyCF,qBAAA,CAAsB,KAAA,WAAgB,OAAA,CAAQ,iBAAA;;;;;;;;;;EAkB3C,YAAA,GAAY,SAAA,EACR,iBAAA,IAAmB,OAAA,GACpB,kBAAA,KACT,OAAA;AAAA"}
@@ -1 +1 @@
- {"version":3,"file":"matryoshka_retriever.js","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"sourcesContent":["import { DocumentInterface } from \"@langchain/core/documents\";\nimport { Embeddings } from \"@langchain/core/embeddings\";\nimport {\n cosineSimilarity,\n euclideanDistance,\n innerProduct,\n} from \"@langchain/core/utils/math\";\nimport {\n VectorStore,\n VectorStoreRetriever,\n VectorStoreRetrieverInput,\n} from \"@langchain/core/vectorstores\";\n\n/**\n * Type for options when adding a document to the VectorStore.\n */\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\ntype AddDocumentOptions = Record<string, any>;\n\nexport interface MatryoshkaRetrieverFields {\n /**\n * The number of documents to retrieve from the small store.\n * @default 50\n */\n smallK?: number;\n /**\n * The number of documents to retrieve from the large store.\n * @default 8\n */\n largeK?: number;\n /**\n * The metadata key to store the larger embeddings.\n * @default \"lc_large_embedding\"\n */\n largeEmbeddingKey?: string;\n /**\n * The embedding model to use when generating the large\n * embeddings.\n */\n largeEmbeddingModel: Embeddings;\n /**\n * The type of search to perform using the large embeddings.\n * @default \"cosine\"\n */\n searchType?: \"cosine\" | \"innerProduct\" | \"euclidean\";\n}\n\n/**\n * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based\n * off of the \"Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval\"\n * blog post {@link https://supabase.com/blog/matryoshka-embeddings}.\n *\n *\n * This class performs \"Adaptive Retrieval\" for searching text embeddings efficiently using the\n * Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query\n * embedding in two steps:\n *\n * First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,\n * but less accurate search.\n *\n * Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional\n * embedding for higher accuracy.\n *\n *\n * This code implements MRL embeddings for efficient vector search by combining faster,\n * lower-dimensional initial search with accurate, high-dimensional re-ranking.\n */\nexport class MatryoshkaRetriever<\n Store extends VectorStore = VectorStore,\n> extends VectorStoreRetriever<Store> {\n smallK = 50;\n\n largeK = 8;\n\n largeEmbeddingKey = \"lc_large_embedding\";\n\n largeEmbeddingModel: Embeddings;\n\n searchType: \"cosine\" | \"innerProduct\" | \"euclidean\" = \"cosine\";\n\n constructor(\n fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>\n ) {\n super(fields);\n this.smallK = fields.smallK ?? this.smallK;\n this.largeK = fields.largeK ?? this.largeK;\n this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;\n this.largeEmbeddingModel = fields.largeEmbeddingModel;\n this.searchType = fields.searchType ?? this.searchType;\n }\n\n /**\n * Ranks documents based on their similarity to a query embedding using larger embeddings.\n *\n * This method takes a query embedding and a list of documents (smallResults) as input. Each document\n * in the smallResults array has previously been associated with a large embedding stored in its metadata.\n * Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores\n * between the query embedding and each document's large embedding. 
It then ranks the documents based on\n * these similarity scores, from the most similar to the least similar.\n *\n * The method returns a promise that resolves to an array of the top `largeK` documents, where `largeK`\n * is a class property defining the number of documents to return. This subset of documents is determined\n * by sorting the entire list of documents based on their similarity scores and then selecting the top\n * `largeK` documents.\n *\n * @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.\n * @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.\n * @returns {Promise<DocumentInterface[]>} A promise that resolves to an array of the top `largeK` ranked documents based on their similarity to the query embedding.\n */\n private _rankByLargeEmbeddings(\n embeddedQuery: number[],\n smallResults: DocumentInterface[]\n ): DocumentInterface[] {\n const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>\n JSON.parse(doc.metadata[this.largeEmbeddingKey])\n );\n let func: () => Array<number[]>;\n\n switch (this.searchType) {\n case \"cosine\":\n func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);\n break;\n case \"innerProduct\":\n func = () => innerProduct([embeddedQuery], largeEmbeddings);\n break;\n case \"euclidean\":\n func = () => euclideanDistance([embeddedQuery], largeEmbeddings);\n break;\n default:\n throw new Error(`Unknown search type: ${this.searchType}`);\n }\n\n // Calculate the similarity scores between the query embedding and the large embeddings\n const [similarityScores] = func();\n\n // Create an array of indices from 0 to N-1, where N is the number of documents\n let indices = Array.from(\n { length: smallResults.length },\n (_, index) => index\n );\n\n indices = indices\n .map((v, i) => [similarityScores[i], v])\n .sort(([a], [b]) => b - a)\n .slice(0, this.largeK)\n .map(([, i]) => i);\n\n return indices.map((i) => smallResults[i]);\n }\n\n async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {\n const [embeddedQuery, smallResults] = await Promise.all([\n this.largeEmbeddingModel.embedQuery(query),\n this.vectorStore.similaritySearch(query, this.smallK, this.filter),\n ]);\n\n return this._rankByLargeEmbeddings(embeddedQuery, smallResults);\n }\n\n /**\n * Override the default `addDocuments` method to embed the documents twice,\n * once using the larger embeddings model, and then again using the default\n * embedding model linked to the vector store.\n *\n * @param {DocumentInterface[]} documents - An array of documents to add to the vector store.\n * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.\n * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.\n */\n override addDocuments = async (\n documents: DocumentInterface[],\n options?: AddDocumentOptions\n ): Promise<string[] | void> => {\n // Insure documents metadata does not contain the large embedding key\n if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {\n throw new Error(\n `All documents must not contain the large embedding key: ${this.largeEmbeddingKey} in their metadata.`\n );\n }\n\n const allDocPageContent = documents.map((doc) => doc.pageContent);\n const allDocLargeEmbeddings =\n await this.largeEmbeddingModel.embedDocuments(allDocPageContent);\n\n const 
newDocuments: Array<DocumentInterface> = documents.map(\n (doc, idx) => ({\n ...doc,\n metadata: {\n ...doc.metadata,\n [this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),\n },\n })\n );\n\n return this.vectorStore.addDocuments(newDocuments, options);\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AAmEA,IAAa,sBAAb,cAEU,qBAA4B;CACpC,SAAS;CAET,SAAS;CAET,oBAAoB;CAEpB;CAEA,aAAsD;CAEtD,YACE,QACA;AACA,QAAM,OAAO;AACb,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,oBAAoB,OAAO,qBAAqB,KAAK;AAC1D,OAAK,sBAAsB,OAAO;AAClC,OAAK,aAAa,OAAO,cAAc,KAAK;;;;;;;;;;;;;;;;;;;;CAqB9C,uBACE,eACA,cACqB;EACrB,MAAM,kBAAmC,aAAa,KAAK,QACzD,KAAK,MAAM,IAAI,SAAS,KAAK,mBAAmB,CACjD;EACD,IAAI;AAEJ,UAAQ,KAAK,YAAb;GACE,KAAK;AACH,iBAAa,iBAAiB,CAAC,cAAc,EAAE,gBAAgB;AAC/D;GACF,KAAK;AACH,iBAAa,aAAa,CAAC,cAAc,EAAE,gBAAgB;AAC3D;GACF,KAAK;AACH,iBAAa,kBAAkB,CAAC,cAAc,EAAE,gBAAgB;AAChE;GACF,QACE,OAAM,IAAI,MAAM,wBAAwB,KAAK,aAAa;;EAI9D,MAAM,CAAC,oBAAoB,MAAM;EAGjC,IAAI,UAAU,MAAM,KAClB,EAAE,QAAQ,aAAa,QAAQ,GAC9B,GAAG,UAAU,MACf;AAED,YAAU,QACP,KAAK,GAAG,MAAM,CAAC,iBAAiB,IAAI,EAAE,CAAC,CACvC,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE,CACzB,MAAM,GAAG,KAAK,OAAO,CACrB,KAAK,GAAG,OAAO,EAAE;AAEpB,SAAO,QAAQ,KAAK,MAAM,aAAa,GAAG;;CAG5C,MAAM,sBAAsB,OAA6C;EACvE,MAAM,CAAC,eAAe,gBAAgB,MAAM,QAAQ,IAAI,CACtD,KAAK,oBAAoB,WAAW,MAAM,EAC1C,KAAK,YAAY,iBAAiB,OAAO,KAAK,QAAQ,KAAK,OAAO,CACnE,CAAC;AAEF,SAAO,KAAK,uBAAuB,eAAe,aAAa;;;;;;;;;;;CAYjE,eAAwB,OACtB,WACA,YAC6B;AAE7B,MAAI,UAAU,MAAM,QAAQ,KAAK,qBAAqB,IAAI,SAAS,CACjE,OAAM,IAAI,MACR,2DAA2D,KAAK,kBAAkB,qBACnF;EAGH,MAAM,oBAAoB,UAAU,KAAK,QAAQ,IAAI,YAAY;EACjE,MAAM,wBACJ,MAAM,KAAK,oBAAoB,eAAe,kBAAkB;EAElE,MAAM,eAAyC,UAAU,KACtD,KAAK,SAAS;GACb,GAAG;GACH,UAAU;IACR,GAAG,IAAI;KACN,KAAK,oBAAoB,KAAK,UAAU,sBAAsB,KAAK;IACrE;GACF,EACF;AAED,SAAO,KAAK,YAAY,aAAa,cAAc,QAAQ"}
+ {"version":3,"file":"matryoshka_retriever.js","names":[],"sources":["../../src/retrievers/matryoshka_retriever.ts"],"sourcesContent":["import { DocumentInterface } from \"@langchain/core/documents\";\nimport { Embeddings } from \"@langchain/core/embeddings\";\nimport {\n cosineSimilarity,\n euclideanDistance,\n innerProduct,\n} from \"@langchain/core/utils/math\";\nimport {\n VectorStore,\n VectorStoreRetriever,\n VectorStoreRetrieverInput,\n} from \"@langchain/core/vectorstores\";\n\n/**\n * Type for options when adding a document to the VectorStore.\n */\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\ntype AddDocumentOptions = Record<string, any>;\n\nexport interface MatryoshkaRetrieverFields {\n /**\n * The number of documents to retrieve from the small store.\n * @default 50\n */\n smallK?: number;\n /**\n * The number of documents to retrieve from the large store.\n * @default 8\n */\n largeK?: number;\n /**\n * The metadata key to store the larger embeddings.\n * @default \"lc_large_embedding\"\n */\n largeEmbeddingKey?: string;\n /**\n * The embedding model to use when generating the large\n * embeddings.\n */\n largeEmbeddingModel: Embeddings;\n /**\n * The type of search to perform using the large embeddings.\n * @default \"cosine\"\n */\n searchType?: \"cosine\" | \"innerProduct\" | \"euclidean\";\n}\n\n/**\n * A retriever that uses two sets of embeddings to perform adaptive retrieval. Based\n * off of the \"Matryoshka embeddings: faster OpenAI vector search using Adaptive Retrieval\"\n * blog post {@link https://supabase.com/blog/matryoshka-embeddings}.\n *\n *\n * This class performs \"Adaptive Retrieval\" for searching text embeddings efficiently using the\n * Matryoshka Representation Learning (MRL) technique. It retrieves documents similar to a query\n * embedding in two steps:\n *\n * First-pass: Uses a lower dimensional sub-vector from the MRL embedding for an initial, fast,\n * but less accurate search.\n *\n * Second-pass: Re-ranks the top results from the first pass using the full, high-dimensional\n * embedding for higher accuracy.\n *\n *\n * This code implements MRL embeddings for efficient vector search by combining faster,\n * lower-dimensional initial search with accurate, high-dimensional re-ranking.\n */\nexport class MatryoshkaRetriever<\n Store extends VectorStore = VectorStore,\n> extends VectorStoreRetriever<Store> {\n smallK = 50;\n\n largeK = 8;\n\n largeEmbeddingKey = \"lc_large_embedding\";\n\n largeEmbeddingModel: Embeddings;\n\n searchType: \"cosine\" | \"innerProduct\" | \"euclidean\" = \"cosine\";\n\n constructor(\n fields: MatryoshkaRetrieverFields & VectorStoreRetrieverInput<Store>\n ) {\n super(fields);\n this.smallK = fields.smallK ?? this.smallK;\n this.largeK = fields.largeK ?? this.largeK;\n this.largeEmbeddingKey = fields.largeEmbeddingKey ?? this.largeEmbeddingKey;\n this.largeEmbeddingModel = fields.largeEmbeddingModel;\n this.searchType = fields.searchType ?? this.searchType;\n }\n\n /**\n * Ranks documents based on their similarity to a query embedding using larger embeddings.\n *\n * This method takes a query embedding and a list of documents (smallResults) as input. Each document\n * in the smallResults array has previously been associated with a large embedding stored in its metadata.\n * Depending on the `searchType` (cosine, innerProduct, or euclidean), it calculates the similarity scores\n * between the query embedding and each document's large embedding. 
It then ranks the documents based on\n * these similarity scores, from the most similar to the least similar.\n *\n * The method returns a promise that resolves to an array of the top `largeK` documents, where `largeK`\n * is a class property defining the number of documents to return. This subset of documents is determined\n * by sorting the entire list of documents based on their similarity scores and then selecting the top\n * `largeK` documents.\n *\n * @param {number[]} embeddedQuery The embedding of the query, represented as an array of numbers.\n * @param {DocumentInterface[]} smallResults An array of documents, each with metadata that includes a large embedding for similarity comparison.\n * @returns {Promise<DocumentInterface[]>} A promise that resolves to an array of the top `largeK` ranked documents based on their similarity to the query embedding.\n */\n private _rankByLargeEmbeddings(\n embeddedQuery: number[],\n smallResults: DocumentInterface[]\n ): DocumentInterface[] {\n const largeEmbeddings: Array<number[]> = smallResults.map((doc) =>\n JSON.parse(doc.metadata[this.largeEmbeddingKey])\n );\n let func: () => Array<number[]>;\n\n switch (this.searchType) {\n case \"cosine\":\n func = () => cosineSimilarity([embeddedQuery], largeEmbeddings);\n break;\n case \"innerProduct\":\n func = () => innerProduct([embeddedQuery], largeEmbeddings);\n break;\n case \"euclidean\":\n func = () => euclideanDistance([embeddedQuery], largeEmbeddings);\n break;\n default:\n throw new Error(`Unknown search type: ${this.searchType}`);\n }\n\n // Calculate the similarity scores between the query embedding and the large embeddings\n const [similarityScores] = func();\n\n // Create an array of indices from 0 to N-1, where N is the number of documents\n let indices = Array.from(\n { length: smallResults.length },\n (_, index) => index\n );\n\n indices = indices\n .map((v, i) => [similarityScores[i], v])\n .sort(([a], [b]) => b - a)\n .slice(0, this.largeK)\n .map(([, i]) => i);\n\n return indices.map((i) => smallResults[i]);\n }\n\n async _getRelevantDocuments(query: string): Promise<DocumentInterface[]> {\n const [embeddedQuery, smallResults] = await Promise.all([\n this.largeEmbeddingModel.embedQuery(query),\n this.vectorStore.similaritySearch(query, this.smallK, this.filter),\n ]);\n\n return this._rankByLargeEmbeddings(embeddedQuery, smallResults);\n }\n\n /**\n * Override the default `addDocuments` method to embed the documents twice,\n * once using the larger embeddings model, and then again using the default\n * embedding model linked to the vector store.\n *\n * @param {DocumentInterface[]} documents - An array of documents to add to the vector store.\n * @param {AddDocumentOptions} options - An optional object containing additional options for adding documents.\n * @returns {Promise<string[] | void>} A promise that resolves to an array of the document IDs that were added to the vector store.\n */\n override addDocuments = async (\n documents: DocumentInterface[],\n options?: AddDocumentOptions\n ): Promise<string[] | void> => {\n // Insure documents metadata does not contain the large embedding key\n if (documents.some((doc) => this.largeEmbeddingKey in doc.metadata)) {\n throw new Error(\n `All documents must not contain the large embedding key: ${this.largeEmbeddingKey} in their metadata.`\n );\n }\n\n const allDocPageContent = documents.map((doc) => doc.pageContent);\n const allDocLargeEmbeddings =\n await this.largeEmbeddingModel.embedDocuments(allDocPageContent);\n\n const 
newDocuments: Array<DocumentInterface> = documents.map(\n (doc, idx) => ({\n ...doc,\n metadata: {\n ...doc.metadata,\n [this.largeEmbeddingKey]: JSON.stringify(allDocLargeEmbeddings[idx]),\n },\n })\n );\n\n return this.vectorStore.addDocuments(newDocuments, options);\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AAmEA,IAAa,sBAAb,cAEU,qBAA4B;CACpC,SAAS;CAET,SAAS;CAET,oBAAoB;CAEpB;CAEA,aAAsD;CAEtD,YACE,QACA;AACA,QAAM,OAAO;AACb,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,SAAS,OAAO,UAAU,KAAK;AACpC,OAAK,oBAAoB,OAAO,qBAAqB,KAAK;AAC1D,OAAK,sBAAsB,OAAO;AAClC,OAAK,aAAa,OAAO,cAAc,KAAK;;;;;;;;;;;;;;;;;;;;CAqB9C,uBACE,eACA,cACqB;EACrB,MAAM,kBAAmC,aAAa,KAAK,QACzD,KAAK,MAAM,IAAI,SAAS,KAAK,mBAAmB,CACjD;EACD,IAAI;AAEJ,UAAQ,KAAK,YAAb;GACE,KAAK;AACH,iBAAa,iBAAiB,CAAC,cAAc,EAAE,gBAAgB;AAC/D;GACF,KAAK;AACH,iBAAa,aAAa,CAAC,cAAc,EAAE,gBAAgB;AAC3D;GACF,KAAK;AACH,iBAAa,kBAAkB,CAAC,cAAc,EAAE,gBAAgB;AAChE;GACF,QACE,OAAM,IAAI,MAAM,wBAAwB,KAAK,aAAa;;EAI9D,MAAM,CAAC,oBAAoB,MAAM;EAGjC,IAAI,UAAU,MAAM,KAClB,EAAE,QAAQ,aAAa,QAAQ,GAC9B,GAAG,UAAU,MACf;AAED,YAAU,QACP,KAAK,GAAG,MAAM,CAAC,iBAAiB,IAAI,EAAE,CAAC,CACvC,MAAM,CAAC,IAAI,CAAC,OAAO,IAAI,EAAE,CACzB,MAAM,GAAG,KAAK,OAAO,CACrB,KAAK,GAAG,OAAO,EAAE;AAEpB,SAAO,QAAQ,KAAK,MAAM,aAAa,GAAG;;CAG5C,MAAM,sBAAsB,OAA6C;EACvE,MAAM,CAAC,eAAe,gBAAgB,MAAM,QAAQ,IAAI,CACtD,KAAK,oBAAoB,WAAW,MAAM,EAC1C,KAAK,YAAY,iBAAiB,OAAO,KAAK,QAAQ,KAAK,OAAO,CACnE,CAAC;AAEF,SAAO,KAAK,uBAAuB,eAAe,aAAa;;;;;;;;;;;CAYjE,eAAwB,OACtB,WACA,YAC6B;AAE7B,MAAI,UAAU,MAAM,QAAQ,KAAK,qBAAqB,IAAI,SAAS,CACjE,OAAM,IAAI,MACR,2DAA2D,KAAK,kBAAkB,qBACnF;EAGH,MAAM,oBAAoB,UAAU,KAAK,QAAQ,IAAI,YAAY;EACjE,MAAM,wBACJ,MAAM,KAAK,oBAAoB,eAAe,kBAAkB;EAElE,MAAM,eAAyC,UAAU,KACtD,KAAK,SAAS;GACb,GAAG;GACH,UAAU;IACR,GAAG,IAAI;KACN,KAAK,oBAAoB,KAAK,UAAU,sBAAsB,KAAK;IACrE;GACF,EACF;AAED,SAAO,KAAK,YAAY,aAAa,cAAc,QAAQ"}
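Note: the `matryoshka_retriever` payload above embeds the full TypeScript source for `MatryoshkaRetriever`, which runs a fast first-pass search over small embeddings in the vector store and then re-ranks the `smallK` candidates against full-size embeddings stored as JSON in each document's metadata. The following sketch shows one plausible wiring; the import paths and `text-embedding-3-*` model names are illustrative assumptions, not part of this diff.

```typescript
// Illustrative sketch only: import paths and model names are assumed.
import { Document } from "@langchain/core/documents";
import { OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { MatryoshkaRetriever } from "langchain/retrievers/matryoshka_retriever";

// The vector store's own (small) embeddings drive the cheap first pass;
// the large model's vectors are stashed under `largeEmbeddingKey` metadata.
const smallEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-small",
  dimensions: 512, // truncated MRL vector for the fast first pass
});
const largeEmbeddings = new OpenAIEmbeddings({
  model: "text-embedding-3-large",
  dimensions: 3072, // full-size vector for the accurate re-rank
});

const retriever = new MatryoshkaRetriever({
  vectorStore: new MemoryVectorStore(smallEmbeddings),
  largeEmbeddingModel: largeEmbeddings,
  smallK: 50, // first-pass candidates (class default)
  largeK: 8, // documents kept after re-ranking (class default)
  searchType: "cosine",
});

// Add documents through the retriever, not the store, so both embeddings
// are computed; addDocuments throws if metadata already contains the
// large-embedding key (default "lc_large_embedding").
await retriever.addDocuments([
  new Document({
    pageContent:
      "Matryoshka embeddings nest usable low-dimensional prefixes in one vector.",
  }),
]);

const results = await retriever.invoke("What is adaptive retrieval?");
```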
@@ -1 +1 @@
- {"version":3,"file":"multi_query.cjs","names":["BaseOutputParser","PromptTemplate","BaseRetriever","LLMChain"],"sources":["../../src/retrievers/multi_query.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport {\n BaseRetriever,\n type BaseRetrieverInput,\n type BaseRetrieverInterface,\n} from \"@langchain/core/retrievers\";\nimport { Document } from \"@langchain/core/documents\";\nimport { BaseOutputParser } from \"@langchain/core/output_parsers\";\nimport { PromptTemplate, BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { CallbackManagerForRetrieverRun } from \"@langchain/core/callbacks/manager\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\n\ninterface LineList {\n lines: string[];\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type MultiDocs = Document<Record<string, any>>[];\n\nclass LineListOutputParser extends BaseOutputParser<LineList> {\n static lc_name() {\n return \"LineListOutputParser\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n async parse(text: string): Promise<LineList> {\n const startKeyIndex = text.indexOf(\"<questions>\");\n const endKeyIndex = text.indexOf(\"</questions>\");\n const questionsStartIndex =\n startKeyIndex === -1 ? 0 : startKeyIndex + \"<questions>\".length;\n const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;\n const lines = text\n .slice(questionsStartIndex, questionsEndIndex)\n .trim()\n .split(\"\\n\")\n .filter((line) => line.trim() !== \"\");\n return { lines };\n }\n\n getFormatInstructions(): string {\n throw new Error(\"Not implemented.\");\n }\n}\n\n// Create template\nconst DEFAULT_QUERY_PROMPT = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"question\", \"queryCount\"],\n template: `You are an AI language model assistant. Your task is\nto generate {queryCount} different versions of the given user\nquestion to retrieve relevant documents from a vector database.\nBy generating multiple perspectives on the user question,\nyour goal is to help the user overcome some of the limitations\nof distance-based similarity search.\n\nProvide these alternative questions separated by newlines between XML tags. For example:\n\n<questions>\nQuestion 1\nQuestion 2\nQuestion 3\n</questions>\n\nOriginal question: {question}`,\n});\n\nexport interface MultiQueryRetrieverInput extends BaseRetrieverInput {\n retriever: BaseRetrieverInterface;\n /** @deprecated Pass a custom prompt into `.fromLLM` instead. 
*/\n llmChain: LLMChain<LineList>;\n queryCount?: number;\n parserKey?: string;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: MultiDocs) => MultiDocs;\n}\n\n/**\n * @example\n * ```typescript\n * const retriever = new MultiQueryRetriever.fromLLM({\n * llm: new ChatAnthropic({}),\n * retriever: new MemoryVectorStore().asRetriever(),\n * verbose: true,\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What are mitochondria made of?\",\n * );\n * ```\n */\nexport class MultiQueryRetriever extends BaseRetriever {\n static lc_name() {\n return \"MultiQueryRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n private retriever: BaseRetrieverInterface;\n\n private llmChain: LLMChain<LineList>;\n\n private queryCount = 3;\n\n private parserKey = \"lines\";\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: MultiQueryRetrieverInput[\"documentCompressorFilteringFn\"];\n\n constructor(fields: MultiQueryRetrieverInput) {\n super(fields);\n this.retriever = fields.retriever;\n this.llmChain = fields.llmChain;\n this.queryCount = fields.queryCount ?? this.queryCount;\n this.parserKey = fields.parserKey ?? this.parserKey;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n static fromLLM(\n fields: Omit<MultiQueryRetrieverInput, \"llmChain\"> & {\n llm: BaseLanguageModelInterface;\n prompt?: BasePromptTemplate;\n }\n ): MultiQueryRetriever {\n const {\n retriever,\n llm,\n prompt = DEFAULT_QUERY_PROMPT,\n queryCount,\n parserKey,\n ...rest\n } = fields;\n const outputParser = new LineListOutputParser();\n const llmChain = new LLMChain({ llm, prompt, outputParser });\n return new this({ retriever, llmChain, queryCount, parserKey, ...rest });\n }\n\n // Generate the different queries for each retrieval, using our llmChain\n private async _generateQueries(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<string[]> {\n const response = await this.llmChain.call(\n { question, queryCount: this.queryCount },\n runManager?.getChild()\n );\n const lines = response.text[this.parserKey] || [];\n if (this.verbose) {\n console.log(`Generated queries: ${lines}`);\n }\n return lines;\n }\n\n // Retrieve documents using the original retriever\n private async _retrieveDocuments(\n queries: string[],\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const documents: Document[] = [];\n await Promise.all(\n queries.map(async (query) => {\n const docs = await this.retriever.invoke(query, runManager?.getChild());\n documents.push(...docs);\n })\n );\n return documents;\n }\n\n // Deduplicate the documents that were returned in multiple retrievals\n private _uniqueUnion(documents: Document[]): Document[] {\n const uniqueDocumentsDict: { [key: string]: Document } = {};\n\n for (const doc of documents) {\n const key = `${doc.pageContent}:${JSON.stringify(\n Object.entries(doc.metadata).sort()\n )}`;\n uniqueDocumentsDict[key] = doc;\n }\n\n const uniqueDocuments = Object.values(uniqueDocumentsDict);\n return uniqueDocuments;\n }\n\n async _getRelevantDocuments(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const queries = await this._generateQueries(question, runManager);\n const documents = await this._retrieveDocuments(queries, runManager);\n const uniqueDocuments = 
this._uniqueUnion(documents);\n\n let outputDocs = uniqueDocuments;\n if (this.documentCompressor && uniqueDocuments.length) {\n outputDocs = await this.documentCompressor.compressDocuments(\n uniqueDocuments,\n question,\n runManager?.getChild()\n );\n if (this.documentCompressorFilteringFn) {\n outputDocs = this.documentCompressorFilteringFn(outputDocs);\n }\n }\n\n return outputDocs;\n }\n}\n"],"mappings":";;;;;;;;AAoBA,IAAM,uBAAN,cAAmCA,+BAAAA,iBAA2B;CAC5D,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD,MAAM,MAAM,MAAiC;EAC3C,MAAM,gBAAgB,KAAK,QAAQ,cAAc;EACjD,MAAM,cAAc,KAAK,QAAQ,eAAe;EAChD,MAAM,sBACJ,kBAAkB,KAAK,IAAI,gBAAgB;EAC7C,MAAM,oBAAoB,gBAAgB,KAAK,KAAK,SAAS;AAM7D,SAAO,EAAE,OALK,KACX,MAAM,qBAAqB,kBAAkB,CAC7C,MAAM,CACN,MAAM,KAAK,CACX,QAAQ,SAAS,KAAK,MAAM,KAAK,GAAG,EACvB;;CAGlB,wBAAgC;AAC9B,QAAM,IAAI,MAAM,mBAAmB;;;AAKvC,MAAM,uCAAuC,IAAIC,wBAAAA,eAAe;CAC9D,gBAAgB,CAAC,YAAY,aAAa;CAC1C,UAAU;;;;;;;;;;;;;;;;CAgBX,CAAC;;;;;;;;;;;;;;AAyBF,IAAa,sBAAb,cAAyCC,2BAAAA,cAAc;CACrD,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD;CAEA;CAEA,aAAqB;CAErB,YAAoB;CAEpB;CAEA;CAEA,YAAY,QAAkC;AAC5C,QAAM,OAAO;AACb,OAAK,YAAY,OAAO;AACxB,OAAK,WAAW,OAAO;AACvB,OAAK,aAAa,OAAO,cAAc,KAAK;AAC5C,OAAK,YAAY,OAAO,aAAa,KAAK;AAC1C,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,OAAO,QACL,QAIqB;EACrB,MAAM,EACJ,WACA,KACA,SAAS,sBACT,YACA,WACA,GAAG,SACD;EAEJ,MAAM,WAAW,IAAIC,kBAAAA,SAAS;GAAE;GAAK;GAAQ,cADxB,IAAI,sBAAsB;GACY,CAAC;AAC5D,SAAO,IAAI,KAAK;GAAE;GAAW;GAAU;GAAY;GAAW,GAAG;GAAM,CAAC;;CAI1E,MAAc,iBACZ,UACA,YACmB;EAKnB,MAAM,SAJW,MAAM,KAAK,SAAS,KACnC;GAAE;GAAU,YAAY,KAAK;GAAY,EACzC,YAAY,UAAU,CACvB,EACsB,KAAK,KAAK,cAAc,EAAE;AACjD,MAAI,KAAK,QACP,SAAQ,IAAI,sBAAsB,QAAQ;AAE5C,SAAO;;CAIT,MAAc,mBACZ,SACA,YACqB;EACrB,MAAM,YAAwB,EAAE;AAChC,QAAM,QAAQ,IACZ,QAAQ,IAAI,OAAO,UAAU;GAC3B,MAAM,OAAO,MAAM,KAAK,UAAU,OAAO,OAAO,YAAY,UAAU,CAAC;AACvE,aAAU,KAAK,GAAG,KAAK;IACvB,CACH;AACD,SAAO;;CAIT,aAAqB,WAAmC;EACtD,MAAM,sBAAmD,EAAE;AAE3D,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,MAAM,GAAG,IAAI,YAAY,GAAG,KAAK,UACrC,OAAO,QAAQ,IAAI,SAAS,CAAC,MAAM,CACpC;AACD,uBAAoB,OAAO;;AAI7B,SADwB,OAAO,OAAO,oBAAoB;;CAI5D,MAAM,sBACJ,UACA,YACqB;EACrB,MAAM,UAAU,MAAM,KAAK,iBAAiB,UAAU,WAAW;EACjE,MAAM,YAAY,MAAM,KAAK,mBAAmB,SAAS,WAAW;EACpE,MAAM,kBAAkB,KAAK,aAAa,UAAU;EAEpD,IAAI,aAAa;AACjB,MAAI,KAAK,sBAAsB,gBAAgB,QAAQ;AACrD,gBAAa,MAAM,KAAK,mBAAmB,kBACzC,iBACA,UACA,YAAY,UAAU,CACvB;AACD,OAAI,KAAK,8BACP,cAAa,KAAK,8BAA8B,WAAW;;AAI/D,SAAO"}
+ {"version":3,"file":"multi_query.cjs","names":["BaseOutputParser","PromptTemplate","BaseRetriever","LLMChain"],"sources":["../../src/retrievers/multi_query.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport {\n BaseRetriever,\n type BaseRetrieverInput,\n type BaseRetrieverInterface,\n} from \"@langchain/core/retrievers\";\nimport { Document } from \"@langchain/core/documents\";\nimport { BaseOutputParser } from \"@langchain/core/output_parsers\";\nimport { PromptTemplate, BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { CallbackManagerForRetrieverRun } from \"@langchain/core/callbacks/manager\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\n\ninterface LineList {\n lines: string[];\n}\n\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport type MultiDocs = Document<Record<string, any>>[];\n\nclass LineListOutputParser extends BaseOutputParser<LineList> {\n static lc_name() {\n return \"LineListOutputParser\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n async parse(text: string): Promise<LineList> {\n const startKeyIndex = text.indexOf(\"<questions>\");\n const endKeyIndex = text.indexOf(\"</questions>\");\n const questionsStartIndex =\n startKeyIndex === -1 ? 0 : startKeyIndex + \"<questions>\".length;\n const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;\n const lines = text\n .slice(questionsStartIndex, questionsEndIndex)\n .trim()\n .split(\"\\n\")\n .filter((line) => line.trim() !== \"\");\n return { lines };\n }\n\n getFormatInstructions(): string {\n throw new Error(\"Not implemented.\");\n }\n}\n\n// Create template\nconst DEFAULT_QUERY_PROMPT = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"question\", \"queryCount\"],\n template: `You are an AI language model assistant. Your task is\nto generate {queryCount} different versions of the given user\nquestion to retrieve relevant documents from a vector database.\nBy generating multiple perspectives on the user question,\nyour goal is to help the user overcome some of the limitations\nof distance-based similarity search.\n\nProvide these alternative questions separated by newlines between XML tags. For example:\n\n<questions>\nQuestion 1\nQuestion 2\nQuestion 3\n</questions>\n\nOriginal question: {question}`,\n});\n\nexport interface MultiQueryRetrieverInput extends BaseRetrieverInput {\n retriever: BaseRetrieverInterface;\n /** @deprecated Pass a custom prompt into `.fromLLM` instead. 
*/\n llmChain: LLMChain<LineList>;\n queryCount?: number;\n parserKey?: string;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: MultiDocs) => MultiDocs;\n}\n\n/**\n * @example\n * ```typescript\n * const retriever = new MultiQueryRetriever.fromLLM({\n * llm: new ChatAnthropic({}),\n * retriever: new MemoryVectorStore().asRetriever(),\n * verbose: true,\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What are mitochondria made of?\",\n * );\n * ```\n */\nexport class MultiQueryRetriever extends BaseRetriever {\n static lc_name() {\n return \"MultiQueryRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n private retriever: BaseRetrieverInterface;\n\n private llmChain: LLMChain<LineList>;\n\n private queryCount = 3;\n\n private parserKey = \"lines\";\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: MultiQueryRetrieverInput[\"documentCompressorFilteringFn\"];\n\n constructor(fields: MultiQueryRetrieverInput) {\n super(fields);\n this.retriever = fields.retriever;\n this.llmChain = fields.llmChain;\n this.queryCount = fields.queryCount ?? this.queryCount;\n this.parserKey = fields.parserKey ?? this.parserKey;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n static fromLLM(\n fields: Omit<MultiQueryRetrieverInput, \"llmChain\"> & {\n llm: BaseLanguageModelInterface;\n prompt?: BasePromptTemplate;\n }\n ): MultiQueryRetriever {\n const {\n retriever,\n llm,\n prompt = DEFAULT_QUERY_PROMPT,\n queryCount,\n parserKey,\n ...rest\n } = fields;\n const outputParser = new LineListOutputParser();\n const llmChain = new LLMChain({ llm, prompt, outputParser });\n return new this({ retriever, llmChain, queryCount, parserKey, ...rest });\n }\n\n // Generate the different queries for each retrieval, using our llmChain\n private async _generateQueries(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<string[]> {\n const response = await this.llmChain.call(\n { question, queryCount: this.queryCount },\n runManager?.getChild()\n );\n const lines = response.text[this.parserKey] || [];\n if (this.verbose) {\n console.log(`Generated queries: ${lines}`);\n }\n return lines;\n }\n\n // Retrieve documents using the original retriever\n private async _retrieveDocuments(\n queries: string[],\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const documents: Document[] = [];\n await Promise.all(\n queries.map(async (query) => {\n const docs = await this.retriever.invoke(query, runManager?.getChild());\n documents.push(...docs);\n })\n );\n return documents;\n }\n\n // Deduplicate the documents that were returned in multiple retrievals\n private _uniqueUnion(documents: Document[]): Document[] {\n const uniqueDocumentsDict: { [key: string]: Document } = {};\n\n for (const doc of documents) {\n const key = `${doc.pageContent}:${JSON.stringify(\n Object.entries(doc.metadata).sort()\n )}`;\n uniqueDocumentsDict[key] = doc;\n }\n\n const uniqueDocuments = Object.values(uniqueDocumentsDict);\n return uniqueDocuments;\n }\n\n async _getRelevantDocuments(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const queries = await this._generateQueries(question, runManager);\n const documents = await this._retrieveDocuments(queries, runManager);\n const uniqueDocuments = 
this._uniqueUnion(documents);\n\n let outputDocs = uniqueDocuments;\n if (this.documentCompressor && uniqueDocuments.length) {\n outputDocs = await this.documentCompressor.compressDocuments(\n uniqueDocuments,\n question,\n runManager?.getChild()\n );\n if (this.documentCompressorFilteringFn) {\n outputDocs = this.documentCompressorFilteringFn(outputDocs);\n }\n }\n\n return outputDocs;\n }\n}\n"],"mappings":";;;;;;;;AAoBA,IAAM,uBAAN,cAAmCA,+BAAAA,iBAA2B;CAC5D,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD,MAAM,MAAM,MAAiC;EAC3C,MAAM,gBAAgB,KAAK,QAAQ,cAAc;EACjD,MAAM,cAAc,KAAK,QAAQ,eAAe;EAChD,MAAM,sBACJ,kBAAkB,KAAK,IAAI,gBAAgB;EAC7C,MAAM,oBAAoB,gBAAgB,KAAK,KAAK,SAAS;AAM7D,SAAO,EAAE,OALK,KACX,MAAM,qBAAqB,kBAAkB,CAC7C,MAAM,CACN,MAAM,KAAK,CACX,QAAQ,SAAS,KAAK,MAAM,KAAK,GAAG,EACvB;;CAGlB,wBAAgC;AAC9B,QAAM,IAAI,MAAM,mBAAmB;;;AAKvC,MAAM,uCAAuC,IAAIC,wBAAAA,eAAe;CAC9D,gBAAgB,CAAC,YAAY,aAAa;CAC1C,UAAU;;;;;;;;;;;;;;;;CAgBX,CAAC;;;;;;;;;;;;;;AAyBF,IAAa,sBAAb,cAAyCC,2BAAAA,cAAc;CACrD,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD;CAEA;CAEA,aAAqB;CAErB,YAAoB;CAEpB;CAEA;CAEA,YAAY,QAAkC;AAC5C,QAAM,OAAO;AACb,OAAK,YAAY,OAAO;AACxB,OAAK,WAAW,OAAO;AACvB,OAAK,aAAa,OAAO,cAAc,KAAK;AAC5C,OAAK,YAAY,OAAO,aAAa,KAAK;AAC1C,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,OAAO,QACL,QAIqB;EACrB,MAAM,EACJ,WACA,KACA,SAAS,sBACT,YACA,WACA,GAAG,SACD;EAEJ,MAAM,WAAW,IAAIC,kBAAAA,SAAS;GAAE;GAAK;GAAQ,cADxB,IAAI,sBAAsB;GACY,CAAC;AAC5D,SAAO,IAAI,KAAK;GAAE;GAAW;GAAU;GAAY;GAAW,GAAG;GAAM,CAAC;;CAI1E,MAAc,iBACZ,UACA,YACmB;EAKnB,MAAM,SAJW,MAAM,KAAK,SAAS,KACnC;GAAE;GAAU,YAAY,KAAK;GAAY,EACzC,YAAY,UAAU,CACvB,EACsB,KAAK,KAAK,cAAc,EAAE;AACjD,MAAI,KAAK,QACP,SAAQ,IAAI,sBAAsB,QAAQ;AAE5C,SAAO;;CAIT,MAAc,mBACZ,SACA,YACqB;EACrB,MAAM,YAAwB,EAAE;AAChC,QAAM,QAAQ,IACZ,QAAQ,IAAI,OAAO,UAAU;GAC3B,MAAM,OAAO,MAAM,KAAK,UAAU,OAAO,OAAO,YAAY,UAAU,CAAC;AACvE,aAAU,KAAK,GAAG,KAAK;IACvB,CACH;AACD,SAAO;;CAIT,aAAqB,WAAmC;EACtD,MAAM,sBAAmD,EAAE;AAE3D,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,MAAM,GAAG,IAAI,YAAY,GAAG,KAAK,UACrC,OAAO,QAAQ,IAAI,SAAS,CAAC,MAAM,CACpC;AACD,uBAAoB,OAAO;;AAI7B,SADwB,OAAO,OAAO,oBAAoB;;CAI5D,MAAM,sBACJ,UACA,YACqB;EACrB,MAAM,UAAU,MAAM,KAAK,iBAAiB,UAAU,WAAW;EACjE,MAAM,YAAY,MAAM,KAAK,mBAAmB,SAAS,WAAW;EACpE,MAAM,kBAAkB,KAAK,aAAa,UAAU;EAEpD,IAAI,aAAa;AACjB,MAAI,KAAK,sBAAsB,gBAAgB,QAAQ;AACrD,gBAAa,MAAM,KAAK,mBAAmB,kBACzC,iBACA,UACA,YAAY,UAAU,CACvB;AACD,OAAI,KAAK,8BACP,cAAa,KAAK,8BAA8B,WAAW;;AAI/D,SAAO"}
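Note: the `multi_query` payloads embed `MultiQueryRetriever`, which prompts an LLM for `queryCount` rephrasings of the user question, retrieves documents for each rephrasing in parallel, and returns the deduplicated union (optionally compressed and filtered). One caveat worth flagging: `fromLLM` is a static factory and should be called without `new`; the `@example` in the embedded JSDoc prefixes it with `new`, which would throw at runtime since class methods are not constructible. A minimal sketch, assuming `ChatOpenAI` and the import paths shown:

```typescript
// Sketch under assumptions: the chat model, model name, and import paths
// are placeholders; only the MultiQueryRetriever API comes from the diff.
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
import { MemoryVectorStore } from "langchain/vectorstores/memory";
import { MultiQueryRetriever } from "langchain/retrievers/multi_query";

const vectorStore = new MemoryVectorStore(new OpenAIEmbeddings());

// Static factory: builds the internal LLMChain with the default
// <questions>-tag prompt and a line-list output parser.
const retriever = MultiQueryRetriever.fromLLM({
  llm: new ChatOpenAI({ model: "gpt-4o-mini" }),
  retriever: vectorStore.asRetriever(),
  queryCount: 3, // alternative phrasings to generate (class default)
});

// Internally: generate queries -> retrieve per query -> unique union.
const docs = await retriever.invoke("What are mitochondria made of?");
```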
@@ -1 +1 @@
- {"version":3,"file":"multi_query.js","names":[],"sources":["../../src/retrievers/multi_query.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport {\n BaseRetriever,\n type BaseRetrieverInput,\n type BaseRetrieverInterface,\n} from \"@langchain/core/retrievers\";\nimport { Document } from \"@langchain/core/documents\";\nimport { BaseOutputParser } from \"@langchain/core/output_parsers\";\nimport { PromptTemplate, BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { CallbackManagerForRetrieverRun } from \"@langchain/core/callbacks/manager\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\n\ninterface LineList {\n lines: string[];\n}\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type MultiDocs = Document<Record<string, any>>[];\n\nclass LineListOutputParser extends BaseOutputParser<LineList> {\n static lc_name() {\n return \"LineListOutputParser\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n async parse(text: string): Promise<LineList> {\n const startKeyIndex = text.indexOf(\"<questions>\");\n const endKeyIndex = text.indexOf(\"</questions>\");\n const questionsStartIndex =\n startKeyIndex === -1 ? 0 : startKeyIndex + \"<questions>\".length;\n const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;\n const lines = text\n .slice(questionsStartIndex, questionsEndIndex)\n .trim()\n .split(\"\\n\")\n .filter((line) => line.trim() !== \"\");\n return { lines };\n }\n\n getFormatInstructions(): string {\n throw new Error(\"Not implemented.\");\n }\n}\n\n// Create template\nconst DEFAULT_QUERY_PROMPT = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"question\", \"queryCount\"],\n template: `You are an AI language model assistant. Your task is\nto generate {queryCount} different versions of the given user\nquestion to retrieve relevant documents from a vector database.\nBy generating multiple perspectives on the user question,\nyour goal is to help the user overcome some of the limitations\nof distance-based similarity search.\n\nProvide these alternative questions separated by newlines between XML tags. For example:\n\n<questions>\nQuestion 1\nQuestion 2\nQuestion 3\n</questions>\n\nOriginal question: {question}`,\n});\n\nexport interface MultiQueryRetrieverInput extends BaseRetrieverInput {\n retriever: BaseRetrieverInterface;\n /** @deprecated Pass a custom prompt into `.fromLLM` instead. 
*/\n llmChain: LLMChain<LineList>;\n queryCount?: number;\n parserKey?: string;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: MultiDocs) => MultiDocs;\n}\n\n/**\n * @example\n * ```typescript\n * const retriever = new MultiQueryRetriever.fromLLM({\n * llm: new ChatAnthropic({}),\n * retriever: new MemoryVectorStore().asRetriever(),\n * verbose: true,\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What are mitochondria made of?\",\n * );\n * ```\n */\nexport class MultiQueryRetriever extends BaseRetriever {\n static lc_name() {\n return \"MultiQueryRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n private retriever: BaseRetrieverInterface;\n\n private llmChain: LLMChain<LineList>;\n\n private queryCount = 3;\n\n private parserKey = \"lines\";\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: MultiQueryRetrieverInput[\"documentCompressorFilteringFn\"];\n\n constructor(fields: MultiQueryRetrieverInput) {\n super(fields);\n this.retriever = fields.retriever;\n this.llmChain = fields.llmChain;\n this.queryCount = fields.queryCount ?? this.queryCount;\n this.parserKey = fields.parserKey ?? this.parserKey;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n static fromLLM(\n fields: Omit<MultiQueryRetrieverInput, \"llmChain\"> & {\n llm: BaseLanguageModelInterface;\n prompt?: BasePromptTemplate;\n }\n ): MultiQueryRetriever {\n const {\n retriever,\n llm,\n prompt = DEFAULT_QUERY_PROMPT,\n queryCount,\n parserKey,\n ...rest\n } = fields;\n const outputParser = new LineListOutputParser();\n const llmChain = new LLMChain({ llm, prompt, outputParser });\n return new this({ retriever, llmChain, queryCount, parserKey, ...rest });\n }\n\n // Generate the different queries for each retrieval, using our llmChain\n private async _generateQueries(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<string[]> {\n const response = await this.llmChain.call(\n { question, queryCount: this.queryCount },\n runManager?.getChild()\n );\n const lines = response.text[this.parserKey] || [];\n if (this.verbose) {\n console.log(`Generated queries: ${lines}`);\n }\n return lines;\n }\n\n // Retrieve documents using the original retriever\n private async _retrieveDocuments(\n queries: string[],\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const documents: Document[] = [];\n await Promise.all(\n queries.map(async (query) => {\n const docs = await this.retriever.invoke(query, runManager?.getChild());\n documents.push(...docs);\n })\n );\n return documents;\n }\n\n // Deduplicate the documents that were returned in multiple retrievals\n private _uniqueUnion(documents: Document[]): Document[] {\n const uniqueDocumentsDict: { [key: string]: Document } = {};\n\n for (const doc of documents) {\n const key = `${doc.pageContent}:${JSON.stringify(\n Object.entries(doc.metadata).sort()\n )}`;\n uniqueDocumentsDict[key] = doc;\n }\n\n const uniqueDocuments = Object.values(uniqueDocumentsDict);\n return uniqueDocuments;\n }\n\n async _getRelevantDocuments(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const queries = await this._generateQueries(question, runManager);\n const documents = await this._retrieveDocuments(queries, runManager);\n const uniqueDocuments = 
this._uniqueUnion(documents);\n\n let outputDocs = uniqueDocuments;\n if (this.documentCompressor && uniqueDocuments.length) {\n outputDocs = await this.documentCompressor.compressDocuments(\n uniqueDocuments,\n question,\n runManager?.getChild()\n );\n if (this.documentCompressorFilteringFn) {\n outputDocs = this.documentCompressorFilteringFn(outputDocs);\n }\n }\n\n return outputDocs;\n }\n}\n"],"mappings":";;;;;;;AAoBA,IAAM,uBAAN,cAAmC,iBAA2B;CAC5D,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD,MAAM,MAAM,MAAiC;EAC3C,MAAM,gBAAgB,KAAK,QAAQ,cAAc;EACjD,MAAM,cAAc,KAAK,QAAQ,eAAe;EAChD,MAAM,sBACJ,kBAAkB,KAAK,IAAI,gBAAgB;EAC7C,MAAM,oBAAoB,gBAAgB,KAAK,KAAK,SAAS;AAM7D,SAAO,EAAE,OALK,KACX,MAAM,qBAAqB,kBAAkB,CAC7C,MAAM,CACN,MAAM,KAAK,CACX,QAAQ,SAAS,KAAK,MAAM,KAAK,GAAG,EACvB;;CAGlB,wBAAgC;AAC9B,QAAM,IAAI,MAAM,mBAAmB;;;AAKvC,MAAM,uCAAuC,IAAI,eAAe;CAC9D,gBAAgB,CAAC,YAAY,aAAa;CAC1C,UAAU;;;;;;;;;;;;;;;;CAgBX,CAAC;;;;;;;;;;;;;;AAyBF,IAAa,sBAAb,cAAyC,cAAc;CACrD,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD;CAEA;CAEA,aAAqB;CAErB,YAAoB;CAEpB;CAEA;CAEA,YAAY,QAAkC;AAC5C,QAAM,OAAO;AACb,OAAK,YAAY,OAAO;AACxB,OAAK,WAAW,OAAO;AACvB,OAAK,aAAa,OAAO,cAAc,KAAK;AAC5C,OAAK,YAAY,OAAO,aAAa,KAAK;AAC1C,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,OAAO,QACL,QAIqB;EACrB,MAAM,EACJ,WACA,KACA,SAAS,sBACT,YACA,WACA,GAAG,SACD;EAEJ,MAAM,WAAW,IAAI,SAAS;GAAE;GAAK;GAAQ,cADxB,IAAI,sBAAsB;GACY,CAAC;AAC5D,SAAO,IAAI,KAAK;GAAE;GAAW;GAAU;GAAY;GAAW,GAAG;GAAM,CAAC;;CAI1E,MAAc,iBACZ,UACA,YACmB;EAKnB,MAAM,SAJW,MAAM,KAAK,SAAS,KACnC;GAAE;GAAU,YAAY,KAAK;GAAY,EACzC,YAAY,UAAU,CACvB,EACsB,KAAK,KAAK,cAAc,EAAE;AACjD,MAAI,KAAK,QACP,SAAQ,IAAI,sBAAsB,QAAQ;AAE5C,SAAO;;CAIT,MAAc,mBACZ,SACA,YACqB;EACrB,MAAM,YAAwB,EAAE;AAChC,QAAM,QAAQ,IACZ,QAAQ,IAAI,OAAO,UAAU;GAC3B,MAAM,OAAO,MAAM,KAAK,UAAU,OAAO,OAAO,YAAY,UAAU,CAAC;AACvE,aAAU,KAAK,GAAG,KAAK;IACvB,CACH;AACD,SAAO;;CAIT,aAAqB,WAAmC;EACtD,MAAM,sBAAmD,EAAE;AAE3D,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,MAAM,GAAG,IAAI,YAAY,GAAG,KAAK,UACrC,OAAO,QAAQ,IAAI,SAAS,CAAC,MAAM,CACpC;AACD,uBAAoB,OAAO;;AAI7B,SADwB,OAAO,OAAO,oBAAoB;;CAI5D,MAAM,sBACJ,UACA,YACqB;EACrB,MAAM,UAAU,MAAM,KAAK,iBAAiB,UAAU,WAAW;EACjE,MAAM,YAAY,MAAM,KAAK,mBAAmB,SAAS,WAAW;EACpE,MAAM,kBAAkB,KAAK,aAAa,UAAU;EAEpD,IAAI,aAAa;AACjB,MAAI,KAAK,sBAAsB,gBAAgB,QAAQ;AACrD,gBAAa,MAAM,KAAK,mBAAmB,kBACzC,iBACA,UACA,YAAY,UAAU,CACvB;AACD,OAAI,KAAK,8BACP,cAAa,KAAK,8BAA8B,WAAW;;AAI/D,SAAO"}
+ {"version":3,"file":"multi_query.js","names":[],"sources":["../../src/retrievers/multi_query.ts"],"sourcesContent":["import type { BaseLanguageModelInterface } from \"@langchain/core/language_models/base\";\nimport {\n BaseRetriever,\n type BaseRetrieverInput,\n type BaseRetrieverInterface,\n} from \"@langchain/core/retrievers\";\nimport { Document } from \"@langchain/core/documents\";\nimport { BaseOutputParser } from \"@langchain/core/output_parsers\";\nimport { PromptTemplate, BasePromptTemplate } from \"@langchain/core/prompts\";\nimport { CallbackManagerForRetrieverRun } from \"@langchain/core/callbacks/manager\";\nimport { LLMChain } from \"../chains/llm_chain.js\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\n\ninterface LineList {\n lines: string[];\n}\n\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport type MultiDocs = Document<Record<string, any>>[];\n\nclass LineListOutputParser extends BaseOutputParser<LineList> {\n static lc_name() {\n return \"LineListOutputParser\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n async parse(text: string): Promise<LineList> {\n const startKeyIndex = text.indexOf(\"<questions>\");\n const endKeyIndex = text.indexOf(\"</questions>\");\n const questionsStartIndex =\n startKeyIndex === -1 ? 0 : startKeyIndex + \"<questions>\".length;\n const questionsEndIndex = endKeyIndex === -1 ? text.length : endKeyIndex;\n const lines = text\n .slice(questionsStartIndex, questionsEndIndex)\n .trim()\n .split(\"\\n\")\n .filter((line) => line.trim() !== \"\");\n return { lines };\n }\n\n getFormatInstructions(): string {\n throw new Error(\"Not implemented.\");\n }\n}\n\n// Create template\nconst DEFAULT_QUERY_PROMPT = /* #__PURE__ */ new PromptTemplate({\n inputVariables: [\"question\", \"queryCount\"],\n template: `You are an AI language model assistant. Your task is\nto generate {queryCount} different versions of the given user\nquestion to retrieve relevant documents from a vector database.\nBy generating multiple perspectives on the user question,\nyour goal is to help the user overcome some of the limitations\nof distance-based similarity search.\n\nProvide these alternative questions separated by newlines between XML tags. For example:\n\n<questions>\nQuestion 1\nQuestion 2\nQuestion 3\n</questions>\n\nOriginal question: {question}`,\n});\n\nexport interface MultiQueryRetrieverInput extends BaseRetrieverInput {\n retriever: BaseRetrieverInterface;\n /** @deprecated Pass a custom prompt into `.fromLLM` instead. 
*/\n llmChain: LLMChain<LineList>;\n queryCount?: number;\n parserKey?: string;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: MultiDocs) => MultiDocs;\n}\n\n/**\n * @example\n * ```typescript\n * const retriever = new MultiQueryRetriever.fromLLM({\n * llm: new ChatAnthropic({}),\n * retriever: new MemoryVectorStore().asRetriever(),\n * verbose: true,\n * });\n * const retrievedDocs = await retriever.invoke(\n * \"What are mitochondria made of?\",\n * );\n * ```\n */\nexport class MultiQueryRetriever extends BaseRetriever {\n static lc_name() {\n return \"MultiQueryRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"multiquery\"];\n\n private retriever: BaseRetrieverInterface;\n\n private llmChain: LLMChain<LineList>;\n\n private queryCount = 3;\n\n private parserKey = \"lines\";\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: MultiQueryRetrieverInput[\"documentCompressorFilteringFn\"];\n\n constructor(fields: MultiQueryRetrieverInput) {\n super(fields);\n this.retriever = fields.retriever;\n this.llmChain = fields.llmChain;\n this.queryCount = fields.queryCount ?? this.queryCount;\n this.parserKey = fields.parserKey ?? this.parserKey;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n static fromLLM(\n fields: Omit<MultiQueryRetrieverInput, \"llmChain\"> & {\n llm: BaseLanguageModelInterface;\n prompt?: BasePromptTemplate;\n }\n ): MultiQueryRetriever {\n const {\n retriever,\n llm,\n prompt = DEFAULT_QUERY_PROMPT,\n queryCount,\n parserKey,\n ...rest\n } = fields;\n const outputParser = new LineListOutputParser();\n const llmChain = new LLMChain({ llm, prompt, outputParser });\n return new this({ retriever, llmChain, queryCount, parserKey, ...rest });\n }\n\n // Generate the different queries for each retrieval, using our llmChain\n private async _generateQueries(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<string[]> {\n const response = await this.llmChain.call(\n { question, queryCount: this.queryCount },\n runManager?.getChild()\n );\n const lines = response.text[this.parserKey] || [];\n if (this.verbose) {\n console.log(`Generated queries: ${lines}`);\n }\n return lines;\n }\n\n // Retrieve documents using the original retriever\n private async _retrieveDocuments(\n queries: string[],\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const documents: Document[] = [];\n await Promise.all(\n queries.map(async (query) => {\n const docs = await this.retriever.invoke(query, runManager?.getChild());\n documents.push(...docs);\n })\n );\n return documents;\n }\n\n // Deduplicate the documents that were returned in multiple retrievals\n private _uniqueUnion(documents: Document[]): Document[] {\n const uniqueDocumentsDict: { [key: string]: Document } = {};\n\n for (const doc of documents) {\n const key = `${doc.pageContent}:${JSON.stringify(\n Object.entries(doc.metadata).sort()\n )}`;\n uniqueDocumentsDict[key] = doc;\n }\n\n const uniqueDocuments = Object.values(uniqueDocumentsDict);\n return uniqueDocuments;\n }\n\n async _getRelevantDocuments(\n question: string,\n runManager?: CallbackManagerForRetrieverRun\n ): Promise<Document[]> {\n const queries = await this._generateQueries(question, runManager);\n const documents = await this._retrieveDocuments(queries, runManager);\n const uniqueDocuments = 
this._uniqueUnion(documents);\n\n let outputDocs = uniqueDocuments;\n if (this.documentCompressor && uniqueDocuments.length) {\n outputDocs = await this.documentCompressor.compressDocuments(\n uniqueDocuments,\n question,\n runManager?.getChild()\n );\n if (this.documentCompressorFilteringFn) {\n outputDocs = this.documentCompressorFilteringFn(outputDocs);\n }\n }\n\n return outputDocs;\n }\n}\n"],"mappings":";;;;;;;AAoBA,IAAM,uBAAN,cAAmC,iBAA2B;CAC5D,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD,MAAM,MAAM,MAAiC;EAC3C,MAAM,gBAAgB,KAAK,QAAQ,cAAc;EACjD,MAAM,cAAc,KAAK,QAAQ,eAAe;EAChD,MAAM,sBACJ,kBAAkB,KAAK,IAAI,gBAAgB;EAC7C,MAAM,oBAAoB,gBAAgB,KAAK,KAAK,SAAS;AAM7D,SAAO,EAAE,OALK,KACX,MAAM,qBAAqB,kBAAkB,CAC7C,MAAM,CACN,MAAM,KAAK,CACX,QAAQ,SAAS,KAAK,MAAM,KAAK,GAAG,EACvB;;CAGlB,wBAAgC;AAC9B,QAAM,IAAI,MAAM,mBAAmB;;;AAKvC,MAAM,uCAAuC,IAAI,eAAe;CAC9D,gBAAgB,CAAC,YAAY,aAAa;CAC1C,UAAU;;;;;;;;;;;;;;;;CAgBX,CAAC;;;;;;;;;;;;;;AAyBF,IAAa,sBAAb,cAAyC,cAAc;CACrD,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAa;CAExD;CAEA;CAEA,aAAqB;CAErB,YAAoB;CAEpB;CAEA;CAEA,YAAY,QAAkC;AAC5C,QAAM,OAAO;AACb,OAAK,YAAY,OAAO;AACxB,OAAK,WAAW,OAAO;AACvB,OAAK,aAAa,OAAO,cAAc,KAAK;AAC5C,OAAK,YAAY,OAAO,aAAa,KAAK;AAC1C,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,OAAO,QACL,QAIqB;EACrB,MAAM,EACJ,WACA,KACA,SAAS,sBACT,YACA,WACA,GAAG,SACD;EAEJ,MAAM,WAAW,IAAI,SAAS;GAAE;GAAK;GAAQ,cADxB,IAAI,sBAAsB;GACY,CAAC;AAC5D,SAAO,IAAI,KAAK;GAAE;GAAW;GAAU;GAAY;GAAW,GAAG;GAAM,CAAC;;CAI1E,MAAc,iBACZ,UACA,YACmB;EAKnB,MAAM,SAJW,MAAM,KAAK,SAAS,KACnC;GAAE;GAAU,YAAY,KAAK;GAAY,EACzC,YAAY,UAAU,CACvB,EACsB,KAAK,KAAK,cAAc,EAAE;AACjD,MAAI,KAAK,QACP,SAAQ,IAAI,sBAAsB,QAAQ;AAE5C,SAAO;;CAIT,MAAc,mBACZ,SACA,YACqB;EACrB,MAAM,YAAwB,EAAE;AAChC,QAAM,QAAQ,IACZ,QAAQ,IAAI,OAAO,UAAU;GAC3B,MAAM,OAAO,MAAM,KAAK,UAAU,OAAO,OAAO,YAAY,UAAU,CAAC;AACvE,aAAU,KAAK,GAAG,KAAK;IACvB,CACH;AACD,SAAO;;CAIT,aAAqB,WAAmC;EACtD,MAAM,sBAAmD,EAAE;AAE3D,OAAK,MAAM,OAAO,WAAW;GAC3B,MAAM,MAAM,GAAG,IAAI,YAAY,GAAG,KAAK,UACrC,OAAO,QAAQ,IAAI,SAAS,CAAC,MAAM,CACpC;AACD,uBAAoB,OAAO;;AAI7B,SADwB,OAAO,OAAO,oBAAoB;;CAI5D,MAAM,sBACJ,UACA,YACqB;EACrB,MAAM,UAAU,MAAM,KAAK,iBAAiB,UAAU,WAAW;EACjE,MAAM,YAAY,MAAM,KAAK,mBAAmB,SAAS,WAAW;EACpE,MAAM,kBAAkB,KAAK,aAAa,UAAU;EAEpD,IAAI,aAAa;AACjB,MAAI,KAAK,sBAAsB,gBAAgB,QAAQ;AACrD,gBAAa,MAAM,KAAK,mBAAmB,kBACzC,iBACA,UACA,YAAY,UAAU,CACvB;AACD,OAAI,KAAK,8BACP,cAAa,KAAK,8BAA8B,WAAW;;AAI/D,SAAO"}
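For reference, the `LineListOutputParser` in the same payload extracts the generated questions from a `<questions>...</questions>` block and tolerates a missing opening or closing tag. A standalone re-implementation of just that parsing step, for illustration only:

```typescript
// Mirrors LineListOutputParser.parse from the embedded source: slice out
// the <questions> block (falling back to the start or end of the text if
// a tag is absent), then split into non-empty lines.
function parseQuestions(text: string): string[] {
  const start = text.indexOf("<questions>");
  const end = text.indexOf("</questions>");
  return text
    .slice(
      start === -1 ? 0 : start + "<questions>".length,
      end === -1 ? text.length : end
    )
    .trim()
    .split("\n")
    .filter((line) => line.trim() !== "");
}

// parseQuestions("<questions>\nQ1\nQ2\n</questions>") -> ["Q1", "Q2"]
```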
@@ -1 +1 @@
- {"version":3,"file":"parent_document.cjs","names":["MultiVectorRetriever","Document"],"sources":["../../src/retrievers/parent_document.ts"],"sourcesContent":["import * as uuid from \"uuid\";\n\nimport {\n type VectorStoreInterface,\n type VectorStoreRetrieverInterface,\n} from \"@langchain/core/vectorstores\";\nimport { Document } from \"@langchain/core/documents\";\nimport {\n TextSplitter,\n TextSplitterChunkHeaderOptions,\n} from \"@langchain/textsplitters\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\nimport {\n MultiVectorRetriever,\n type MultiVectorRetrieverInput,\n} from \"./multi_vector.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type SubDocs = Document<Record<string, any>>[];\n\n/**\n * Interface for the fields required to initialize a\n * ParentDocumentRetriever instance.\n */\nexport type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {\n childSplitter: TextSplitter;\n parentSplitter?: TextSplitter;\n /**\n * A custom retriever to use when retrieving instead of\n * the `.similaritySearch` method of the vectorstore.\n */\n childDocumentRetriever?: VectorStoreRetrieverInterface<VectorStoreInterface>;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: SubDocs) => SubDocs;\n};\n\n/**\n * A type of document retriever that splits input documents into smaller chunks\n * while separately storing and preserving the original documents.\n * The small chunks are embedded, then on retrieval, the original\n * \"parent\" documents are retrieved.\n *\n * This strikes a balance between better targeted retrieval with small documents\n * and the more context-rich larger documents.\n * @example\n * ```typescript\n * const retriever = new ParentDocumentRetriever({\n * vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),\n * byteStore: new InMemoryStore<Uint8Array>(),\n * parentSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 500,\n * }),\n * childSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 50,\n * }),\n * childK: 20,\n * parentK: 5,\n * });\n *\n * const parentDocuments = await getDocuments();\n * await retriever.addDocuments(parentDocuments);\n * const retrievedDocs = await retriever.invoke(\"justice breyer\");\n * ```\n */\nexport class ParentDocumentRetriever extends MultiVectorRetriever {\n static lc_name() {\n return \"ParentDocumentRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"parent_document\"];\n\n vectorstore: VectorStoreInterface;\n\n protected childSplitter: TextSplitter;\n\n protected parentSplitter?: TextSplitter;\n\n protected idKey = \"doc_id\";\n\n protected childK?: number;\n\n protected parentK?: number;\n\n childDocumentRetriever:\n | VectorStoreRetrieverInterface<VectorStoreInterface>\n | undefined;\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: ParentDocumentRetrieverFields[\"documentCompressorFilteringFn\"];\n\n constructor(fields: ParentDocumentRetrieverFields) {\n super(fields);\n this.vectorstore = fields.vectorstore;\n this.childSplitter = fields.childSplitter;\n this.parentSplitter = fields.parentSplitter;\n this.idKey = fields.idKey ?? 
this.idKey;\n this.childK = fields.childK;\n this.parentK = fields.parentK;\n this.childDocumentRetriever = fields.childDocumentRetriever;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n async _getRelevantDocuments(query: string): Promise<Document[]> {\n let subDocs: SubDocs = [];\n if (this.childDocumentRetriever) {\n subDocs = await this.childDocumentRetriever.invoke(query);\n } else {\n subDocs = await this.vectorstore.similaritySearch(query, this.childK);\n }\n\n if (this.documentCompressor && subDocs.length) {\n subDocs = await this.documentCompressor.compressDocuments(subDocs, query);\n if (this.documentCompressorFilteringFn) {\n subDocs = this.documentCompressorFilteringFn(subDocs);\n }\n }\n\n // Maintain order\n const parentDocIds: string[] = [];\n for (const doc of subDocs) {\n if (!parentDocIds.includes(doc.metadata[this.idKey])) {\n parentDocIds.push(doc.metadata[this.idKey]);\n }\n }\n const parentDocs: Document[] = [];\n const storedParentDocs = await this.docstore.mget(parentDocIds);\n const retrievedDocs: Document[] = storedParentDocs.filter(\n (doc?: Document): doc is Document => doc !== undefined\n );\n parentDocs.push(...retrievedDocs);\n return parentDocs.slice(0, this.parentK);\n }\n\n async _storeDocuments(\n parentDoc: Record<string, Document>,\n childDocs: Document[],\n addToDocstore: boolean\n ) {\n if (this.childDocumentRetriever) {\n await this.childDocumentRetriever.addDocuments(childDocs);\n } else {\n await this.vectorstore.addDocuments(childDocs);\n }\n if (addToDocstore) {\n await this.docstore.mset(Object.entries(parentDoc));\n }\n }\n\n /**\n * Adds documents to the docstore and vectorstores.\n * If a retriever is provided, it will be used to add documents instead of the vectorstore.\n * @param docs The documents to add\n * @param config.ids Optional list of ids for documents. If provided should be the same\n * length as the list of documents. Can provided if parent documents\n * are already in the document store and you don't want to re-add\n * to the docstore. If not provided, random UUIDs will be used as ids.\n * @param config.addToDocstore Boolean of whether to add documents to docstore.\n * This can be false if and only if `ids` are provided. You may want\n * to set this to False if the documents are already in the docstore\n * and you don't want to re-add them.\n * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers\n */\n async addDocuments(\n docs: Document[],\n config?: {\n ids?: string[];\n addToDocstore?: boolean;\n childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;\n }\n ): Promise<void> {\n const {\n ids,\n addToDocstore = true,\n childDocChunkHeaderOptions = {},\n } = config ?? {};\n const parentDocs = this.parentSplitter\n ? 
await this.parentSplitter.splitDocuments(docs)\n : docs;\n let parentDocIds;\n if (ids === undefined) {\n if (!addToDocstore) {\n throw new Error(\n `If ids are not passed in, \"config.addToDocstore\" MUST be true`\n );\n }\n parentDocIds = parentDocs.map((_doc: Document) => uuid.v4());\n } else {\n parentDocIds = ids;\n }\n if (parentDocs.length !== parentDocIds.length) {\n throw new Error(\n `Got uneven list of documents and ids.\\nIf \"ids\" is provided, should be same length as \"documents\".`\n );\n }\n for (let i = 0; i < parentDocs.length; i += 1) {\n const parentDoc = parentDocs[i];\n const parentDocId = parentDocIds[i];\n const subDocs = await this.childSplitter.splitDocuments(\n [parentDoc],\n childDocChunkHeaderOptions\n );\n const taggedSubDocs = subDocs.map(\n (subDoc: Document) =>\n new Document({\n pageContent: subDoc.pageContent,\n metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },\n })\n );\n await this._storeDocuments(\n { [parentDocId]: parentDoc },\n taggedSubDocs,\n addToDocstore\n );\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkEA,IAAa,0BAAb,cAA6CA,gCAAAA,qBAAqB;CAChE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAkB;CAE7D;CAEA;CAEA;CAEA,QAAkB;CAElB;CAEA;CAEA;CAIA;CAEA;CAEA,YAAY,QAAuC;AACjD,QAAM,OAAO;AACb,OAAK,cAAc,OAAO;AAC1B,OAAK,gBAAgB,OAAO;AAC5B,OAAK,iBAAiB,OAAO;AAC7B,OAAK,QAAQ,OAAO,SAAS,KAAK;AAClC,OAAK,SAAS,OAAO;AACrB,OAAK,UAAU,OAAO;AACtB,OAAK,yBAAyB,OAAO;AACrC,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,MAAM,sBAAsB,OAAoC;EAC9D,IAAI,UAAmB,EAAE;AACzB,MAAI,KAAK,uBACP,WAAU,MAAM,KAAK,uBAAuB,OAAO,MAAM;MAEzD,WAAU,MAAM,KAAK,YAAY,iBAAiB,OAAO,KAAK,OAAO;AAGvE,MAAI,KAAK,sBAAsB,QAAQ,QAAQ;AAC7C,aAAU,MAAM,KAAK,mBAAmB,kBAAkB,SAAS,MAAM;AACzE,OAAI,KAAK,8BACP,WAAU,KAAK,8BAA8B,QAAQ;;EAKzD,MAAM,eAAyB,EAAE;AACjC,OAAK,MAAM,OAAO,QAChB,KAAI,CAAC,aAAa,SAAS,IAAI,SAAS,KAAK,OAAO,CAClD,cAAa,KAAK,IAAI,SAAS,KAAK,OAAO;EAG/C,MAAM,aAAyB,EAAE;EAEjC,MAAM,iBADmB,MAAM,KAAK,SAAS,KAAK,aAAa,EACZ,QAChD,QAAoC,QAAQ,KAAA,EAC9C;AACD,aAAW,KAAK,GAAG,cAAc;AACjC,SAAO,WAAW,MAAM,GAAG,KAAK,QAAQ;;CAG1C,MAAM,gBACJ,WACA,WACA,eACA;AACA,MAAI,KAAK,uBACP,OAAM,KAAK,uBAAuB,aAAa,UAAU;MAEzD,OAAM,KAAK,YAAY,aAAa,UAAU;AAEhD,MAAI,cACF,OAAM,KAAK,SAAS,KAAK,OAAO,QAAQ,UAAU,CAAC;;;;;;;;;;;;;;;;CAkBvD,MAAM,aACJ,MACA,QAKe;EACf,MAAM,EACJ,KACA,gBAAgB,MAChB,6BAA6B,EAAE,KAC7B,UAAU,EAAE;EAChB,MAAM,aAAa,KAAK,iBACpB,MAAM,KAAK,eAAe,eAAe,KAAK,GAC9C;EACJ,IAAI;AACJ,MAAI,QAAQ,KAAA,GAAW;AACrB,OAAI,CAAC,cACH,OAAM,IAAI,MACR,gEACD;AAEH,kBAAe,WAAW,KAAK,SAAmB,KAAK,IAAI,CAAC;QAE5D,gBAAe;AAEjB,MAAI,WAAW,WAAW,aAAa,OACrC,OAAM,IAAI,MACR,qGACD;AAEH,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,YAAY,WAAW;GAC7B,MAAM,cAAc,aAAa;GAKjC,MAAM,iBAJU,MAAM,KAAK,cAAc,eACvC,CAAC,UAAU,EACX,2BACD,EAC6B,KAC3B,WACC,IAAIC,0BAAAA,SAAS;IACX,aAAa,OAAO;IACpB,UAAU;KAAE,GAAG,OAAO;MAAW,KAAK,QAAQ;KAAa;IAC5D,CAAC,CACL;AACD,SAAM,KAAK,gBACT,GAAG,cAAc,WAAW,EAC5B,eACA,cACD"}
+ {"version":3,"file":"parent_document.cjs","names":["MultiVectorRetriever","Document"],"sources":["../../src/retrievers/parent_document.ts"],"sourcesContent":["import * as uuid from \"uuid\";\n\nimport {\n type VectorStoreInterface,\n type VectorStoreRetrieverInterface,\n} from \"@langchain/core/vectorstores\";\nimport { Document } from \"@langchain/core/documents\";\nimport {\n TextSplitter,\n TextSplitterChunkHeaderOptions,\n} from \"@langchain/textsplitters\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\nimport {\n MultiVectorRetriever,\n type MultiVectorRetrieverInput,\n} from \"./multi_vector.js\";\n\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport type SubDocs = Document<Record<string, any>>[];\n\n/**\n * Interface for the fields required to initialize a\n * ParentDocumentRetriever instance.\n */\nexport type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {\n childSplitter: TextSplitter;\n parentSplitter?: TextSplitter;\n /**\n * A custom retriever to use when retrieving instead of\n * the `.similaritySearch` method of the vectorstore.\n */\n childDocumentRetriever?: VectorStoreRetrieverInterface<VectorStoreInterface>;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: SubDocs) => SubDocs;\n};\n\n/**\n * A type of document retriever that splits input documents into smaller chunks\n * while separately storing and preserving the original documents.\n * The small chunks are embedded, then on retrieval, the original\n * \"parent\" documents are retrieved.\n *\n * This strikes a balance between better targeted retrieval with small documents\n * and the more context-rich larger documents.\n * @example\n * ```typescript\n * const retriever = new ParentDocumentRetriever({\n * vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),\n * byteStore: new InMemoryStore<Uint8Array>(),\n * parentSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 500,\n * }),\n * childSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 50,\n * }),\n * childK: 20,\n * parentK: 5,\n * });\n *\n * const parentDocuments = await getDocuments();\n * await retriever.addDocuments(parentDocuments);\n * const retrievedDocs = await retriever.invoke(\"justice breyer\");\n * ```\n */\nexport class ParentDocumentRetriever extends MultiVectorRetriever {\n static lc_name() {\n return \"ParentDocumentRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"parent_document\"];\n\n vectorstore: VectorStoreInterface;\n\n protected childSplitter: TextSplitter;\n\n protected parentSplitter?: TextSplitter;\n\n protected idKey = \"doc_id\";\n\n protected childK?: number;\n\n protected parentK?: number;\n\n childDocumentRetriever:\n | VectorStoreRetrieverInterface<VectorStoreInterface>\n | undefined;\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: ParentDocumentRetrieverFields[\"documentCompressorFilteringFn\"];\n\n constructor(fields: ParentDocumentRetrieverFields) {\n super(fields);\n this.vectorstore = fields.vectorstore;\n this.childSplitter = fields.childSplitter;\n this.parentSplitter = fields.parentSplitter;\n this.idKey = fields.idKey ?? 
this.idKey;\n this.childK = fields.childK;\n this.parentK = fields.parentK;\n this.childDocumentRetriever = fields.childDocumentRetriever;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n async _getRelevantDocuments(query: string): Promise<Document[]> {\n let subDocs: SubDocs = [];\n if (this.childDocumentRetriever) {\n subDocs = await this.childDocumentRetriever.invoke(query);\n } else {\n subDocs = await this.vectorstore.similaritySearch(query, this.childK);\n }\n\n if (this.documentCompressor && subDocs.length) {\n subDocs = await this.documentCompressor.compressDocuments(subDocs, query);\n if (this.documentCompressorFilteringFn) {\n subDocs = this.documentCompressorFilteringFn(subDocs);\n }\n }\n\n // Maintain order\n const parentDocIds: string[] = [];\n for (const doc of subDocs) {\n if (!parentDocIds.includes(doc.metadata[this.idKey])) {\n parentDocIds.push(doc.metadata[this.idKey]);\n }\n }\n const parentDocs: Document[] = [];\n const storedParentDocs = await this.docstore.mget(parentDocIds);\n const retrievedDocs: Document[] = storedParentDocs.filter(\n (doc?: Document): doc is Document => doc !== undefined\n );\n parentDocs.push(...retrievedDocs);\n return parentDocs.slice(0, this.parentK);\n }\n\n async _storeDocuments(\n parentDoc: Record<string, Document>,\n childDocs: Document[],\n addToDocstore: boolean\n ) {\n if (this.childDocumentRetriever) {\n await this.childDocumentRetriever.addDocuments(childDocs);\n } else {\n await this.vectorstore.addDocuments(childDocs);\n }\n if (addToDocstore) {\n await this.docstore.mset(Object.entries(parentDoc));\n }\n }\n\n /**\n * Adds documents to the docstore and vectorstores.\n * If a retriever is provided, it will be used to add documents instead of the vectorstore.\n * @param docs The documents to add\n * @param config.ids Optional list of ids for documents. If provided should be the same\n * length as the list of documents. Can provided if parent documents\n * are already in the document store and you don't want to re-add\n * to the docstore. If not provided, random UUIDs will be used as ids.\n * @param config.addToDocstore Boolean of whether to add documents to docstore.\n * This can be false if and only if `ids` are provided. You may want\n * to set this to False if the documents are already in the docstore\n * and you don't want to re-add them.\n * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers\n */\n async addDocuments(\n docs: Document[],\n config?: {\n ids?: string[];\n addToDocstore?: boolean;\n childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;\n }\n ): Promise<void> {\n const {\n ids,\n addToDocstore = true,\n childDocChunkHeaderOptions = {},\n } = config ?? {};\n const parentDocs = this.parentSplitter\n ? 
await this.parentSplitter.splitDocuments(docs)\n : docs;\n let parentDocIds;\n if (ids === undefined) {\n if (!addToDocstore) {\n throw new Error(\n `If ids are not passed in, \"config.addToDocstore\" MUST be true`\n );\n }\n parentDocIds = parentDocs.map((_doc: Document) => uuid.v4());\n } else {\n parentDocIds = ids;\n }\n if (parentDocs.length !== parentDocIds.length) {\n throw new Error(\n `Got uneven list of documents and ids.\\nIf \"ids\" is provided, should be same length as \"documents\".`\n );\n }\n for (let i = 0; i < parentDocs.length; i += 1) {\n const parentDoc = parentDocs[i];\n const parentDocId = parentDocIds[i];\n const subDocs = await this.childSplitter.splitDocuments(\n [parentDoc],\n childDocChunkHeaderOptions\n );\n const taggedSubDocs = subDocs.map(\n (subDoc: Document) =>\n new Document({\n pageContent: subDoc.pageContent,\n metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },\n })\n );\n await this._storeDocuments(\n { [parentDocId]: parentDoc },\n taggedSubDocs,\n addToDocstore\n );\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkEA,IAAa,0BAAb,cAA6CA,gCAAAA,qBAAqB;CAChE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAkB;CAE7D;CAEA;CAEA;CAEA,QAAkB;CAElB;CAEA;CAEA;CAIA;CAEA;CAEA,YAAY,QAAuC;AACjD,QAAM,OAAO;AACb,OAAK,cAAc,OAAO;AAC1B,OAAK,gBAAgB,OAAO;AAC5B,OAAK,iBAAiB,OAAO;AAC7B,OAAK,QAAQ,OAAO,SAAS,KAAK;AAClC,OAAK,SAAS,OAAO;AACrB,OAAK,UAAU,OAAO;AACtB,OAAK,yBAAyB,OAAO;AACrC,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,MAAM,sBAAsB,OAAoC;EAC9D,IAAI,UAAmB,EAAE;AACzB,MAAI,KAAK,uBACP,WAAU,MAAM,KAAK,uBAAuB,OAAO,MAAM;MAEzD,WAAU,MAAM,KAAK,YAAY,iBAAiB,OAAO,KAAK,OAAO;AAGvE,MAAI,KAAK,sBAAsB,QAAQ,QAAQ;AAC7C,aAAU,MAAM,KAAK,mBAAmB,kBAAkB,SAAS,MAAM;AACzE,OAAI,KAAK,8BACP,WAAU,KAAK,8BAA8B,QAAQ;;EAKzD,MAAM,eAAyB,EAAE;AACjC,OAAK,MAAM,OAAO,QAChB,KAAI,CAAC,aAAa,SAAS,IAAI,SAAS,KAAK,OAAO,CAClD,cAAa,KAAK,IAAI,SAAS,KAAK,OAAO;EAG/C,MAAM,aAAyB,EAAE;EAEjC,MAAM,iBADmB,MAAM,KAAK,SAAS,KAAK,aAAa,EACZ,QAChD,QAAoC,QAAQ,KAAA,EAC9C;AACD,aAAW,KAAK,GAAG,cAAc;AACjC,SAAO,WAAW,MAAM,GAAG,KAAK,QAAQ;;CAG1C,MAAM,gBACJ,WACA,WACA,eACA;AACA,MAAI,KAAK,uBACP,OAAM,KAAK,uBAAuB,aAAa,UAAU;MAEzD,OAAM,KAAK,YAAY,aAAa,UAAU;AAEhD,MAAI,cACF,OAAM,KAAK,SAAS,KAAK,OAAO,QAAQ,UAAU,CAAC;;;;;;;;;;;;;;;;CAkBvD,MAAM,aACJ,MACA,QAKe;EACf,MAAM,EACJ,KACA,gBAAgB,MAChB,6BAA6B,EAAE,KAC7B,UAAU,EAAE;EAChB,MAAM,aAAa,KAAK,iBACpB,MAAM,KAAK,eAAe,eAAe,KAAK,GAC9C;EACJ,IAAI;AACJ,MAAI,QAAQ,KAAA,GAAW;AACrB,OAAI,CAAC,cACH,OAAM,IAAI,MACR,gEACD;AAEH,kBAAe,WAAW,KAAK,SAAmB,KAAK,IAAI,CAAC;QAE5D,gBAAe;AAEjB,MAAI,WAAW,WAAW,aAAa,OACrC,OAAM,IAAI,MACR,qGACD;AAEH,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,YAAY,WAAW;GAC7B,MAAM,cAAc,aAAa;GAKjC,MAAM,iBAJU,MAAM,KAAK,cAAc,eACvC,CAAC,UAAU,EACX,2BACD,EAC6B,KAC3B,WACC,IAAIC,0BAAAA,SAAS;IACX,aAAa,OAAO;IACpB,UAAU;KAAE,GAAG,OAAO;MAAW,KAAK,QAAQ;KAAa;IAC5D,CAAC,CACL;AACD,SAAM,KAAK,gBACT,GAAG,cAAc,WAAW,EAC5B,eACA,cACD"}
@@ -1 +1 @@
- {"version":3,"file":"parent_document.js","names":[],"sources":["../../src/retrievers/parent_document.ts"],"sourcesContent":["import * as uuid from \"uuid\";\n\nimport {\n type VectorStoreInterface,\n type VectorStoreRetrieverInterface,\n} from \"@langchain/core/vectorstores\";\nimport { Document } from \"@langchain/core/documents\";\nimport {\n TextSplitter,\n TextSplitterChunkHeaderOptions,\n} from \"@langchain/textsplitters\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\nimport {\n MultiVectorRetriever,\n type MultiVectorRetrieverInput,\n} from \"./multi_vector.js\";\n\n// eslint-disable-next-line @typescript-eslint/no-explicit-any\nexport type SubDocs = Document<Record<string, any>>[];\n\n/**\n * Interface for the fields required to initialize a\n * ParentDocumentRetriever instance.\n */\nexport type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {\n childSplitter: TextSplitter;\n parentSplitter?: TextSplitter;\n /**\n * A custom retriever to use when retrieving instead of\n * the `.similaritySearch` method of the vectorstore.\n */\n childDocumentRetriever?: VectorStoreRetrieverInterface<VectorStoreInterface>;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: SubDocs) => SubDocs;\n};\n\n/**\n * A type of document retriever that splits input documents into smaller chunks\n * while separately storing and preserving the original documents.\n * The small chunks are embedded, then on retrieval, the original\n * \"parent\" documents are retrieved.\n *\n * This strikes a balance between better targeted retrieval with small documents\n * and the more context-rich larger documents.\n * @example\n * ```typescript\n * const retriever = new ParentDocumentRetriever({\n * vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),\n * byteStore: new InMemoryStore<Uint8Array>(),\n * parentSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 500,\n * }),\n * childSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 50,\n * }),\n * childK: 20,\n * parentK: 5,\n * });\n *\n * const parentDocuments = await getDocuments();\n * await retriever.addDocuments(parentDocuments);\n * const retrievedDocs = await retriever.invoke(\"justice breyer\");\n * ```\n */\nexport class ParentDocumentRetriever extends MultiVectorRetriever {\n static lc_name() {\n return \"ParentDocumentRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"parent_document\"];\n\n vectorstore: VectorStoreInterface;\n\n protected childSplitter: TextSplitter;\n\n protected parentSplitter?: TextSplitter;\n\n protected idKey = \"doc_id\";\n\n protected childK?: number;\n\n protected parentK?: number;\n\n childDocumentRetriever:\n | VectorStoreRetrieverInterface<VectorStoreInterface>\n | undefined;\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: ParentDocumentRetrieverFields[\"documentCompressorFilteringFn\"];\n\n constructor(fields: ParentDocumentRetrieverFields) {\n super(fields);\n this.vectorstore = fields.vectorstore;\n this.childSplitter = fields.childSplitter;\n this.parentSplitter = fields.parentSplitter;\n this.idKey = fields.idKey ?? 
this.idKey;\n this.childK = fields.childK;\n this.parentK = fields.parentK;\n this.childDocumentRetriever = fields.childDocumentRetriever;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n async _getRelevantDocuments(query: string): Promise<Document[]> {\n let subDocs: SubDocs = [];\n if (this.childDocumentRetriever) {\n subDocs = await this.childDocumentRetriever.invoke(query);\n } else {\n subDocs = await this.vectorstore.similaritySearch(query, this.childK);\n }\n\n if (this.documentCompressor && subDocs.length) {\n subDocs = await this.documentCompressor.compressDocuments(subDocs, query);\n if (this.documentCompressorFilteringFn) {\n subDocs = this.documentCompressorFilteringFn(subDocs);\n }\n }\n\n // Maintain order\n const parentDocIds: string[] = [];\n for (const doc of subDocs) {\n if (!parentDocIds.includes(doc.metadata[this.idKey])) {\n parentDocIds.push(doc.metadata[this.idKey]);\n }\n }\n const parentDocs: Document[] = [];\n const storedParentDocs = await this.docstore.mget(parentDocIds);\n const retrievedDocs: Document[] = storedParentDocs.filter(\n (doc?: Document): doc is Document => doc !== undefined\n );\n parentDocs.push(...retrievedDocs);\n return parentDocs.slice(0, this.parentK);\n }\n\n async _storeDocuments(\n parentDoc: Record<string, Document>,\n childDocs: Document[],\n addToDocstore: boolean\n ) {\n if (this.childDocumentRetriever) {\n await this.childDocumentRetriever.addDocuments(childDocs);\n } else {\n await this.vectorstore.addDocuments(childDocs);\n }\n if (addToDocstore) {\n await this.docstore.mset(Object.entries(parentDoc));\n }\n }\n\n /**\n * Adds documents to the docstore and vectorstores.\n * If a retriever is provided, it will be used to add documents instead of the vectorstore.\n * @param docs The documents to add\n * @param config.ids Optional list of ids for documents. If provided should be the same\n * length as the list of documents. Can provided if parent documents\n * are already in the document store and you don't want to re-add\n * to the docstore. If not provided, random UUIDs will be used as ids.\n * @param config.addToDocstore Boolean of whether to add documents to docstore.\n * This can be false if and only if `ids` are provided. You may want\n * to set this to False if the documents are already in the docstore\n * and you don't want to re-add them.\n * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers\n */\n async addDocuments(\n docs: Document[],\n config?: {\n ids?: string[];\n addToDocstore?: boolean;\n childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;\n }\n ): Promise<void> {\n const {\n ids,\n addToDocstore = true,\n childDocChunkHeaderOptions = {},\n } = config ?? {};\n const parentDocs = this.parentSplitter\n ? 
await this.parentSplitter.splitDocuments(docs)\n : docs;\n let parentDocIds;\n if (ids === undefined) {\n if (!addToDocstore) {\n throw new Error(\n `If ids are not passed in, \"config.addToDocstore\" MUST be true`\n );\n }\n parentDocIds = parentDocs.map((_doc: Document) => uuid.v4());\n } else {\n parentDocIds = ids;\n }\n if (parentDocs.length !== parentDocIds.length) {\n throw new Error(\n `Got uneven list of documents and ids.\\nIf \"ids\" is provided, should be same length as \"documents\".`\n );\n }\n for (let i = 0; i < parentDocs.length; i += 1) {\n const parentDoc = parentDocs[i];\n const parentDocId = parentDocIds[i];\n const subDocs = await this.childSplitter.splitDocuments(\n [parentDoc],\n childDocChunkHeaderOptions\n );\n const taggedSubDocs = subDocs.map(\n (subDoc: Document) =>\n new Document({\n pageContent: subDoc.pageContent,\n metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },\n })\n );\n await this._storeDocuments(\n { [parentDocId]: parentDoc },\n taggedSubDocs,\n addToDocstore\n );\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkEA,IAAa,0BAAb,cAA6C,qBAAqB;CAChE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAkB;CAE7D;CAEA;CAEA;CAEA,QAAkB;CAElB;CAEA;CAEA;CAIA;CAEA;CAEA,YAAY,QAAuC;AACjD,QAAM,OAAO;AACb,OAAK,cAAc,OAAO;AAC1B,OAAK,gBAAgB,OAAO;AAC5B,OAAK,iBAAiB,OAAO;AAC7B,OAAK,QAAQ,OAAO,SAAS,KAAK;AAClC,OAAK,SAAS,OAAO;AACrB,OAAK,UAAU,OAAO;AACtB,OAAK,yBAAyB,OAAO;AACrC,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,MAAM,sBAAsB,OAAoC;EAC9D,IAAI,UAAmB,EAAE;AACzB,MAAI,KAAK,uBACP,WAAU,MAAM,KAAK,uBAAuB,OAAO,MAAM;MAEzD,WAAU,MAAM,KAAK,YAAY,iBAAiB,OAAO,KAAK,OAAO;AAGvE,MAAI,KAAK,sBAAsB,QAAQ,QAAQ;AAC7C,aAAU,MAAM,KAAK,mBAAmB,kBAAkB,SAAS,MAAM;AACzE,OAAI,KAAK,8BACP,WAAU,KAAK,8BAA8B,QAAQ;;EAKzD,MAAM,eAAyB,EAAE;AACjC,OAAK,MAAM,OAAO,QAChB,KAAI,CAAC,aAAa,SAAS,IAAI,SAAS,KAAK,OAAO,CAClD,cAAa,KAAK,IAAI,SAAS,KAAK,OAAO;EAG/C,MAAM,aAAyB,EAAE;EAEjC,MAAM,iBADmB,MAAM,KAAK,SAAS,KAAK,aAAa,EACZ,QAChD,QAAoC,QAAQ,KAAA,EAC9C;AACD,aAAW,KAAK,GAAG,cAAc;AACjC,SAAO,WAAW,MAAM,GAAG,KAAK,QAAQ;;CAG1C,MAAM,gBACJ,WACA,WACA,eACA;AACA,MAAI,KAAK,uBACP,OAAM,KAAK,uBAAuB,aAAa,UAAU;MAEzD,OAAM,KAAK,YAAY,aAAa,UAAU;AAEhD,MAAI,cACF,OAAM,KAAK,SAAS,KAAK,OAAO,QAAQ,UAAU,CAAC;;;;;;;;;;;;;;;;CAkBvD,MAAM,aACJ,MACA,QAKe;EACf,MAAM,EACJ,KACA,gBAAgB,MAChB,6BAA6B,EAAE,KAC7B,UAAU,EAAE;EAChB,MAAM,aAAa,KAAK,iBACpB,MAAM,KAAK,eAAe,eAAe,KAAK,GAC9C;EACJ,IAAI;AACJ,MAAI,QAAQ,KAAA,GAAW;AACrB,OAAI,CAAC,cACH,OAAM,IAAI,MACR,gEACD;AAEH,kBAAe,WAAW,KAAK,SAAmB,KAAK,IAAI,CAAC;QAE5D,gBAAe;AAEjB,MAAI,WAAW,WAAW,aAAa,OACrC,OAAM,IAAI,MACR,qGACD;AAEH,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,YAAY,WAAW;GAC7B,MAAM,cAAc,aAAa;GAKjC,MAAM,iBAJU,MAAM,KAAK,cAAc,eACvC,CAAC,UAAU,EACX,2BACD,EAC6B,KAC3B,WACC,IAAI,SAAS;IACX,aAAa,OAAO;IACpB,UAAU;KAAE,GAAG,OAAO;MAAW,KAAK,QAAQ;KAAa;IAC5D,CAAC,CACL;AACD,SAAM,KAAK,gBACT,GAAG,cAAc,WAAW,EAC5B,eACA,cACD"}
+ {"version":3,"file":"parent_document.js","names":[],"sources":["../../src/retrievers/parent_document.ts"],"sourcesContent":["import * as uuid from \"uuid\";\n\nimport {\n type VectorStoreInterface,\n type VectorStoreRetrieverInterface,\n} from \"@langchain/core/vectorstores\";\nimport { Document } from \"@langchain/core/documents\";\nimport {\n TextSplitter,\n TextSplitterChunkHeaderOptions,\n} from \"@langchain/textsplitters\";\nimport type { BaseDocumentCompressor } from \"./document_compressors/index.js\";\nimport {\n MultiVectorRetriever,\n type MultiVectorRetrieverInput,\n} from \"./multi_vector.js\";\n\n// oxlint-disable-next-line @typescript-eslint/no-explicit-any\nexport type SubDocs = Document<Record<string, any>>[];\n\n/**\n * Interface for the fields required to initialize a\n * ParentDocumentRetriever instance.\n */\nexport type ParentDocumentRetrieverFields = MultiVectorRetrieverInput & {\n childSplitter: TextSplitter;\n parentSplitter?: TextSplitter;\n /**\n * A custom retriever to use when retrieving instead of\n * the `.similaritySearch` method of the vectorstore.\n */\n childDocumentRetriever?: VectorStoreRetrieverInterface<VectorStoreInterface>;\n documentCompressor?: BaseDocumentCompressor | undefined;\n documentCompressorFilteringFn?: (docs: SubDocs) => SubDocs;\n};\n\n/**\n * A type of document retriever that splits input documents into smaller chunks\n * while separately storing and preserving the original documents.\n * The small chunks are embedded, then on retrieval, the original\n * \"parent\" documents are retrieved.\n *\n * This strikes a balance between better targeted retrieval with small documents\n * and the more context-rich larger documents.\n * @example\n * ```typescript\n * const retriever = new ParentDocumentRetriever({\n * vectorstore: new MemoryVectorStore(new OpenAIEmbeddings()),\n * byteStore: new InMemoryStore<Uint8Array>(),\n * parentSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 500,\n * }),\n * childSplitter: new RecursiveCharacterTextSplitter({\n * chunkOverlap: 0,\n * chunkSize: 50,\n * }),\n * childK: 20,\n * parentK: 5,\n * });\n *\n * const parentDocuments = await getDocuments();\n * await retriever.addDocuments(parentDocuments);\n * const retrievedDocs = await retriever.invoke(\"justice breyer\");\n * ```\n */\nexport class ParentDocumentRetriever extends MultiVectorRetriever {\n static lc_name() {\n return \"ParentDocumentRetriever\";\n }\n\n lc_namespace = [\"langchain\", \"retrievers\", \"parent_document\"];\n\n vectorstore: VectorStoreInterface;\n\n protected childSplitter: TextSplitter;\n\n protected parentSplitter?: TextSplitter;\n\n protected idKey = \"doc_id\";\n\n protected childK?: number;\n\n protected parentK?: number;\n\n childDocumentRetriever:\n | VectorStoreRetrieverInterface<VectorStoreInterface>\n | undefined;\n\n documentCompressor: BaseDocumentCompressor | undefined;\n\n documentCompressorFilteringFn?: ParentDocumentRetrieverFields[\"documentCompressorFilteringFn\"];\n\n constructor(fields: ParentDocumentRetrieverFields) {\n super(fields);\n this.vectorstore = fields.vectorstore;\n this.childSplitter = fields.childSplitter;\n this.parentSplitter = fields.parentSplitter;\n this.idKey = fields.idKey ?? 
this.idKey;\n this.childK = fields.childK;\n this.parentK = fields.parentK;\n this.childDocumentRetriever = fields.childDocumentRetriever;\n this.documentCompressor = fields.documentCompressor;\n this.documentCompressorFilteringFn = fields.documentCompressorFilteringFn;\n }\n\n async _getRelevantDocuments(query: string): Promise<Document[]> {\n let subDocs: SubDocs = [];\n if (this.childDocumentRetriever) {\n subDocs = await this.childDocumentRetriever.invoke(query);\n } else {\n subDocs = await this.vectorstore.similaritySearch(query, this.childK);\n }\n\n if (this.documentCompressor && subDocs.length) {\n subDocs = await this.documentCompressor.compressDocuments(subDocs, query);\n if (this.documentCompressorFilteringFn) {\n subDocs = this.documentCompressorFilteringFn(subDocs);\n }\n }\n\n // Maintain order\n const parentDocIds: string[] = [];\n for (const doc of subDocs) {\n if (!parentDocIds.includes(doc.metadata[this.idKey])) {\n parentDocIds.push(doc.metadata[this.idKey]);\n }\n }\n const parentDocs: Document[] = [];\n const storedParentDocs = await this.docstore.mget(parentDocIds);\n const retrievedDocs: Document[] = storedParentDocs.filter(\n (doc?: Document): doc is Document => doc !== undefined\n );\n parentDocs.push(...retrievedDocs);\n return parentDocs.slice(0, this.parentK);\n }\n\n async _storeDocuments(\n parentDoc: Record<string, Document>,\n childDocs: Document[],\n addToDocstore: boolean\n ) {\n if (this.childDocumentRetriever) {\n await this.childDocumentRetriever.addDocuments(childDocs);\n } else {\n await this.vectorstore.addDocuments(childDocs);\n }\n if (addToDocstore) {\n await this.docstore.mset(Object.entries(parentDoc));\n }\n }\n\n /**\n * Adds documents to the docstore and vectorstores.\n * If a retriever is provided, it will be used to add documents instead of the vectorstore.\n * @param docs The documents to add\n * @param config.ids Optional list of ids for documents. If provided should be the same\n * length as the list of documents. Can provided if parent documents\n * are already in the document store and you don't want to re-add\n * to the docstore. If not provided, random UUIDs will be used as ids.\n * @param config.addToDocstore Boolean of whether to add documents to docstore.\n * This can be false if and only if `ids` are provided. You may want\n * to set this to False if the documents are already in the docstore\n * and you don't want to re-add them.\n * @param config.chunkHeaderOptions Object with options for adding Contextual chunk headers\n */\n async addDocuments(\n docs: Document[],\n config?: {\n ids?: string[];\n addToDocstore?: boolean;\n childDocChunkHeaderOptions?: TextSplitterChunkHeaderOptions;\n }\n ): Promise<void> {\n const {\n ids,\n addToDocstore = true,\n childDocChunkHeaderOptions = {},\n } = config ?? {};\n const parentDocs = this.parentSplitter\n ? 
await this.parentSplitter.splitDocuments(docs)\n : docs;\n let parentDocIds;\n if (ids === undefined) {\n if (!addToDocstore) {\n throw new Error(\n `If ids are not passed in, \"config.addToDocstore\" MUST be true`\n );\n }\n parentDocIds = parentDocs.map((_doc: Document) => uuid.v4());\n } else {\n parentDocIds = ids;\n }\n if (parentDocs.length !== parentDocIds.length) {\n throw new Error(\n `Got uneven list of documents and ids.\\nIf \"ids\" is provided, should be same length as \"documents\".`\n );\n }\n for (let i = 0; i < parentDocs.length; i += 1) {\n const parentDoc = parentDocs[i];\n const parentDocId = parentDocIds[i];\n const subDocs = await this.childSplitter.splitDocuments(\n [parentDoc],\n childDocChunkHeaderOptions\n );\n const taggedSubDocs = subDocs.map(\n (subDoc: Document) =>\n new Document({\n pageContent: subDoc.pageContent,\n metadata: { ...subDoc.metadata, [this.idKey]: parentDocId },\n })\n );\n await this._storeDocuments(\n { [parentDocId]: parentDoc },\n taggedSubDocs,\n addToDocstore\n );\n }\n }\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAkEA,IAAa,0BAAb,cAA6C,qBAAqB;CAChE,OAAO,UAAU;AACf,SAAO;;CAGT,eAAe;EAAC;EAAa;EAAc;EAAkB;CAE7D;CAEA;CAEA;CAEA,QAAkB;CAElB;CAEA;CAEA;CAIA;CAEA;CAEA,YAAY,QAAuC;AACjD,QAAM,OAAO;AACb,OAAK,cAAc,OAAO;AAC1B,OAAK,gBAAgB,OAAO;AAC5B,OAAK,iBAAiB,OAAO;AAC7B,OAAK,QAAQ,OAAO,SAAS,KAAK;AAClC,OAAK,SAAS,OAAO;AACrB,OAAK,UAAU,OAAO;AACtB,OAAK,yBAAyB,OAAO;AACrC,OAAK,qBAAqB,OAAO;AACjC,OAAK,gCAAgC,OAAO;;CAG9C,MAAM,sBAAsB,OAAoC;EAC9D,IAAI,UAAmB,EAAE;AACzB,MAAI,KAAK,uBACP,WAAU,MAAM,KAAK,uBAAuB,OAAO,MAAM;MAEzD,WAAU,MAAM,KAAK,YAAY,iBAAiB,OAAO,KAAK,OAAO;AAGvE,MAAI,KAAK,sBAAsB,QAAQ,QAAQ;AAC7C,aAAU,MAAM,KAAK,mBAAmB,kBAAkB,SAAS,MAAM;AACzE,OAAI,KAAK,8BACP,WAAU,KAAK,8BAA8B,QAAQ;;EAKzD,MAAM,eAAyB,EAAE;AACjC,OAAK,MAAM,OAAO,QAChB,KAAI,CAAC,aAAa,SAAS,IAAI,SAAS,KAAK,OAAO,CAClD,cAAa,KAAK,IAAI,SAAS,KAAK,OAAO;EAG/C,MAAM,aAAyB,EAAE;EAEjC,MAAM,iBADmB,MAAM,KAAK,SAAS,KAAK,aAAa,EACZ,QAChD,QAAoC,QAAQ,KAAA,EAC9C;AACD,aAAW,KAAK,GAAG,cAAc;AACjC,SAAO,WAAW,MAAM,GAAG,KAAK,QAAQ;;CAG1C,MAAM,gBACJ,WACA,WACA,eACA;AACA,MAAI,KAAK,uBACP,OAAM,KAAK,uBAAuB,aAAa,UAAU;MAEzD,OAAM,KAAK,YAAY,aAAa,UAAU;AAEhD,MAAI,cACF,OAAM,KAAK,SAAS,KAAK,OAAO,QAAQ,UAAU,CAAC;;;;;;;;;;;;;;;;CAkBvD,MAAM,aACJ,MACA,QAKe;EACf,MAAM,EACJ,KACA,gBAAgB,MAChB,6BAA6B,EAAE,KAC7B,UAAU,EAAE;EAChB,MAAM,aAAa,KAAK,iBACpB,MAAM,KAAK,eAAe,eAAe,KAAK,GAC9C;EACJ,IAAI;AACJ,MAAI,QAAQ,KAAA,GAAW;AACrB,OAAI,CAAC,cACH,OAAM,IAAI,MACR,gEACD;AAEH,kBAAe,WAAW,KAAK,SAAmB,KAAK,IAAI,CAAC;QAE5D,gBAAe;AAEjB,MAAI,WAAW,WAAW,aAAa,OACrC,OAAM,IAAI,MACR,qGACD;AAEH,OAAK,IAAI,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK,GAAG;GAC7C,MAAM,YAAY,WAAW;GAC7B,MAAM,cAAc,aAAa;GAKjC,MAAM,iBAJU,MAAM,KAAK,cAAc,eACvC,CAAC,UAAU,EACX,2BACD,EAC6B,KAC3B,WACC,IAAI,SAAS;IACX,aAAa,OAAO;IACpB,UAAU;KAAE,GAAG,OAAO;MAAW,KAAK,QAAQ;KAAa;IAC5D,CAAC,CACL;AACD,SAAM,KAAK,gBACT,GAAG,cAAc,WAAW,EAC5B,eACA,cACD"}
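In the `parent_document.js.map` hunk above, the removed and added entries differ only in the lint-suppression comment embedded in `src/retrievers/parent_document.ts`: `eslint-disable-next-line` becomes `oxlint-disable-next-line` (the `parent_document.cjs.map` entry added above already carries the oxlint form). A minimal sketch of the affected line as it reads in 1.0.28, transcribed from the `sourcesContent` above; the import is included here only so the snippet stands alone:

```typescript
import { Document } from "@langchain/core/documents";

// In 1.0.27 this suppression read `// eslint-disable-next-line ...`;
// 1.0.28 routes it through oxlint instead. The type itself is unchanged.
// oxlint-disable-next-line @typescript-eslint/no-explicit-any
export type SubDocs = Document<Record<string, any>>[];
```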
@@ -1,5 +1,4 @@
  import { BasePromptTemplate } from "@langchain/core/prompts";
- import * as _langchain_core_prompt_values0 from "@langchain/core/prompt_values";
  import { Document } from "@langchain/core/documents";
 
  //#region src/schema/prompt_template.d.ts
@@ -12,7 +11,7 @@ import { Document } from "@langchain/core/documents";
  * @returns {Promise<string>} A Promise that resolves to the formatted document as a string.
  * @throws {Error} If the document is missing required metadata variables specified in the prompt template.
  */
- declare const formatDocument: (document: Document<Record<string, any>>, prompt: BasePromptTemplate<any, _langchain_core_prompt_values0.BasePromptValueInterface, any>) => Promise<string>;
+ declare const formatDocument: (document: Document, prompt: BasePromptTemplate) => Promise<string>;
  //#endregion
  export { formatDocument };
  //# sourceMappingURL=prompt_template.d.cts.map
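The declaration change above simplifies `formatDocument`'s published signature without altering its behavior: `Document<Record<string, any>>` is spelled with what is already `Document`'s default generic, and the explicit `BasePromptTemplate<any, BasePromptValueInterface, any>` arguments appear to match that class's defaults, so dropping them (and the now-unneeded namespace import of `@langchain/core/prompt_values`) leaves the effective type the same while making the declaration self-contained. A usage sketch against the new declaration; the `@langchain/classic/schema/prompt_template` import path is an assumption for illustration, not something this diff confirms:

```typescript
import { PromptTemplate } from "@langchain/core/prompts";
import { Document } from "@langchain/core/documents";
// Hypothetical import path, for illustration only; point it at wherever
// your toolchain exposes the schema/prompt_template module declared above.
import { formatDocument } from "@langchain/classic/schema/prompt_template";

// `pageContent` is filled from the document body; other template
// variables are looked up in the document's metadata.
const prompt = PromptTemplate.fromTemplate(
  "{pageContent}\n(source: {source})"
);
const doc = new Document({
  pageContent: "Justice Breyer was nominated in 1994.",
  metadata: { source: "state_of_the_union.txt" },
});

// Resolves to the filled-in template string; per the @throws note above,
// it rejects if `doc` lacks a metadata variable the prompt requires.
const formatted: string = await formatDocument(doc, prompt);
console.log(formatted);
```

Because the removed generics matched the defaults, existing call sites should continue to type-check unchanged against the 1.0.28 declaration.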
@@ -1 +1 @@
- {"version":3,"file":"prompt_template.d.cts","names":[],"sources":["../../src/schema/prompt_template.ts"],"mappings":";;;;;;;;AAYA;;;;;;cAAa,cAAA,GAAc,QAAA,EAAA,QAAA,CAAA,MAAA,gBAAA,MAAA,EAAA,kBAAA,MAAA,8BAAA,CAAA,wBAAA,WAAA,OAAA"}
+ {"version":3,"file":"prompt_template.d.cts","names":[],"sources":["../../src/schema/prompt_template.ts"],"mappings":";;;;;;AAYA;;;;;;;cAAa,cAAA,GAAc,QAAA,EACf,QAAA,EAAQ,MAAA,EACV,kBAAA,KACP,OAAA"}
@@ -1,6 +1,5 @@
  import { BasePromptTemplate } from "@langchain/core/prompts";
  import { Document } from "@langchain/core/documents";
- import * as _langchain_core_prompt_values0 from "@langchain/core/prompt_values";
 
  //#region src/schema/prompt_template.d.ts
  /**
@@ -12,7 +11,7 @@ import * as _langchain_core_prompt_values0 from "@langchain/core/prompt_values";
  * @returns {Promise<string>} A Promise that resolves to the formatted document as a string.
  * @throws {Error} If the document is missing required metadata variables specified in the prompt template.
  */
- declare const formatDocument: (document: Document<Record<string, any>>, prompt: BasePromptTemplate<any, _langchain_core_prompt_values0.BasePromptValueInterface, any>) => Promise<string>;
+ declare const formatDocument: (document: Document, prompt: BasePromptTemplate) => Promise<string>;
  //#endregion
  export { formatDocument };
  //# sourceMappingURL=prompt_template.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"prompt_template.d.ts","names":[],"sources":["../../src/schema/prompt_template.ts"],"mappings":";;;;;;;;AAYA;;;;;;cAAa,cAAA,GAAc,QAAA,EAAA,QAAA,CAAA,MAAA,gBAAA,MAAA,EAAA,kBAAA,MAAA,8BAAA,CAAA,wBAAA,WAAA,OAAA"}
+ {"version":3,"file":"prompt_template.d.ts","names":[],"sources":["../../src/schema/prompt_template.ts"],"mappings":";;;;;;AAYA;;;;;;;cAAa,cAAA,GAAc,QAAA,EACf,QAAA,EAAQ,MAAA,EACV,kBAAA,KACP,OAAA"}