langchain 0.2.10 → 0.2.11

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (345)
  1. package/chat_models/universal.cjs +1 -0
  2. package/chat_models/universal.d.cts +1 -0
  3. package/chat_models/universal.d.ts +1 -0
  4. package/chat_models/universal.js +1 -0
  5. package/dist/chat_models/universal.cjs +600 -0
  6. package/dist/chat_models/universal.d.ts +115 -0
  7. package/dist/chat_models/universal.js +595 -0
  8. package/dist/document_loaders/web/assemblyai.cjs +9 -1
  9. package/dist/document_loaders/web/assemblyai.js +9 -1
  10. package/dist/experimental/chrome_ai/app/dist/bundle.cjs +1250 -0
  11. package/dist/experimental/chrome_ai/app/dist/bundle.d.ts +1 -0
  12. package/dist/experimental/chrome_ai/app/dist/bundle.js +1249 -0
  13. package/dist/load/import_constants.cjs +1 -0
  14. package/dist/load/import_constants.js +1 -0
  15. package/package.json +65 -6
  16. package/dist/agents/tests/agent.int.test.d.ts +0 -1
  17. package/dist/agents/tests/agent.int.test.js +0 -309
  18. package/dist/agents/tests/chat_convo_output_parser.test.d.ts +0 -1
  19. package/dist/agents/tests/chat_convo_output_parser.test.js +0 -91
  20. package/dist/agents/tests/create_openai_functions_agent.int.test.d.ts +0 -2
  21. package/dist/agents/tests/create_openai_functions_agent.int.test.js +0 -71
  22. package/dist/agents/tests/create_openai_tools_agent.int.test.d.ts +0 -1
  23. package/dist/agents/tests/create_openai_tools_agent.int.test.js +0 -75
  24. package/dist/agents/tests/create_react_agent.int.test.d.ts +0 -1
  25. package/dist/agents/tests/create_react_agent.int.test.js +0 -32
  26. package/dist/agents/tests/create_structured_chat_agent.int.test.d.ts +0 -1
  27. package/dist/agents/tests/create_structured_chat_agent.int.test.js +0 -32
  28. package/dist/agents/tests/create_tool_calling_agent.int.test.d.ts +0 -1
  29. package/dist/agents/tests/create_tool_calling_agent.int.test.js +0 -122
  30. package/dist/agents/tests/create_xml_agent.int.test.d.ts +0 -1
  31. package/dist/agents/tests/create_xml_agent.int.test.js +0 -32
  32. package/dist/agents/tests/json.test.d.ts +0 -1
  33. package/dist/agents/tests/json.test.js +0 -74
  34. package/dist/agents/tests/react.test.d.ts +0 -1
  35. package/dist/agents/tests/react.test.js +0 -44
  36. package/dist/agents/tests/runnable.int.test.d.ts +0 -1
  37. package/dist/agents/tests/runnable.int.test.js +0 -104
  38. package/dist/agents/tests/sql.test.d.ts +0 -1
  39. package/dist/agents/tests/sql.test.js +0 -133
  40. package/dist/agents/tests/structured_chat_output_parser.test.d.ts +0 -1
  41. package/dist/agents/tests/structured_chat_output_parser.test.js +0 -35
  42. package/dist/agents/tests/structured_chat_output_parser_with_retries.int.test.d.ts +0 -1
  43. package/dist/agents/tests/structured_chat_output_parser_with_retries.int.test.js +0 -44
  44. package/dist/agents/tests/structured_output_runnables.int.test.d.ts +0 -1
  45. package/dist/agents/tests/structured_output_runnables.int.test.js +0 -112
  46. package/dist/agents/toolkits/tests/conversational_retrieval.int.test.d.ts +0 -1
  47. package/dist/agents/toolkits/tests/conversational_retrieval.int.test.js +0 -41
  48. package/dist/cache/tests/file_system.int.test.d.ts +0 -1
  49. package/dist/cache/tests/file_system.int.test.js +0 -32
  50. package/dist/chains/openai_functions/tests/create_runnable_chains.int.test.d.ts +0 -1
  51. package/dist/chains/openai_functions/tests/create_runnable_chains.int.test.js +0 -139
  52. package/dist/chains/openai_functions/tests/extraction.int.test.d.ts +0 -1
  53. package/dist/chains/openai_functions/tests/extraction.int.test.js +0 -33
  54. package/dist/chains/openai_functions/tests/openapi.int.test.d.ts +0 -1
  55. package/dist/chains/openai_functions/tests/openapi.int.test.js +0 -130
  56. package/dist/chains/openai_functions/tests/openapi.test.d.ts +0 -1
  57. package/dist/chains/openai_functions/tests/openapi.test.js +0 -172
  58. package/dist/chains/openai_functions/tests/structured_output.int.test.d.ts +0 -1
  59. package/dist/chains/openai_functions/tests/structured_output.int.test.js +0 -40
  60. package/dist/chains/openai_functions/tests/structured_output.test.d.ts +0 -1
  61. package/dist/chains/openai_functions/tests/structured_output.test.js +0 -102
  62. package/dist/chains/openai_functions/tests/tagging.int.test.d.ts +0 -1
  63. package/dist/chains/openai_functions/tests/tagging.int.test.js +0 -21
  64. package/dist/chains/query_constructor/tests/query_chain.int.test.d.ts +0 -1
  65. package/dist/chains/query_constructor/tests/query_chain.int.test.js +0 -93
  66. package/dist/chains/query_constructor/tests/query_parser.test.d.ts +0 -1
  67. package/dist/chains/query_constructor/tests/query_parser.test.js +0 -28
  68. package/dist/chains/question_answering/tests/load.int.test.d.ts +0 -1
  69. package/dist/chains/question_answering/tests/load.int.test.js +0 -39
  70. package/dist/chains/router/tests/multi_prompt.int.test.d.ts +0 -1
  71. package/dist/chains/router/tests/multi_prompt.int.test.js +0 -45
  72. package/dist/chains/router/tests/multi_prompt.test.d.ts +0 -1
  73. package/dist/chains/router/tests/multi_prompt.test.js +0 -62
  74. package/dist/chains/router/tests/multi_retrieval_qa.int.test.d.ts +0 -1
  75. package/dist/chains/router/tests/multi_retrieval_qa.int.test.js +0 -67
  76. package/dist/chains/router/tests/multi_retrieval_qa.test.d.ts +0 -1
  77. package/dist/chains/router/tests/multi_retrieval_qa.test.js +0 -125
  78. package/dist/chains/summarization/tests/load.int.test.d.ts +0 -1
  79. package/dist/chains/summarization/tests/load.int.test.js +0 -37
  80. package/dist/chains/tests/api_chain.int.test.d.ts +0 -1
  81. package/dist/chains/tests/api_chain.int.test.js +0 -55
  82. package/dist/chains/tests/combine_docs_chain.int.test.d.ts +0 -1
  83. package/dist/chains/tests/combine_docs_chain.int.test.js +0 -50
  84. package/dist/chains/tests/combine_docs_chain.test.d.ts +0 -1
  85. package/dist/chains/tests/combine_docs_chain.test.js +0 -98
  86. package/dist/chains/tests/constitutional_chain.int.test.d.ts +0 -1
  87. package/dist/chains/tests/constitutional_chain.int.test.js +0 -30
  88. package/dist/chains/tests/constitutional_chain.test.d.ts +0 -1
  89. package/dist/chains/tests/constitutional_chain.test.js +0 -63
  90. package/dist/chains/tests/conversation_chain.int.test.d.ts +0 -1
  91. package/dist/chains/tests/conversation_chain.int.test.js +0 -9
  92. package/dist/chains/tests/conversational_retrieval_chain.int.test.d.ts +0 -1
  93. package/dist/chains/tests/conversational_retrieval_chain.int.test.js +0 -243
  94. package/dist/chains/tests/example_data/open_meteo_docs.d.ts +0 -1
  95. package/dist/chains/tests/example_data/open_meteo_docs.js +0 -29
  96. package/dist/chains/tests/history_aware_retriever.int.test.d.ts +0 -1
  97. package/dist/chains/tests/history_aware_retriever.int.test.js +0 -41
  98. package/dist/chains/tests/history_aware_retriever.test.d.ts +0 -1
  99. package/dist/chains/tests/history_aware_retriever.test.js +0 -27
  100. package/dist/chains/tests/llm_chain.int.test.d.ts +0 -1
  101. package/dist/chains/tests/llm_chain.int.test.js +0 -119
  102. package/dist/chains/tests/openai_moderation.int.test.d.ts +0 -1
  103. package/dist/chains/tests/openai_moderation.int.test.js +0 -30
  104. package/dist/chains/tests/retrieval_chain.int.test.d.ts +0 -1
  105. package/dist/chains/tests/retrieval_chain.int.test.js +0 -69
  106. package/dist/chains/tests/retrieval_chain.test.d.ts +0 -1
  107. package/dist/chains/tests/retrieval_chain.test.js +0 -36
  108. package/dist/chains/tests/sequential_chain.int.test.d.ts +0 -1
  109. package/dist/chains/tests/sequential_chain.int.test.js +0 -88
  110. package/dist/chains/tests/sequential_chain.test.d.ts +0 -1
  111. package/dist/chains/tests/sequential_chain.test.js +0 -295
  112. package/dist/chains/tests/simple_sequential_chain.int.test.d.ts +0 -1
  113. package/dist/chains/tests/simple_sequential_chain.int.test.js +0 -81
  114. package/dist/chains/tests/simple_sequential_chain.test.d.ts +0 -1
  115. package/dist/chains/tests/simple_sequential_chain.test.js +0 -128
  116. package/dist/chains/tests/sql_db_chain.int.test.d.ts +0 -1
  117. package/dist/chains/tests/sql_db_chain.int.test.js +0 -125
  118. package/dist/chains/tests/transform.test.d.ts +0 -1
  119. package/dist/chains/tests/transform.test.js +0 -12
  120. package/dist/chains/tests/vector_db_qa_chain.int.test.d.ts +0 -1
  121. package/dist/chains/tests/vector_db_qa_chain.int.test.js +0 -45
  122. package/dist/document_loaders/tests/assemblyai.int.test.d.ts +0 -1
  123. package/dist/document_loaders/tests/assemblyai.int.test.js +0 -111
  124. package/dist/document_loaders/tests/chatgpt-blob.test.d.ts +0 -1
  125. package/dist/document_loaders/tests/chatgpt-blob.test.js +0 -30
  126. package/dist/document_loaders/tests/chatgpt.test.d.ts +0 -1
  127. package/dist/document_loaders/tests/chatgpt.test.js +0 -29
  128. package/dist/document_loaders/tests/cheerio.int.test.d.ts +0 -1
  129. package/dist/document_loaders/tests/cheerio.int.test.js +0 -21
  130. package/dist/document_loaders/tests/college_confidential.int.test.d.ts +0 -1
  131. package/dist/document_loaders/tests/college_confidential.int.test.js +0 -6
  132. package/dist/document_loaders/tests/confluence.test.d.ts +0 -1
  133. package/dist/document_loaders/tests/confluence.test.js +0 -52
  134. package/dist/document_loaders/tests/couchbase.int.test.d.ts +0 -1
  135. package/dist/document_loaders/tests/couchbase.int.test.js +0 -28
  136. package/dist/document_loaders/tests/csv-blob.test.d.ts +0 -1
  137. package/dist/document_loaders/tests/csv-blob.test.js +0 -53
  138. package/dist/document_loaders/tests/csv.test.d.ts +0 -1
  139. package/dist/document_loaders/tests/csv.test.js +0 -41
  140. package/dist/document_loaders/tests/directory.test.d.ts +0 -1
  141. package/dist/document_loaders/tests/directory.test.js +0 -38
  142. package/dist/document_loaders/tests/docx.test.d.ts +0 -1
  143. package/dist/document_loaders/tests/docx.test.js +0 -11
  144. package/dist/document_loaders/tests/epub.test.d.ts +0 -1
  145. package/dist/document_loaders/tests/epub.test.js +0 -18
  146. package/dist/document_loaders/tests/example_data/github_api_responses.d.ts +0 -5
  147. package/dist/document_loaders/tests/example_data/github_api_responses.js +0 -91
  148. package/dist/document_loaders/tests/figma.int.test.d.ts +0 -1
  149. package/dist/document_loaders/tests/figma.int.test.js +0 -13
  150. package/dist/document_loaders/tests/firecrawl.int.test.d.ts +0 -1
  151. package/dist/document_loaders/tests/firecrawl.int.test.js +0 -30
  152. package/dist/document_loaders/tests/gitbook.int.test.d.ts +0 -1
  153. package/dist/document_loaders/tests/gitbook.int.test.js +0 -14
  154. package/dist/document_loaders/tests/github.int.test.d.ts +0 -1
  155. package/dist/document_loaders/tests/github.int.test.js +0 -86
  156. package/dist/document_loaders/tests/github.test.d.ts +0 -1
  157. package/dist/document_loaders/tests/github.test.js +0 -51
  158. package/dist/document_loaders/tests/hn.int.test.d.ts +0 -1
  159. package/dist/document_loaders/tests/hn.int.test.js +0 -6
  160. package/dist/document_loaders/tests/imsdb.test.d.ts +0 -1
  161. package/dist/document_loaders/tests/imsdb.test.js +0 -6
  162. package/dist/document_loaders/tests/json-blob.test.d.ts +0 -1
  163. package/dist/document_loaders/tests/json-blob.test.js +0 -91
  164. package/dist/document_loaders/tests/json.test.d.ts +0 -1
  165. package/dist/document_loaders/tests/json.test.js +0 -69
  166. package/dist/document_loaders/tests/jsonl-blob.test.d.ts +0 -1
  167. package/dist/document_loaders/tests/jsonl-blob.test.js +0 -46
  168. package/dist/document_loaders/tests/jsonl.test.d.ts +0 -1
  169. package/dist/document_loaders/tests/jsonl.test.js +0 -15
  170. package/dist/document_loaders/tests/multi_file.test.d.ts +0 -1
  171. package/dist/document_loaders/tests/multi_file.test.js +0 -49
  172. package/dist/document_loaders/tests/notion.test.d.ts +0 -1
  173. package/dist/document_loaders/tests/notion.test.js +0 -11
  174. package/dist/document_loaders/tests/notionapi.int.test.d.ts +0 -1
  175. package/dist/document_loaders/tests/notionapi.int.test.js +0 -80
  176. package/dist/document_loaders/tests/notionapi.test.d.ts +0 -1
  177. package/dist/document_loaders/tests/notionapi.test.js +0 -84
  178. package/dist/document_loaders/tests/notiondb.int.test.d.ts +0 -1
  179. package/dist/document_loaders/tests/notiondb.int.test.js +0 -13
  180. package/dist/document_loaders/tests/obsidian.test.d.ts +0 -1
  181. package/dist/document_loaders/tests/obsidian.test.js +0 -119
  182. package/dist/document_loaders/tests/pdf-blob.test.d.ts +0 -1
  183. package/dist/document_loaders/tests/pdf-blob.test.js +0 -44
  184. package/dist/document_loaders/tests/pdf.test.d.ts +0 -1
  185. package/dist/document_loaders/tests/pdf.test.js +0 -25
  186. package/dist/document_loaders/tests/playwright_web.int.test.d.ts +0 -1
  187. package/dist/document_loaders/tests/playwright_web.int.test.js +0 -27
  188. package/dist/document_loaders/tests/pptx.test.d.ts +0 -1
  189. package/dist/document_loaders/tests/pptx.test.js +0 -17
  190. package/dist/document_loaders/tests/puppeteer.int.test.d.ts +0 -1
  191. package/dist/document_loaders/tests/puppeteer.int.test.js +0 -47
  192. package/dist/document_loaders/tests/recursive_url.int.test.d.ts +0 -1
  193. package/dist/document_loaders/tests/recursive_url.int.test.js +0 -64
  194. package/dist/document_loaders/tests/s3.int.test.d.ts +0 -1
  195. package/dist/document_loaders/tests/s3.int.test.js +0 -48
  196. package/dist/document_loaders/tests/searchapi.test.d.ts +0 -1
  197. package/dist/document_loaders/tests/searchapi.test.js +0 -29
  198. package/dist/document_loaders/tests/serpapi.test.d.ts +0 -1
  199. package/dist/document_loaders/tests/serpapi.test.js +0 -21
  200. package/dist/document_loaders/tests/sitemap.int.test.d.ts +0 -1
  201. package/dist/document_loaders/tests/sitemap.int.test.js +0 -28
  202. package/dist/document_loaders/tests/sonix_audio.int.test.d.ts +0 -1
  203. package/dist/document_loaders/tests/sonix_audio.int.test.js +0 -55
  204. package/dist/document_loaders/tests/sort_xyz_blockchain.int.test.d.ts +0 -1
  205. package/dist/document_loaders/tests/sort_xyz_blockchain.int.test.js +0 -38
  206. package/dist/document_loaders/tests/srt-blob.test.d.ts +0 -1
  207. package/dist/document_loaders/tests/srt-blob.test.js +0 -18
  208. package/dist/document_loaders/tests/srt.test.d.ts +0 -1
  209. package/dist/document_loaders/tests/srt.test.js +0 -16
  210. package/dist/document_loaders/tests/text-blob.test.d.ts +0 -1
  211. package/dist/document_loaders/tests/text-blob.test.js +0 -14
  212. package/dist/document_loaders/tests/text.test.d.ts +0 -1
  213. package/dist/document_loaders/tests/text.test.js +0 -22
  214. package/dist/document_loaders/tests/unstructured.int.test.d.ts +0 -1
  215. package/dist/document_loaders/tests/unstructured.int.test.js +0 -58
  216. package/dist/document_loaders/tests/webpdf.int.test.d.ts +0 -1
  217. package/dist/document_loaders/tests/webpdf.int.test.js +0 -90
  218. package/dist/document_transformers/tests/openai_functions.int.test.d.ts +0 -1
  219. package/dist/document_transformers/tests/openai_functions.int.test.js +0 -40
  220. package/dist/embeddings/tests/cache.test.d.ts +0 -1
  221. package/dist/embeddings/tests/cache.test.js +0 -24
  222. package/dist/embeddings/tests/fake.test.d.ts +0 -1
  223. package/dist/embeddings/tests/fake.test.js +0 -34
  224. package/dist/evaluation/agents/tests/trajectory_eval_chain.int.test.d.ts +0 -1
  225. package/dist/evaluation/agents/tests/trajectory_eval_chain.int.test.js +0 -33
  226. package/dist/evaluation/comparison/tests/pairwise_eval_chain.int.test.d.ts +0 -1
  227. package/dist/evaluation/comparison/tests/pairwise_eval_chain.int.test.js +0 -46
  228. package/dist/evaluation/criteria/tests/criteria_eval_chain.int.test.d.ts +0 -1
  229. package/dist/evaluation/criteria/tests/criteria_eval_chain.int.test.js +0 -108
  230. package/dist/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.d.ts +0 -1
  231. package/dist/evaluation/embedding_distance/tests/embedding_distance_eval_chain.int.test.js +0 -26
  232. package/dist/evaluation/qa/tests/eval_chain.int.test.d.ts +0 -1
  233. package/dist/evaluation/qa/tests/eval_chain.int.test.js +0 -27
  234. package/dist/experimental/autogpt/tests/output_parser.test.d.ts +0 -1
  235. package/dist/experimental/autogpt/tests/output_parser.test.js +0 -8
  236. package/dist/experimental/autogpt/tests/prompt.test.d.ts +0 -1
  237. package/dist/experimental/autogpt/tests/prompt.test.js +0 -69
  238. package/dist/experimental/autogpt/tests/prompt_generator.test.d.ts +0 -1
  239. package/dist/experimental/autogpt/tests/prompt_generator.test.js +0 -91
  240. package/dist/experimental/chains/tests/violation_of_expectations_chain.int.test.d.ts +0 -1
  241. package/dist/experimental/chains/tests/violation_of_expectations_chain.int.test.js +0 -26
  242. package/dist/experimental/generative_agents/tests/generative_agent.int.test.d.ts +0 -1
  243. package/dist/experimental/generative_agents/tests/generative_agent.int.test.js +0 -304
  244. package/dist/experimental/masking/tests/masking-extended.test.d.ts +0 -1
  245. package/dist/experimental/masking/tests/masking-extended.test.js +0 -58
  246. package/dist/experimental/masking/tests/masking.test.d.ts +0 -1
  247. package/dist/experimental/masking/tests/masking.test.js +0 -388
  248. package/dist/experimental/openai_assistant/tests/openai_assistant.int.test.d.ts +0 -1
  249. package/dist/experimental/openai_assistant/tests/openai_assistant.int.test.js +0 -203
  250. package/dist/experimental/openai_files/tests/openai_file.int.test.d.ts +0 -1
  251. package/dist/experimental/openai_files/tests/openai_file.int.test.js +0 -87
  252. package/dist/experimental/plan_and_execute/tests/plan_and_execute.int.test.d.ts +0 -1
  253. package/dist/experimental/plan_and_execute/tests/plan_and_execute.int.test.js +0 -54
  254. package/dist/experimental/prompts/tests/handlebars.test.d.ts +0 -1
  255. package/dist/experimental/prompts/tests/handlebars.test.js +0 -24
  256. package/dist/experimental/tools/tests/pyinterpreter.int.test.d.ts +0 -1
  257. package/dist/experimental/tools/tests/pyinterpreter.int.test.js +0 -22
  258. package/dist/load/tests/cross_language.test.d.ts +0 -1
  259. package/dist/load/tests/cross_language.test.js +0 -83
  260. package/dist/load/tests/load.int.test.d.ts +0 -1
  261. package/dist/load/tests/load.int.test.js +0 -9
  262. package/dist/load/tests/load.test.d.ts +0 -1
  263. package/dist/load/tests/load.test.js +0 -412
  264. package/dist/memory/tests/buffer_memory.test.d.ts +0 -1
  265. package/dist/memory/tests/buffer_memory.test.js +0 -34
  266. package/dist/memory/tests/buffer_token_memory.int.test.d.ts +0 -1
  267. package/dist/memory/tests/buffer_token_memory.int.test.js +0 -47
  268. package/dist/memory/tests/buffer_window_memory.test.d.ts +0 -1
  269. package/dist/memory/tests/buffer_window_memory.test.js +0 -42
  270. package/dist/memory/tests/combined_memory.int.test.d.ts +0 -1
  271. package/dist/memory/tests/combined_memory.int.test.js +0 -74
  272. package/dist/memory/tests/entity_memory.int.test.d.ts +0 -1
  273. package/dist/memory/tests/entity_memory.int.test.js +0 -79
  274. package/dist/memory/tests/entity_memory.test.d.ts +0 -1
  275. package/dist/memory/tests/entity_memory.test.js +0 -48
  276. package/dist/memory/tests/summary.int.test.d.ts +0 -1
  277. package/dist/memory/tests/summary.int.test.js +0 -50
  278. package/dist/memory/tests/summary_buffer.int.test.d.ts +0 -1
  279. package/dist/memory/tests/summary_buffer.int.test.js +0 -55
  280. package/dist/memory/tests/vector_store_memory.int.test.d.ts +0 -1
  281. package/dist/memory/tests/vector_store_memory.int.test.js +0 -55
  282. package/dist/output_parsers/tests/combining.int.test.d.ts +0 -1
  283. package/dist/output_parsers/tests/combining.int.test.js +0 -26
  284. package/dist/output_parsers/tests/combining.test.d.ts +0 -1
  285. package/dist/output_parsers/tests/combining.test.js +0 -54
  286. package/dist/output_parsers/tests/datetime.test.d.ts +0 -1
  287. package/dist/output_parsers/tests/datetime.test.js +0 -14
  288. package/dist/output_parsers/tests/expression.test.d.ts +0 -1
  289. package/dist/output_parsers/tests/expression.test.js +0 -339
  290. package/dist/output_parsers/tests/http_response.test.d.ts +0 -1
  291. package/dist/output_parsers/tests/http_response.test.js +0 -39
  292. package/dist/output_parsers/tests/list.test.d.ts +0 -1
  293. package/dist/output_parsers/tests/list.test.js +0 -27
  294. package/dist/output_parsers/tests/openai_functions.int.test.d.ts +0 -1
  295. package/dist/output_parsers/tests/openai_functions.int.test.js +0 -89
  296. package/dist/output_parsers/tests/openai_tools.int.test.d.ts +0 -1
  297. package/dist/output_parsers/tests/openai_tools.int.test.js +0 -36
  298. package/dist/output_parsers/tests/structured.int.test.d.ts +0 -1
  299. package/dist/output_parsers/tests/structured.int.test.js +0 -150
  300. package/dist/prompts/tests/selectors.test.d.ts +0 -1
  301. package/dist/prompts/tests/selectors.test.js +0 -59
  302. package/dist/retrievers/self_query/tests/memory_self_query.int.test.d.ts +0 -1
  303. package/dist/retrievers/self_query/tests/memory_self_query.int.test.js +0 -330
  304. package/dist/retrievers/tests/chain_extract.int.test.d.ts +0 -1
  305. package/dist/retrievers/tests/chain_extract.int.test.js +0 -32
  306. package/dist/retrievers/tests/ensemble_retriever.int.test.d.ts +0 -1
  307. package/dist/retrievers/tests/ensemble_retriever.int.test.js +0 -74
  308. package/dist/retrievers/tests/hyde.int.test.d.ts +0 -1
  309. package/dist/retrievers/tests/hyde.int.test.js +0 -44
  310. package/dist/retrievers/tests/matryoshka_retriever.int.test.d.ts +0 -1
  311. package/dist/retrievers/tests/matryoshka_retriever.int.test.js +0 -113
  312. package/dist/retrievers/tests/multi_query.int.test.d.ts +0 -1
  313. package/dist/retrievers/tests/multi_query.int.test.js +0 -45
  314. package/dist/retrievers/tests/parent_document.int.test.d.ts +0 -1
  315. package/dist/retrievers/tests/parent_document.int.test.js +0 -122
  316. package/dist/retrievers/tests/score_threshold.int.test.d.ts +0 -1
  317. package/dist/retrievers/tests/score_threshold.int.test.js +0 -83
  318. package/dist/retrievers/tests/time_weighted.test.d.ts +0 -1
  319. package/dist/retrievers/tests/time_weighted.test.js +0 -320
  320. package/dist/retrievers/tests/vectorstores.test.d.ts +0 -1
  321. package/dist/retrievers/tests/vectorstores.test.js +0 -50
  322. package/dist/smith/tests/run_on_dataset.int.test.d.ts +0 -1
  323. package/dist/smith/tests/run_on_dataset.int.test.js +0 -257
  324. package/dist/smith/tests/runner_utils.int.test.d.ts +0 -9
  325. package/dist/smith/tests/runner_utils.int.test.js +0 -234
  326. package/dist/storage/tests/file_system.test.d.ts +0 -1
  327. package/dist/storage/tests/file_system.test.js +0 -81
  328. package/dist/tools/tests/chain.test.d.ts +0 -1
  329. package/dist/tools/tests/chain.test.js +0 -136
  330. package/dist/tools/tests/webbrowser.int.test.d.ts +0 -1
  331. package/dist/tools/tests/webbrowser.int.test.js +0 -80
  332. package/dist/tools/tests/webbrowser.test.d.ts +0 -1
  333. package/dist/tools/tests/webbrowser.test.js +0 -21
  334. package/dist/util/tests/async_caller.int.test.d.ts +0 -1
  335. package/dist/util/tests/async_caller.int.test.js +0 -34
  336. package/dist/util/tests/azure.test.d.ts +0 -1
  337. package/dist/util/tests/azure.test.js +0 -42
  338. package/dist/util/tests/openai-stream.test.d.ts +0 -1
  339. package/dist/util/tests/openai-stream.test.js +0 -135
  340. package/dist/util/tests/set.test.d.ts +0 -1
  341. package/dist/util/tests/set.test.js +0 -36
  342. package/dist/util/tests/sql_utils.test.d.ts +0 -1
  343. package/dist/util/tests/sql_utils.test.js +0 -50
  344. package/dist/vectorstores/tests/memory.test.d.ts +0 -1
  345. package/dist/vectorstores/tests/memory.test.js +0 -78
@@ -1,9 +0,0 @@
- import { test } from "@jest/globals";
- import { OpenAI } from "@langchain/openai";
- import { ConversationChain } from "../conversation.js";
- test("Test ConversationChain", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const chain = new ConversationChain({ llm: model });
- const res = await chain.call({ input: "my favorite color" });
- console.log({ res });
- });
@@ -1,243 +0,0 @@
- import { expect, test } from "@jest/globals";
- import { OpenAI, OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai";
- import { PromptTemplate } from "@langchain/core/prompts";
- import { ConversationalRetrievalQAChain } from "../conversational_retrieval_chain.js";
- import { MemoryVectorStore } from "../../vectorstores/memory.js";
- import { BufferMemory } from "../../memory/buffer_memory.js";
- test("Test ConversationalRetrievalQAChain from LLM", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
- const res = await chain.call({ question: "foo", chat_history: "bar" });
- console.log({ res });
- });
- test("Test ConversationalRetrievalQAChain from LLM with flag option to return source", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- returnSourceDocuments: true,
- });
- const res = await chain.call({ question: "foo", chat_history: "bar" });
- expect(res).toEqual(expect.objectContaining({
- text: expect.any(String),
- sourceDocuments: expect.arrayContaining([
- expect.objectContaining({
- metadata: expect.objectContaining({
- id: expect.any(Number),
- }),
- pageContent: expect.any(String),
- }),
- ]),
- }));
- });
- test("Test ConversationalRetrievalQAChain from LLM with flag option to return source and memory set", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- returnSourceDocuments: true,
- memory: new BufferMemory({
- memoryKey: "chat_history",
- inputKey: "question",
- outputKey: "text",
- }),
- });
- const res = await chain.call({ question: "foo", chat_history: "bar" });
- expect(res).toEqual(expect.objectContaining({
- text: expect.any(String),
- sourceDocuments: expect.arrayContaining([
- expect.objectContaining({
- metadata: expect.objectContaining({
- id: expect.any(Number),
- }),
- pageContent: expect.any(String),
- }),
- ]),
- }));
- });
- test("Test ConversationalRetrievalQAChain from LLM with override default prompts", async () => {
- const model = new OpenAI({
- modelName: "gpt-3.5-turbo-instruct",
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer.
- {context}
-
- Question: {question}
- Helpful Answer:`;
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- qaTemplate: qa_template,
- });
- const res = await chain.call({
- question: "What is better programming Language Python or Javascript ",
- chat_history: "bar",
- });
- expect(res.text).toContain("I am learning from Aliens");
- console.log({ res });
- });
- test("Test ConversationalRetrievalQAChain from LLM with a chat model", async () => {
- const model = new ChatOpenAI({
- modelName: "gpt-3.5-turbo",
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say "Sorry I dont know, I am learning from Aliens", don't try to make up an answer.
- {context}
-
- Question: {question}
- Helpful Answer:`;
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- qaChainOptions: {
- type: "stuff",
- prompt: PromptTemplate.fromTemplate(qa_template),
- },
- });
- const res = await chain.call({
- question: "What is better programming Language Python or Javascript ",
- chat_history: "bar",
- });
- expect(res.text).toContain("I am learning from Aliens");
- console.log({ res });
- });
- test("Test ConversationalRetrievalQAChain from LLM with a map reduce chain", async () => {
- const model = new ChatOpenAI({
- modelName: "gpt-3.5-turbo",
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts(["Hello world", "Bye bye", "hello nice world", "bye", "hi"], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- qaChainOptions: {
- type: "map_reduce",
- },
- });
- const res = await chain.call({
- question: "What is better programming Language Python or Javascript ",
- chat_history: "bar",
- });
- console.log({ res });
- });
- test("Test ConversationalRetrievalQAChain from LLM without memory", async () => {
- const model = new OpenAI({
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts([
- "Mitochondria are the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
- const question = "What is the powerhouse of the cell?";
- const res = await chain.call({
- question,
- chat_history: "",
- });
- console.log({ res });
- const res2 = await chain.call({
- question: "What are they made out of?",
- chat_history: question + res.text,
- });
- console.log({ res2 });
- });
- test("Test ConversationalRetrievalQAChain from LLM with a chat model without memory", async () => {
- const model = new ChatOpenAI({
- modelName: "gpt-3.5-turbo",
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts([
- "Mitochondria are the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
- const question = "What is the powerhouse of the cell?";
- const res = await chain.call({
- question,
- chat_history: "",
- });
- console.log({ res });
- const res2 = await chain.call({
- question: "What are they made out of?",
- chat_history: question + res.text,
- });
- console.log({ res2 });
- });
- test("Test ConversationalRetrievalQAChain from LLM with memory", async () => {
- const model = new OpenAI({
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts([
- "Mitochondria are the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- memory: new BufferMemory({
- memoryKey: "chat_history",
- }),
- });
- const res = await chain.call({
- question: "What is the powerhouse of the cell?",
- });
- console.log({ res });
- const res2 = await chain.call({
- question: "What are they made out of?",
- });
- console.log({ res2 });
- });
- test("Test ConversationalRetrievalQAChain from LLM with a chat model and memory", async () => {
- const model = new ChatOpenAI({
- modelName: "gpt-3.5-turbo",
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts([
- "Mitochondria are the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever(), {
- memory: new BufferMemory({
- memoryKey: "chat_history",
- returnMessages: true,
- }),
- });
- const res = await chain.call({
- question: "What is the powerhouse of the cell?",
- });
- console.log({ res });
- const res2 = await chain.call({
- question: "What are they made out of?",
- });
- console.log({ res2 });
- });
- test("Test ConversationalRetrievalQAChain from LLM with deprecated history syntax", async () => {
- const model = new OpenAI({
- temperature: 0,
- });
- const vectorStore = await MemoryVectorStore.fromTexts([
- "Mitochondria are the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const chain = ConversationalRetrievalQAChain.fromLLM(model, vectorStore.asRetriever());
- const question = "What is the powerhouse of the cell?";
- const res = await chain.call({
- question,
- chat_history: [],
- });
- console.log({ res });
- const res2 = await chain.call({
- question: "What are they made out of?",
- chat_history: [[question, res.text]],
- });
- console.log({ res2 });
- });
@@ -1 +0,0 @@
- export declare const OPEN_METEO_DOCS = "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t\u00B0C (\u00B0F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0\u00B0C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.";
@@ -1,29 +0,0 @@
- export const OPEN_METEO_DOCS = `BASE URL: https://api.open-meteo.com/
-
- API Documentation
- The API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:
-
- Parameter Format Required Default Description
- latitude, longitude Floating point Yes Geographical WGS84 coordinate of the location
- hourly String array No A list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.
- daily String array No A list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.
- current_weather Bool No false Include current weather conditions in the JSON output.
- temperature_unit String No celsius If fahrenheit is set, all temperature values are converted to Fahrenheit.
- windspeed_unit String No kmh Other wind speed speed units: ms, mph and kn
- precipitation_unit String No mm Other precipitation amount units: inch
- timeformat String No iso8601 If format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.
- timezone String No GMT If timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.
- past_days Integer (0-2) No 0 If past_days is set, yesterday or the day before yesterday data are also returned.
- start_date
- end_date String (yyyy-mm-dd) No The time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).
- models String array No auto Manually select one or more weather models. Per default, the best suitable weather models will be combined.
-
- Variable Valid time Unit Description
- temperature_2m Instant °C (°F) Air temperature at 2 meters above ground
- snowfall Preceding hour sum cm (inch) Snowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent
- rain Preceding hour sum mm (inch) Rain from large scale weather systems of the preceding hour in millimeter
- showers Preceding hour sum mm (inch) Showers from convective precipitation in millimeters from the preceding hour
- weathercode Instant WMO code Weather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.
- snow_depth Instant meters Snow depth on the ground
- freezinglevel_height Instant meters Altitude above sea level of the 0°C level
- visibility Instant meters Viewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.`;
@@ -1,41 +0,0 @@
- import { test } from "@jest/globals";
- import { ChatPromptTemplate } from "@langchain/core/prompts";
- import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
- import { MemoryVectorStore } from "../../vectorstores/memory.js";
- import { createHistoryAwareRetriever } from "../history_aware_retriever.js";
- const QUESTION_GEN_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
-
- Chat History:
- {chat_history}
- Follow Up Input: {input}
- Standalone question:`;
- test("History aware retriever with a followup", async () => {
- const questionGenPrompt = ChatPromptTemplate.fromTemplate(QUESTION_GEN_TEMPLATE);
- const vectorstore = await MemoryVectorStore.fromTexts([
- "Mitochondria is the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const retriever = vectorstore.asRetriever(2);
- const llm = new ChatOpenAI({});
- const chain = await createHistoryAwareRetriever({
- llm,
- retriever,
- rephrasePrompt: questionGenPrompt,
- });
- const outputDocs = await chain.invoke({
- input: "What is the powerhouse of the cell?",
- chat_history: "",
- });
- expect(outputDocs[0].pageContent).toBe("Mitochondria is the powerhouse of the cell");
- const outputDocs2 = await chain.invoke({
- input: "What are they made of?",
- chat_history: [
- "Human: What is the powerhouse of the cell?",
- "Assistant: Mitochondria is the powerhouse of the cell",
- ].join("\n"),
- });
- expect(outputDocs2[0].pageContent).toBe("Mitochondria are made of lipids");
- });
@@ -1 +0,0 @@
- export {};
@@ -1,27 +0,0 @@
- import { test } from "@jest/globals";
- import { ChatPromptTemplate } from "@langchain/core/prompts";
- import { FakeRetriever } from "@langchain/core/utils/testing";
- import { Document } from "@langchain/core/documents";
- import { FakeListLLM } from "../../util/testing/llms/fake.js";
- import { createHistoryAwareRetriever } from "../history_aware_retriever.js";
- test("createHistoryAwareRetriever", async () => {
- const answer = "I know the answer!";
- const questionGenPrompt = ChatPromptTemplate.fromTemplate(`hi! {input} {chat_history}`);
- const fakeRetrievedDocs = [
- new Document({ pageContent: "some fake content" }),
- ];
- const retriever = new FakeRetriever({
- output: fakeRetrievedDocs,
- });
- const llm = new FakeListLLM({ responses: [answer] });
- const input = "What is the answer?";
- const chain = await createHistoryAwareRetriever({
- llm,
- retriever,
- rephrasePrompt: questionGenPrompt,
- });
- const output = await chain.invoke({ input, chat_history: [] });
- expect(output).toEqual(fakeRetrievedDocs);
- const output2 = await chain.invoke({ input, chat_history: "foo" });
- expect(output2).toEqual(fakeRetrievedDocs);
- });
@@ -1 +0,0 @@
- export {};
@@ -1,119 +0,0 @@
- import { test } from "@jest/globals";
- import { OpenAI, ChatOpenAI } from "@langchain/openai";
- import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate, } from "@langchain/core/prompts";
- import { LLMChain } from "../llm_chain.js";
- import { BufferMemory } from "../../memory/buffer_memory.js";
- test("Test OpenAI", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "Print {foo}",
- inputVariables: ["foo"],
- });
- const chain = new LLMChain({ prompt, llm: model });
- const res = await chain.call({ foo: "my favorite color" });
- console.log({ res });
- });
- test("Test OpenAI with timeout", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "Print {foo}",
- inputVariables: ["foo"],
- });
- const chain = new LLMChain({ prompt, llm: model });
- await expect(() => chain.call({
- foo: "my favorite color",
- timeout: 10,
- })).rejects.toThrow();
- });
- test("Test run method", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "Print {foo}",
- inputVariables: ["foo"],
- });
- const chain = new LLMChain({ prompt, llm: model });
- const res = await chain.run("my favorite color");
- console.log({ res });
- });
- test("Test run method", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "{history} Print {foo}",
- inputVariables: ["foo", "history"],
- });
- const chain = new LLMChain({
- prompt,
- llm: model,
- memory: new BufferMemory(),
- });
- const res = await chain.run("my favorite color");
- console.log({ res });
- });
- test("Test memory + cancellation", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "{history} Print {foo}",
- inputVariables: ["foo", "history"],
- });
- const chain = new LLMChain({
- prompt,
- llm: model,
- memory: new BufferMemory(),
- });
- await expect(() => chain.call({
- foo: "my favorite color",
- signal: AbortSignal.timeout(20),
- })).rejects.toThrow();
- });
- test("Test memory + timeout", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "{history} Print {foo}",
- inputVariables: ["foo", "history"],
- });
- const chain = new LLMChain({
- prompt,
- llm: model,
- memory: new BufferMemory(),
- });
- await expect(() => chain.call({
- foo: "my favorite color",
- timeout: 20,
- })).rejects.toThrow();
- });
- test("Test apply", async () => {
- const model = new OpenAI({ modelName: "gpt-3.5-turbo-instruct" });
- const prompt = new PromptTemplate({
- template: "Print {foo}",
- inputVariables: ["foo"],
- });
- const chain = new LLMChain({ prompt, llm: model });
- const res = await chain.apply([{ foo: "my favorite color" }]);
- console.log({ res });
- });
- test("Test LLMChain with ChatOpenAI", async () => {
- const model = new ChatOpenAI({ temperature: 0.9 });
- const template = "What is a good name for a company that makes {product}?";
- const prompt = new PromptTemplate({ template, inputVariables: ["product"] });
- const humanMessagePrompt = new HumanMessagePromptTemplate(prompt);
- const chatPromptTemplate = ChatPromptTemplate.fromMessages([
- humanMessagePrompt,
- ]);
- const chatChain = new LLMChain({ llm: model, prompt: chatPromptTemplate });
- const res = await chatChain.call({ product: "colorful socks" });
- console.log({ res });
- });
- test("Test passing a runnable to an LLMChain", async () => {
- const model = new ChatOpenAI({ modelName: "gpt-3.5-turbo-1106" });
- const runnableModel = model.bind({
- response_format: {
- type: "json_object",
- },
- });
- const prompt = PromptTemplate.fromTemplate("You are a bee --I mean a spelling bee. Respond with a JSON key of 'spelling':\nQuestion:{input}");
- const chain = new LLMChain({ llm: runnableModel, prompt });
- const response = await chain.invoke({ input: "How do you spell today?" });
- expect(JSON.parse(response.text)).toMatchObject({
- spelling: expect.any(String),
- });
- });
@@ -1 +0,0 @@
- export {};
@@ -1,30 +0,0 @@
- import { test } from "@jest/globals";
- import { OpenAIModerationChain } from "../openai_moderation.js";
- test("OpenAI Moderation Test", async () => {
- const badString = "I hate myself and want to do harm to myself";
- const goodString = "The cat (Felis catus) is a domestic species of small carnivorous mammal.";
- const moderation = new OpenAIModerationChain();
- const { output: badResult } = await moderation.call({
- input: badString,
- });
- const { output: goodResult } = await moderation.call({
- input: goodString,
- });
- expect(badResult).toEqual("Text was found that violates OpenAI's content policy.");
- expect(goodResult).toEqual("The cat (Felis catus) is a domestic species of small carnivorous mammal.");
- });
- test("OpenAI Moderation Test in non-english language", async () => {
- const badString = "Vi devus mortigi vin nun! Kaj donu al ĉiuj aliaj pecon da oksigeno kaj ozono, kovru ĝin, por ke ni povu spiri ene de ĉi tiu blua veziko. Ĉar por kio vi estas ĉi tie? Venu por adori min? Mortigu vin mem. Mi volas diri, cent procento. Mil procentoj."; // Written in Esperanto. It's LTG's most famous quote and it's extremely crass, and as such it should not pass, but it does. However, we can check category_scores to see that, indeed, it detected some sort self-harm language in the text, albeit it's non-conclusive.
- const goodString = "La kato ( Felis catus ) estas hejma specio de malgranda karnovora mamulo.";
- const moderation = new OpenAIModerationChain();
- const { output: badResult, results } = await moderation.call({
- input: badString,
- });
- const { output: goodResult } = await moderation.call({
- input: goodString,
- });
- expect(badResult).toEqual("Vi devus mortigi vin nun! Kaj donu al ĉiuj aliaj pecon da oksigeno kaj ozono, kovru ĝin, por ke ni povu spiri ene de ĉi tiu blua veziko. Ĉar por kio vi estas ĉi tie? Venu por adori min? Mortigu vin mem. Mi volas diri, cent procento. Mil procentoj.");
- expect(goodResult).toEqual("La kato ( Felis catus ) estas hejma specio de malgranda karnovora mamulo.");
- console.log(results[0].category_scores);
- expect(results[0].category_scores["self-harm"]).toBeGreaterThan(0.01); // We can have a more granular control over moderation this way. It's not conclusive, but it's better than nothing if the language is not english.
- });
@@ -1 +0,0 @@
- export {};
@@ -1,69 +0,0 @@
- import { test } from "@jest/globals";
- import { ChatPromptTemplate } from "@langchain/core/prompts";
- import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai";
- import { StringOutputParser } from "@langchain/core/output_parsers";
- import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
- import { MemoryVectorStore } from "../../vectorstores/memory.js";
- import { createHistoryAwareRetriever } from "../history_aware_retriever.js";
- import { createRetrievalChain } from "../retrieval.js";
- const QUESTION_GEN_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
-
- Chat History:
- {chat_history}
- Follow Up Input: {input}
- Standalone question:`;
- const COMBINE_DOCS_PROMPT = `Based on the following context:
-
- {context}
-
- And chat history:
- {chat_history}
-
- Answer the following question:
- {input}`;
- test("Retrieval chain with a history aware retriever and a followup", async () => {
- const questionGenPrompt = ChatPromptTemplate.fromTemplate(QUESTION_GEN_TEMPLATE);
- const vectorstore = await MemoryVectorStore.fromTexts([
- "Mitochondria is the powerhouse of the cell",
- "Foo is red",
- "Bar is red",
- "Buildings are made out of brick",
- "Mitochondria are made of lipids",
- ], [{ id: 2 }, { id: 1 }, { id: 3 }, { id: 4 }, { id: 5 }], new OpenAIEmbeddings());
- const retriever = vectorstore.asRetriever(2);
- const llm = new ChatOpenAI({});
- const historyAwareRetriever = await createHistoryAwareRetriever({
- llm,
- retriever,
- rephrasePrompt: questionGenPrompt,
- });
- const combineDocsPrompt = ChatPromptTemplate.fromTemplate(COMBINE_DOCS_PROMPT);
- const combineDocsChain = RunnableSequence.from([
- RunnablePassthrough.assign({
- context: (input) => input.context.map((doc) => doc.pageContent).join("\n\n"),
- }),
- combineDocsPrompt,
- llm,
- new StringOutputParser(),
- ]);
- const chain = await createRetrievalChain({
- retriever: historyAwareRetriever,
- combineDocsChain,
- });
- const results = await chain.invoke({
- input: "What is the powerhouse of the cell?",
- chat_history: "",
- });
- console.log(results);
- expect(results.answer.toLowerCase()).toContain("mitochondria");
- const results2 = await chain.invoke({
- input: "What are they made of?",
- extraparam: "unused",
- chat_history: [
- "Human: What is the powerhouse of the cell?",
- "Assistant: Mitochondria is the powerhouse of the cell",
- ].join("\n"),
- });
- console.log(results2);
- expect(results2.answer.toLowerCase()).toContain("lipids");
- });
@@ -1 +0,0 @@
- export {};
@@ -1,36 +0,0 @@
- import { test } from "@jest/globals";
- import { ChatPromptTemplate } from "@langchain/core/prompts";
- import { FakeRetriever } from "@langchain/core/utils/testing";
- import { Document } from "@langchain/core/documents";
- import { createRetrievalChain } from "../retrieval.js";
- import { FakeListLLM } from "../../util/testing/llms/fake.js";
- test("createRetrievalChain", async () => {
- const answer = "I know the answer!";
- const combineDocsPrompt = ChatPromptTemplate.fromTemplate(`hi! {input} {chat_history}`);
- const fakeRetrievedDocs = [
- new Document({ pageContent: "some fake content" }),
- ];
- const llm = new FakeListLLM({ responses: [answer] });
- const input = "What is the answer?";
- const retriever = new FakeRetriever({
- output: fakeRetrievedDocs,
- });
- const chain = await createRetrievalChain({
- retriever,
- combineDocsChain: combineDocsPrompt.pipe(llm),
- });
- const output = await chain.invoke({ input });
- expect(output).toEqual({
- answer,
- chat_history: [],
- context: fakeRetrievedDocs,
- input,
- });
- const output2 = await chain.invoke({ input, chat_history: "foo" });
- expect(output2).toEqual({
- answer,
- chat_history: "foo",
- context: fakeRetrievedDocs,
- input,
- });
- });
@@ -1 +0,0 @@
- export {};