illuma-agents 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (437)
  1. package/LICENSE +21 -0
  2. package/dist/cjs/common/enum.cjs +163 -0
  3. package/dist/cjs/common/enum.cjs.map +1 -0
  4. package/dist/cjs/events.cjs +143 -0
  5. package/dist/cjs/events.cjs.map +1 -0
  6. package/dist/cjs/graphs/Graph.cjs +581 -0
  7. package/dist/cjs/graphs/Graph.cjs.map +1 -0
  8. package/dist/cjs/instrumentation.cjs +21 -0
  9. package/dist/cjs/instrumentation.cjs.map +1 -0
  10. package/dist/cjs/llm/anthropic/index.cjs +292 -0
  11. package/dist/cjs/llm/anthropic/index.cjs.map +1 -0
  12. package/dist/cjs/llm/anthropic/types.cjs +50 -0
  13. package/dist/cjs/llm/anthropic/types.cjs.map +1 -0
  14. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +553 -0
  15. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -0
  16. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +218 -0
  17. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -0
  18. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  19. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  20. package/dist/cjs/llm/fake.cjs +97 -0
  21. package/dist/cjs/llm/fake.cjs.map +1 -0
  22. package/dist/cjs/llm/google/index.cjs +147 -0
  23. package/dist/cjs/llm/google/index.cjs.map +1 -0
  24. package/dist/cjs/llm/google/utils/common.cjs +490 -0
  25. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  26. package/dist/cjs/llm/ollama/index.cjs +70 -0
  27. package/dist/cjs/llm/ollama/index.cjs.map +1 -0
  28. package/dist/cjs/llm/ollama/utils.cjs +158 -0
  29. package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
  30. package/dist/cjs/llm/openai/index.cjs +613 -0
  31. package/dist/cjs/llm/openai/index.cjs.map +1 -0
  32. package/dist/cjs/llm/openai/utils/index.cjs +677 -0
  33. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  34. package/dist/cjs/llm/openrouter/index.cjs +29 -0
  35. package/dist/cjs/llm/openrouter/index.cjs.map +1 -0
  36. package/dist/cjs/llm/providers.cjs +47 -0
  37. package/dist/cjs/llm/providers.cjs.map +1 -0
  38. package/dist/cjs/llm/text.cjs +69 -0
  39. package/dist/cjs/llm/text.cjs.map +1 -0
  40. package/dist/cjs/llm/vertexai/index.cjs +330 -0
  41. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  42. package/dist/cjs/main.cjs +127 -0
  43. package/dist/cjs/main.cjs.map +1 -0
  44. package/dist/cjs/messages/core.cjs +359 -0
  45. package/dist/cjs/messages/core.cjs.map +1 -0
  46. package/dist/cjs/messages/format.cjs +455 -0
  47. package/dist/cjs/messages/format.cjs.map +1 -0
  48. package/dist/cjs/messages/ids.cjs +23 -0
  49. package/dist/cjs/messages/ids.cjs.map +1 -0
  50. package/dist/cjs/messages/prune.cjs +398 -0
  51. package/dist/cjs/messages/prune.cjs.map +1 -0
  52. package/dist/cjs/run.cjs +264 -0
  53. package/dist/cjs/run.cjs.map +1 -0
  54. package/dist/cjs/splitStream.cjs +210 -0
  55. package/dist/cjs/splitStream.cjs.map +1 -0
  56. package/dist/cjs/stream.cjs +504 -0
  57. package/dist/cjs/stream.cjs.map +1 -0
  58. package/dist/cjs/tools/CodeExecutor.cjs +192 -0
  59. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -0
  60. package/dist/cjs/tools/ToolNode.cjs +125 -0
  61. package/dist/cjs/tools/ToolNode.cjs.map +1 -0
  62. package/dist/cjs/tools/handlers.cjs +250 -0
  63. package/dist/cjs/tools/handlers.cjs.map +1 -0
  64. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  65. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  66. package/dist/cjs/tools/search/content.cjs +140 -0
  67. package/dist/cjs/tools/search/content.cjs.map +1 -0
  68. package/dist/cjs/tools/search/firecrawl.cjs +179 -0
  69. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -0
  70. package/dist/cjs/tools/search/format.cjs +203 -0
  71. package/dist/cjs/tools/search/format.cjs.map +1 -0
  72. package/dist/cjs/tools/search/highlights.cjs +245 -0
  73. package/dist/cjs/tools/search/highlights.cjs.map +1 -0
  74. package/dist/cjs/tools/search/rerankers.cjs +174 -0
  75. package/dist/cjs/tools/search/rerankers.cjs.map +1 -0
  76. package/dist/cjs/tools/search/schema.cjs +70 -0
  77. package/dist/cjs/tools/search/schema.cjs.map +1 -0
  78. package/dist/cjs/tools/search/search.cjs +561 -0
  79. package/dist/cjs/tools/search/search.cjs.map +1 -0
  80. package/dist/cjs/tools/search/serper-scraper.cjs +132 -0
  81. package/dist/cjs/tools/search/serper-scraper.cjs.map +1 -0
  82. package/dist/cjs/tools/search/tool.cjs +331 -0
  83. package/dist/cjs/tools/search/tool.cjs.map +1 -0
  84. package/dist/cjs/tools/search/utils.cjs +66 -0
  85. package/dist/cjs/tools/search/utils.cjs.map +1 -0
  86. package/dist/cjs/utils/graph.cjs +16 -0
  87. package/dist/cjs/utils/graph.cjs.map +1 -0
  88. package/dist/cjs/utils/llm.cjs +28 -0
  89. package/dist/cjs/utils/llm.cjs.map +1 -0
  90. package/dist/cjs/utils/misc.cjs +56 -0
  91. package/dist/cjs/utils/misc.cjs.map +1 -0
  92. package/dist/cjs/utils/run.cjs +69 -0
  93. package/dist/cjs/utils/run.cjs.map +1 -0
  94. package/dist/cjs/utils/title.cjs +111 -0
  95. package/dist/cjs/utils/title.cjs.map +1 -0
  96. package/dist/cjs/utils/tokens.cjs +65 -0
  97. package/dist/cjs/utils/tokens.cjs.map +1 -0
  98. package/dist/esm/common/enum.mjs +163 -0
  99. package/dist/esm/common/enum.mjs.map +1 -0
  100. package/dist/esm/events.mjs +135 -0
  101. package/dist/esm/events.mjs.map +1 -0
  102. package/dist/esm/graphs/Graph.mjs +578 -0
  103. package/dist/esm/graphs/Graph.mjs.map +1 -0
  104. package/dist/esm/instrumentation.mjs +19 -0
  105. package/dist/esm/instrumentation.mjs.map +1 -0
  106. package/dist/esm/llm/anthropic/index.mjs +290 -0
  107. package/dist/esm/llm/anthropic/index.mjs.map +1 -0
  108. package/dist/esm/llm/anthropic/types.mjs +48 -0
  109. package/dist/esm/llm/anthropic/types.mjs.map +1 -0
  110. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +550 -0
  111. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -0
  112. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +216 -0
  113. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -0
  114. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  115. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  116. package/dist/esm/llm/fake.mjs +94 -0
  117. package/dist/esm/llm/fake.mjs.map +1 -0
  118. package/dist/esm/llm/google/index.mjs +145 -0
  119. package/dist/esm/llm/google/index.mjs.map +1 -0
  120. package/dist/esm/llm/google/utils/common.mjs +484 -0
  121. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  122. package/dist/esm/llm/ollama/index.mjs +68 -0
  123. package/dist/esm/llm/ollama/index.mjs.map +1 -0
  124. package/dist/esm/llm/ollama/utils.mjs +155 -0
  125. package/dist/esm/llm/ollama/utils.mjs.map +1 -0
  126. package/dist/esm/llm/openai/index.mjs +604 -0
  127. package/dist/esm/llm/openai/index.mjs.map +1 -0
  128. package/dist/esm/llm/openai/utils/index.mjs +671 -0
  129. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  130. package/dist/esm/llm/openrouter/index.mjs +27 -0
  131. package/dist/esm/llm/openrouter/index.mjs.map +1 -0
  132. package/dist/esm/llm/providers.mjs +43 -0
  133. package/dist/esm/llm/providers.mjs.map +1 -0
  134. package/dist/esm/llm/text.mjs +67 -0
  135. package/dist/esm/llm/text.mjs.map +1 -0
  136. package/dist/esm/llm/vertexai/index.mjs +328 -0
  137. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  138. package/dist/esm/main.mjs +20 -0
  139. package/dist/esm/main.mjs.map +1 -0
  140. package/dist/esm/messages/core.mjs +351 -0
  141. package/dist/esm/messages/core.mjs.map +1 -0
  142. package/dist/esm/messages/format.mjs +447 -0
  143. package/dist/esm/messages/format.mjs.map +1 -0
  144. package/dist/esm/messages/ids.mjs +21 -0
  145. package/dist/esm/messages/ids.mjs.map +1 -0
  146. package/dist/esm/messages/prune.mjs +393 -0
  147. package/dist/esm/messages/prune.mjs.map +1 -0
  148. package/dist/esm/run.mjs +261 -0
  149. package/dist/esm/run.mjs.map +1 -0
  150. package/dist/esm/splitStream.mjs +207 -0
  151. package/dist/esm/splitStream.mjs.map +1 -0
  152. package/dist/esm/stream.mjs +500 -0
  153. package/dist/esm/stream.mjs.map +1 -0
  154. package/dist/esm/tools/CodeExecutor.mjs +188 -0
  155. package/dist/esm/tools/CodeExecutor.mjs.map +1 -0
  156. package/dist/esm/tools/ToolNode.mjs +122 -0
  157. package/dist/esm/tools/ToolNode.mjs.map +1 -0
  158. package/dist/esm/tools/handlers.mjs +245 -0
  159. package/dist/esm/tools/handlers.mjs.map +1 -0
  160. package/dist/esm/tools/search/anthropic.mjs +37 -0
  161. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  162. package/dist/esm/tools/search/content.mjs +119 -0
  163. package/dist/esm/tools/search/content.mjs.map +1 -0
  164. package/dist/esm/tools/search/firecrawl.mjs +176 -0
  165. package/dist/esm/tools/search/firecrawl.mjs.map +1 -0
  166. package/dist/esm/tools/search/format.mjs +201 -0
  167. package/dist/esm/tools/search/format.mjs.map +1 -0
  168. package/dist/esm/tools/search/highlights.mjs +243 -0
  169. package/dist/esm/tools/search/highlights.mjs.map +1 -0
  170. package/dist/esm/tools/search/rerankers.mjs +168 -0
  171. package/dist/esm/tools/search/rerankers.mjs.map +1 -0
  172. package/dist/esm/tools/search/schema.mjs +61 -0
  173. package/dist/esm/tools/search/schema.mjs.map +1 -0
  174. package/dist/esm/tools/search/search.mjs +558 -0
  175. package/dist/esm/tools/search/search.mjs.map +1 -0
  176. package/dist/esm/tools/search/serper-scraper.mjs +129 -0
  177. package/dist/esm/tools/search/serper-scraper.mjs.map +1 -0
  178. package/dist/esm/tools/search/tool.mjs +329 -0
  179. package/dist/esm/tools/search/tool.mjs.map +1 -0
  180. package/dist/esm/tools/search/utils.mjs +61 -0
  181. package/dist/esm/tools/search/utils.mjs.map +1 -0
  182. package/dist/esm/utils/graph.mjs +13 -0
  183. package/dist/esm/utils/graph.mjs.map +1 -0
  184. package/dist/esm/utils/llm.mjs +25 -0
  185. package/dist/esm/utils/llm.mjs.map +1 -0
  186. package/dist/esm/utils/misc.mjs +53 -0
  187. package/dist/esm/utils/misc.mjs.map +1 -0
  188. package/dist/esm/utils/run.mjs +66 -0
  189. package/dist/esm/utils/run.mjs.map +1 -0
  190. package/dist/esm/utils/title.mjs +108 -0
  191. package/dist/esm/utils/title.mjs.map +1 -0
  192. package/dist/esm/utils/tokens.mjs +62 -0
  193. package/dist/esm/utils/tokens.mjs.map +1 -0
  194. package/dist/types/common/enum.d.ts +128 -0
  195. package/dist/types/common/index.d.ts +1 -0
  196. package/dist/types/events.d.ts +29 -0
  197. package/dist/types/graphs/Graph.d.ts +122 -0
  198. package/dist/types/graphs/index.d.ts +1 -0
  199. package/dist/types/index.d.ts +13 -0
  200. package/dist/types/instrumentation.d.ts +1 -0
  201. package/dist/types/llm/anthropic/index.d.ts +39 -0
  202. package/dist/types/llm/anthropic/types.d.ts +37 -0
  203. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +14 -0
  204. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +14 -0
  205. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +22 -0
  206. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  207. package/dist/types/llm/fake.d.ts +31 -0
  208. package/dist/types/llm/google/index.d.ts +14 -0
  209. package/dist/types/llm/google/types.d.ts +32 -0
  210. package/dist/types/llm/google/utils/common.d.ts +19 -0
  211. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  212. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  213. package/dist/types/llm/ollama/index.d.ts +8 -0
  214. package/dist/types/llm/ollama/utils.d.ts +7 -0
  215. package/dist/types/llm/openai/index.d.ts +103 -0
  216. package/dist/types/llm/openai/types.d.ts +10 -0
  217. package/dist/types/llm/openai/utils/index.d.ts +20 -0
  218. package/dist/types/llm/openrouter/index.d.ts +12 -0
  219. package/dist/types/llm/providers.d.ts +5 -0
  220. package/dist/types/llm/text.d.ts +21 -0
  221. package/dist/types/llm/vertexai/index.d.ts +293 -0
  222. package/dist/types/messages/core.d.ts +14 -0
  223. package/dist/types/messages/format.d.ts +113 -0
  224. package/dist/types/messages/ids.d.ts +3 -0
  225. package/dist/types/messages/index.d.ts +4 -0
  226. package/dist/types/messages/prune.d.ts +51 -0
  227. package/dist/types/mockStream.d.ts +32 -0
  228. package/dist/types/prompts/collab.d.ts +1 -0
  229. package/dist/types/prompts/index.d.ts +2 -0
  230. package/dist/types/prompts/taskmanager.d.ts +41 -0
  231. package/dist/types/run.d.ts +30 -0
  232. package/dist/types/scripts/abort.d.ts +1 -0
  233. package/dist/types/scripts/ant_web_search.d.ts +1 -0
  234. package/dist/types/scripts/args.d.ts +7 -0
  235. package/dist/types/scripts/caching.d.ts +1 -0
  236. package/dist/types/scripts/cli.d.ts +1 -0
  237. package/dist/types/scripts/cli2.d.ts +1 -0
  238. package/dist/types/scripts/cli3.d.ts +1 -0
  239. package/dist/types/scripts/cli4.d.ts +1 -0
  240. package/dist/types/scripts/cli5.d.ts +1 -0
  241. package/dist/types/scripts/code_exec.d.ts +1 -0
  242. package/dist/types/scripts/code_exec_files.d.ts +1 -0
  243. package/dist/types/scripts/code_exec_simple.d.ts +1 -0
  244. package/dist/types/scripts/content.d.ts +1 -0
  245. package/dist/types/scripts/empty_input.d.ts +1 -0
  246. package/dist/types/scripts/image.d.ts +1 -0
  247. package/dist/types/scripts/memory.d.ts +1 -0
  248. package/dist/types/scripts/search.d.ts +1 -0
  249. package/dist/types/scripts/simple.d.ts +1 -0
  250. package/dist/types/scripts/stream.d.ts +1 -0
  251. package/dist/types/scripts/thinking.d.ts +1 -0
  252. package/dist/types/scripts/tools.d.ts +1 -0
  253. package/dist/types/specs/spec.utils.d.ts +1 -0
  254. package/dist/types/splitStream.d.ts +37 -0
  255. package/dist/types/stream.d.ts +14 -0
  256. package/dist/types/tools/CodeExecutor.d.ts +23 -0
  257. package/dist/types/tools/ToolNode.d.ts +22 -0
  258. package/dist/types/tools/example.d.ts +78 -0
  259. package/dist/types/tools/handlers.d.ts +19 -0
  260. package/dist/types/tools/search/anthropic.d.ts +16 -0
  261. package/dist/types/tools/search/content.d.ts +4 -0
  262. package/dist/types/tools/search/firecrawl.d.ts +54 -0
  263. package/dist/types/tools/search/format.d.ts +5 -0
  264. package/dist/types/tools/search/highlights.d.ts +13 -0
  265. package/dist/types/tools/search/index.d.ts +2 -0
  266. package/dist/types/tools/search/rerankers.d.ts +38 -0
  267. package/dist/types/tools/search/schema.d.ts +16 -0
  268. package/dist/types/tools/search/search.d.ts +8 -0
  269. package/dist/types/tools/search/serper-scraper.d.ts +59 -0
  270. package/dist/types/tools/search/test.d.ts +1 -0
  271. package/dist/types/tools/search/tool.d.ts +54 -0
  272. package/dist/types/tools/search/types.d.ts +591 -0
  273. package/dist/types/tools/search/utils.d.ts +10 -0
  274. package/dist/types/types/graph.d.ts +138 -0
  275. package/dist/types/types/index.d.ts +5 -0
  276. package/dist/types/types/llm.d.ts +102 -0
  277. package/dist/types/types/run.d.ts +74 -0
  278. package/dist/types/types/stream.d.ts +293 -0
  279. package/dist/types/types/tools.d.ts +61 -0
  280. package/dist/types/utils/graph.d.ts +2 -0
  281. package/dist/types/utils/index.d.ts +5 -0
  282. package/dist/types/utils/llm.d.ts +3 -0
  283. package/dist/types/utils/llmConfig.d.ts +3 -0
  284. package/dist/types/utils/logging.d.ts +1 -0
  285. package/dist/types/utils/misc.d.ts +7 -0
  286. package/dist/types/utils/run.d.ts +27 -0
  287. package/dist/types/utils/title.d.ts +4 -0
  288. package/dist/types/utils/tokens.d.ts +3 -0
  289. package/package.json +145 -0
  290. package/src/common/enum.ts +176 -0
  291. package/src/common/index.ts +2 -0
  292. package/src/events.ts +191 -0
  293. package/src/graphs/Graph.ts +846 -0
  294. package/src/graphs/index.ts +1 -0
  295. package/src/index.ts +24 -0
  296. package/src/instrumentation.ts +22 -0
  297. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  298. package/src/llm/anthropic/index.ts +413 -0
  299. package/src/llm/anthropic/llm.spec.ts +1442 -0
  300. package/src/llm/anthropic/types.ts +140 -0
  301. package/src/llm/anthropic/utils/message_inputs.ts +660 -0
  302. package/src/llm/anthropic/utils/message_outputs.ts +289 -0
  303. package/src/llm/anthropic/utils/output_parsers.ts +133 -0
  304. package/src/llm/anthropic/utils/tools.ts +29 -0
  305. package/src/llm/fake.ts +133 -0
  306. package/src/llm/google/index.ts +222 -0
  307. package/src/llm/google/types.ts +43 -0
  308. package/src/llm/google/utils/common.ts +660 -0
  309. package/src/llm/google/utils/tools.ts +160 -0
  310. package/src/llm/google/utils/zod_to_genai_parameters.ts +88 -0
  311. package/src/llm/ollama/index.ts +92 -0
  312. package/src/llm/ollama/utils.ts +193 -0
  313. package/src/llm/openai/index.ts +853 -0
  314. package/src/llm/openai/types.ts +24 -0
  315. package/src/llm/openai/utils/index.ts +918 -0
  316. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  317. package/src/llm/openrouter/index.ts +60 -0
  318. package/src/llm/providers.ts +57 -0
  319. package/src/llm/text.ts +94 -0
  320. package/src/llm/vertexai/index.ts +360 -0
  321. package/src/messages/core.ts +463 -0
  322. package/src/messages/format.ts +625 -0
  323. package/src/messages/formatAgentMessages.test.ts +917 -0
  324. package/src/messages/formatAgentMessages.tools.test.ts +400 -0
  325. package/src/messages/formatMessage.test.ts +693 -0
  326. package/src/messages/ids.ts +26 -0
  327. package/src/messages/index.ts +4 -0
  328. package/src/messages/prune.ts +567 -0
  329. package/src/messages/shiftIndexTokenCountMap.test.ts +81 -0
  330. package/src/mockStream.ts +99 -0
  331. package/src/prompts/collab.ts +6 -0
  332. package/src/prompts/index.ts +2 -0
  333. package/src/prompts/taskmanager.ts +61 -0
  334. package/src/proto/CollabGraph.ts +269 -0
  335. package/src/proto/TaskManager.ts +243 -0
  336. package/src/proto/collab.ts +200 -0
  337. package/src/proto/collab_design.ts +184 -0
  338. package/src/proto/collab_design_v2.ts +224 -0
  339. package/src/proto/collab_design_v3.ts +255 -0
  340. package/src/proto/collab_design_v4.ts +220 -0
  341. package/src/proto/collab_design_v5.ts +251 -0
  342. package/src/proto/collab_graph.ts +181 -0
  343. package/src/proto/collab_original.ts +123 -0
  344. package/src/proto/example.ts +93 -0
  345. package/src/proto/example_new.ts +68 -0
  346. package/src/proto/example_old.ts +201 -0
  347. package/src/proto/example_test.ts +152 -0
  348. package/src/proto/example_test_anthropic.ts +100 -0
  349. package/src/proto/log_stream.ts +202 -0
  350. package/src/proto/main_collab_community_event.ts +133 -0
  351. package/src/proto/main_collab_design_v2.ts +96 -0
  352. package/src/proto/main_collab_design_v4.ts +100 -0
  353. package/src/proto/main_collab_design_v5.ts +135 -0
  354. package/src/proto/main_collab_global_analysis.ts +122 -0
  355. package/src/proto/main_collab_hackathon_event.ts +153 -0
  356. package/src/proto/main_collab_space_mission.ts +153 -0
  357. package/src/proto/main_philosophy.ts +210 -0
  358. package/src/proto/original_script.ts +126 -0
  359. package/src/proto/standard.ts +100 -0
  360. package/src/proto/stream.ts +56 -0
  361. package/src/proto/tasks.ts +118 -0
  362. package/src/proto/tools/global_analysis_tools.ts +86 -0
  363. package/src/proto/tools/space_mission_tools.ts +60 -0
  364. package/src/proto/vertexai.ts +54 -0
  365. package/src/run.ts +381 -0
  366. package/src/scripts/abort.ts +138 -0
  367. package/src/scripts/ant_web_search.ts +158 -0
  368. package/src/scripts/args.ts +48 -0
  369. package/src/scripts/caching.ts +124 -0
  370. package/src/scripts/cli.ts +167 -0
  371. package/src/scripts/cli2.ts +125 -0
  372. package/src/scripts/cli3.ts +178 -0
  373. package/src/scripts/cli4.ts +184 -0
  374. package/src/scripts/cli5.ts +184 -0
  375. package/src/scripts/code_exec.ts +214 -0
  376. package/src/scripts/code_exec_files.ts +193 -0
  377. package/src/scripts/code_exec_simple.ts +129 -0
  378. package/src/scripts/content.ts +120 -0
  379. package/src/scripts/empty_input.ts +137 -0
  380. package/src/scripts/image.ts +178 -0
  381. package/src/scripts/memory.ts +97 -0
  382. package/src/scripts/search.ts +150 -0
  383. package/src/scripts/simple.ts +225 -0
  384. package/src/scripts/stream.ts +122 -0
  385. package/src/scripts/thinking.ts +150 -0
  386. package/src/scripts/tools.ts +155 -0
  387. package/src/specs/anthropic.simple.test.ts +317 -0
  388. package/src/specs/azure.simple.test.ts +316 -0
  389. package/src/specs/openai.simple.test.ts +316 -0
  390. package/src/specs/prune.test.ts +763 -0
  391. package/src/specs/reasoning.test.ts +165 -0
  392. package/src/specs/spec.utils.ts +3 -0
  393. package/src/specs/thinking-prune.test.ts +703 -0
  394. package/src/specs/token-distribution-edge-case.test.ts +316 -0
  395. package/src/specs/tool-error.test.ts +193 -0
  396. package/src/splitStream.test.ts +691 -0
  397. package/src/splitStream.ts +234 -0
  398. package/src/stream.test.ts +94 -0
  399. package/src/stream.ts +651 -0
  400. package/src/tools/CodeExecutor.ts +220 -0
  401. package/src/tools/ToolNode.ts +170 -0
  402. package/src/tools/example.ts +129 -0
  403. package/src/tools/handlers.ts +336 -0
  404. package/src/tools/search/anthropic.ts +51 -0
  405. package/src/tools/search/content.test.ts +173 -0
  406. package/src/tools/search/content.ts +147 -0
  407. package/src/tools/search/firecrawl.ts +210 -0
  408. package/src/tools/search/format.ts +250 -0
  409. package/src/tools/search/highlights.ts +320 -0
  410. package/src/tools/search/index.ts +2 -0
  411. package/src/tools/search/jina-reranker.test.ts +126 -0
  412. package/src/tools/search/output.md +2775 -0
  413. package/src/tools/search/rerankers.ts +242 -0
  414. package/src/tools/search/schema.ts +63 -0
  415. package/src/tools/search/search.ts +759 -0
  416. package/src/tools/search/serper-scraper.ts +155 -0
  417. package/src/tools/search/test.html +884 -0
  418. package/src/tools/search/test.md +643 -0
  419. package/src/tools/search/test.ts +159 -0
  420. package/src/tools/search/tool.ts +471 -0
  421. package/src/tools/search/types.ts +687 -0
  422. package/src/tools/search/utils.ts +79 -0
  423. package/src/types/graph.ts +185 -0
  424. package/src/types/index.ts +6 -0
  425. package/src/types/llm.ts +140 -0
  426. package/src/types/run.ts +89 -0
  427. package/src/types/stream.ts +400 -0
  428. package/src/types/tools.ts +80 -0
  429. package/src/utils/graph.ts +11 -0
  430. package/src/utils/index.ts +5 -0
  431. package/src/utils/llm.ts +27 -0
  432. package/src/utils/llmConfig.ts +183 -0
  433. package/src/utils/logging.ts +48 -0
  434. package/src/utils/misc.ts +57 -0
  435. package/src/utils/run.ts +101 -0
  436. package/src/utils/title.ts +165 -0
  437. package/src/utils/tokens.ts +70 -0
package/src/llm/anthropic/llm.spec.ts
@@ -0,0 +1,1442 @@
+ /* eslint-disable no-process-env */
+ /* eslint-disable @typescript-eslint/no-explicit-any */
+ import { config } from 'dotenv';
+ config();
+ import { expect, test } from '@jest/globals';
+ import * as fs from 'fs/promises';
+ import {
+ AIMessage,
+ AIMessageChunk,
+ BaseMessage,
+ HumanMessage,
+ SystemMessage,
+ ToolMessage,
+ } from '@langchain/core/messages';
+ import { ChatPromptValue } from '@langchain/core/prompt_values';
+ import {
+ PromptTemplate,
+ ChatPromptTemplate,
+ AIMessagePromptTemplate,
+ HumanMessagePromptTemplate,
+ SystemMessagePromptTemplate,
+ } from '@langchain/core/prompts';
+ import { CallbackManager } from '@langchain/core/callbacks/manager';
+ import { concat } from '@langchain/core/utils/stream';
+ import { AnthropicVertex } from '@anthropic-ai/vertex-sdk';
+ import { BaseLanguageModelInput } from '@langchain/core/language_models/base';
+ import { tool } from '@langchain/core/tools';
+ import { z } from 'zod';
+ import { CustomAnthropic as ChatAnthropic } from './index';
+ import { AnthropicMessageResponse, ChatAnthropicContentBlock } from './types';
+ jest.setTimeout(120000);
+
+ async function invoke(
+ chat: ChatAnthropic,
+ invocationType: string,
+ input: BaseLanguageModelInput
+ ): Promise<AIMessageChunk | AIMessage> {
+ if (invocationType === 'stream') {
+ let output: AIMessageChunk | undefined;
+
+ const stream = await chat.stream(input);
+
+ for await (const chunk of stream) {
+ if (!output) {
+ output = chunk;
+ } else {
+ output = output.concat(chunk);
+ }
+ }
+
+ return output!;
+ }
+
+ return chat.invoke(input);
+ }
+
+ // use this for tests involving "extended thinking"
+ const extendedThinkingModelName = 'claude-3-7-sonnet-20250219';
+
+ // use this for tests involving citations
+ const citationsModelName = 'claude-sonnet-4-5-20250929';
+
+ // use this for tests involving PDF documents
+ const pdfModelName = 'claude-3-5-haiku-20241022';
+
+ // Use this model for all other tests
+ const modelName = 'claude-3-haiku-20240307';
+
+ test('Test ChatAnthropic', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.invoke([message]);
+ expect(res.response_metadata.usage).toBeDefined();
+ });
+
+ test('Test ChatAnthropic with a bad API key throws appropriate error', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ apiKey: 'bad',
+ });
+ let error;
+ try {
+ const message = new HumanMessage('Hello!');
+ await chat.invoke([message]);
+ } catch (e) {
+ error = e;
+ }
+ expect(error).toBeDefined();
+ expect((error as any).lc_error_code).toEqual('MODEL_AUTHENTICATION');
+ });
+
+ test('Test ChatAnthropic with unknown model throws appropriate error', async () => {
+ const chat = new ChatAnthropic({
+ modelName: 'badbad',
+ maxRetries: 0,
+ });
+ let error;
+ try {
+ const message = new HumanMessage('Hello!');
+ await chat.invoke([message]);
+ } catch (e) {
+ error = e;
+ }
+ expect(error).toBeDefined();
+ expect((error as any).lc_error_code).toEqual('MODEL_NOT_FOUND');
+ });
+
+ test('Test ChatAnthropic Generate', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.generate([[message], [message]]);
+ expect(res.generations.length).toBe(2);
+ for (const generation of res.generations) {
+ expect(generation.length).toBe(1);
+ for (const message of generation) {
+ // console.log(message.text);
+ }
+ }
+ // console.log({ res });
+ });
+
+ test.skip('Test ChatAnthropic Generate w/ ClientOptions', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ clientOptions: {
+ defaultHeaders: {
+ 'Helicone-Auth': 'HELICONE_API_KEY',
+ },
+ },
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.generate([[message], [message]]);
+ expect(res.generations.length).toBe(2);
+ for (const generation of res.generations) {
+ expect(generation.length).toBe(1);
+ for (const message of generation) {
+ // console.log(message.text);
+ }
+ }
+ // console.log({ res });
+ });
+
+ test('Test ChatAnthropic Generate with a signal in call options', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ });
+ const controller = new AbortController();
+ const message = new HumanMessage(
+ 'How is your day going? Be extremely verbose!'
+ );
+ await expect(() => {
+ const res = chat.generate([[message], [message]], {
+ signal: controller.signal,
+ });
+ setTimeout(() => {
+ controller.abort();
+ }, 1000);
+ return res;
+ }).rejects.toThrow();
+ }, 10000);
+
+ test('Test ChatAnthropic tokenUsage with a batch', async () => {
+ const model = new ChatAnthropic({
+ temperature: 0,
+ maxRetries: 0,
+ modelName,
+ });
+ const res = await model.generate([
+ [new HumanMessage(`Hello!`)],
+ [new HumanMessage(`Hi!`)],
+ ]);
+ // console.log({ res });
+ });
+
+ test('Test ChatAnthropic in streaming mode', async () => {
+ let nrNewTokens = 0;
+ let streamedCompletion = '';
+
+ const model = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ streaming: true,
+ callbacks: CallbackManager.fromHandlers({
+ async handleLLMNewToken(token: string) {
+ nrNewTokens += 1;
+ streamedCompletion += token;
+ },
+ }),
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await model.invoke([message]);
+ // console.log({ res });
+
+ expect(nrNewTokens > 0).toBe(true);
+ expect(res.content).toBe(streamedCompletion);
+ });
+
+ test('Test ChatAnthropic in streaming mode with a signal', async () => {
+ let nrNewTokens = 0;
+ let streamedCompletion = '';
+
+ const model = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ streaming: true,
+ callbacks: CallbackManager.fromHandlers({
+ async handleLLMNewToken(token: string) {
+ nrNewTokens += 1;
+ streamedCompletion += token;
+ },
+ }),
+ });
+ const controller = new AbortController();
+ const message = new HumanMessage(
+ 'Hello! Give me an extremely verbose response'
+ );
+ await expect(() => {
+ const res = model.invoke([message], {
+ signal: controller.signal,
+ });
+ setTimeout(() => {
+ controller.abort();
+ }, 500);
+ return res;
+ }).rejects.toThrow();
+
+ // console.log({ nrNewTokens, streamedCompletion });
+ }, 5000);
+
+ test.skip('Test ChatAnthropic prompt value', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.generatePrompt([new ChatPromptValue([message])]);
+ expect(res.generations.length).toBe(1);
+ for (const generation of res.generations) {
+ for (const g of generation) {
+ // console.log(g.text);
+ }
+ }
+ // console.log({ res });
+ });
+
+ test.skip('ChatAnthropic, docs, prompt templates', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ temperature: 0,
+ });
+
+ const systemPrompt = PromptTemplate.fromTemplate(
+ 'You are a helpful assistant that translates {input_language} to {output_language}.'
+ );
+
+ const chatPrompt = ChatPromptTemplate.fromMessages([
+ new SystemMessagePromptTemplate(systemPrompt),
+ HumanMessagePromptTemplate.fromTemplate('{text}'),
+ ]);
+
+ const responseA = await chat.generatePrompt([
+ await chatPrompt.formatPromptValue({
+ input_language: 'English',
+ output_language: 'French',
+ text: 'I love programming.',
+ }),
+ ]);
+
+ // console.log(responseA.generations);
+ });
+
+ test.skip('ChatAnthropic, longer chain of messages', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ temperature: 0,
+ });
+
+ const chatPrompt = ChatPromptTemplate.fromMessages([
+ HumanMessagePromptTemplate.fromTemplate(`Hi, my name is Joe!`),
+ AIMessagePromptTemplate.fromTemplate(`Nice to meet you, Joe!`),
+ HumanMessagePromptTemplate.fromTemplate('{text}'),
+ ]);
+
+ const responseA = await chat.generatePrompt([
+ await chatPrompt.formatPromptValue({
+ text: 'What did I just say my name was?',
+ }),
+ ]);
+
+ // console.log(responseA.generations);
+ });
+
+ test.skip('ChatAnthropic, Anthropic apiUrl set manually via constructor', async () => {
+ // Pass the default URL through (should use this, and work as normal)
+ const anthropicApiUrl = 'https://api.anthropic.com';
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ anthropicApiUrl,
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.call([message]);
+ // console.log({ res });
+ });
+
+ test('Test ChatAnthropic stream method', async () => {
+ const model = new ChatAnthropic({
+ maxTokens: 50,
+ maxRetries: 0,
+ modelName,
+ });
+ const stream = await model.stream('Print hello world.');
+ const chunks: AIMessageChunk[] = [];
+ for await (const chunk of stream) {
+ chunks.push(chunk);
+ }
+ expect(chunks.length).toBeGreaterThan(1);
+ });
+
+ test('Test ChatAnthropic stream method with abort', async () => {
+ await expect(async () => {
+ const model = new ChatAnthropic({
+ maxTokens: 500,
+ maxRetries: 0,
+ modelName,
+ });
+ const stream = await model.stream(
+ 'How is your day going? Be extremely verbose.',
+ {
+ signal: AbortSignal.timeout(1000),
+ }
+ );
+ for await (const chunk of stream) {
+ // console.log(chunk);
+ }
+ }).rejects.toThrow();
+ });
+
+ test('Test ChatAnthropic stream method with early break', async () => {
+ const model = new ChatAnthropic({
+ maxTokens: 50,
+ maxRetries: 0,
+ modelName,
+ });
+ const stream = await model.stream(
+ 'How is your day going? Be extremely verbose.'
+ );
+ let i = 0;
+ for await (const chunk of stream) {
+ // console.log(chunk);
+ i += 1;
+ if (i > 10) {
+ break;
+ }
+ }
+ });
+
+ test('Test ChatAnthropic headers passed through', async () => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ apiKey: 'NOT_REAL',
+ clientOptions: {
+ defaultHeaders: {
+ 'X-Api-Key': process.env.ANTHROPIC_API_KEY,
+ },
+ },
+ });
+ const message = new HumanMessage('Hello!');
+ const res = await chat.invoke([message]);
+ // console.log({ res });
+ });
+
+ describe('ChatAnthropic image inputs', () => {
+ test.each(['invoke', 'stream'])(
+ 'Test ChatAnthropic image_url, %s',
+ async (invocationType: string) => {
+ const chat = new ChatAnthropic({
+ modelName,
+ maxRetries: 0,
+ });
+
+ const dataUrlRes = invoke(chat, invocationType, [
+ new HumanMessage({
+ content: [
+ {
+ type: 'image_url',
+ image_url: {
+ url: 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAMCAggHCQgGCQgICAcICAgICAgICAYICAgHDAgHCAgICAgIBggICAgICAgICBYICAgICwkKCAgNDQoIDggICQgBAwQEBgUGCgYGCBALCg0QCg0NEA0KCg8LDQoKCgoLDgoQDQoLDQoKCg4NDQ0NDgsQDw0OCg4NDQ4NDQoJDg8OCP/AABEIALAAsAMBEQACEQEDEQH/xAAdAAEAAgEFAQAAAAAAAAAAAAAABwgJAQIEBQYD/8QANBAAAgIBAwIDBwQCAgIDAAAAAQIAAwQFERIIEwYhMQcUFyJVldQjQVGBcZEJMzJiFRYk/8QAGwEBAAMAAwEAAAAAAAAAAAAAAAQFBgEDBwL/xAA5EQACAQIDBQQJBAIBBQAAAAAAAQIDEQQhMQVBUWGREhRxgRMVIjJSU8HR8CNyobFCguEGJGKi4v/aAAwDAQACEQMRAD8ApfJplBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBANl16qOTEKB6kkAD+z5Tkcj0On+z7Ub1FlOmanejeavj6dqV6kfsQ1OK4IP8AIM6pVYR1kuqJdLCV6qvCnJ/6v66nL+Ems/RNc+y63+BOvvFL411O/wBW4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6HE1D2e6lQpsu0zU6EXzZ8jTtSoUD9yWuxUAA/kmdkasJaSXVHRVwlekrzpyX+r+mh56m9WHJSGU+hUgg/wBjynaRORvnAEAQBAEAQBAEAQCbennpVzfER95LHE0tX4tlsnJr2B2srw6yQLCpBQ3Me1W+4/VZLKlh4jFRo5ay4cPH7f0XWA2XUxft37MONs34ffRcy/Xsu6bdG0UK2Nh1tkAbHMyAt+Wx2HIi11/SDcQe3jrTXv6IJRVcRUqe88uC0Nxhdn0MMv0458XnJ+e7wVlyJPJkYsTSAIAgCAIAgCAIBqDAIx9qHTbo2tBmycOtcgjYZmOBRlqdjxJtQDuhdye3ette/qhkmliKlP3XlwehXYrZ9DEr9SOfFZS6rXwd1yKCdQ3Srm+HT7yGOXpbPxXLVOLUMTtXXmVgkVliQgvU9qx9h+kz11Ne4fFRrZaS4cfD7f2YfH7LqYT279qHHevH76PlvhKTClEAQBAEAQBAJp6WOn0+I80i7mumYnF8x1LIbSSe3iV2DYq13ElnQ8q6gdijWUuIeKxHoY5e89PuXWy8D3qp7S9iOvN/D9+XiZRNN06uiuvHqrSqmpFrqqrVUrrrUBUREUBVVVAAUAAATNNtu7PR4xUUoxVkskloktxyCZwfRj26jetHPtzrMXSM4Uabj7Vrfj10O2ZdsDbb3bqrCKEYmpeyED8Hs53LZVwvsPg4qN6kbt+OS8t5hdobYqOo44edorK6SzfmtFpz14H16f8Arkz6cmrD1e9crBvsFZy3ropvxC2yo7NTXXXbjhtuXcTmisz91hX2yr4KLjemrNbuPXeMDtuoqihiGnF/5ZJx55ZNceF76GQSUJuhAEAQBAEAhb239WWl+H391s7mXnbAnExu2WqUjdWyLHda6Qw2IXdrCCGFZX5pMo4WdXNZLiyoxm1KOFfZl7UuCtdeN2kvzcRB4d/5JMV7OOVpWRRSWAFmPk1ZTKN9uT1PRi+QHnsj2H12DHYGXLZzS9mV3zVvuVFL/qGDlapSaXFST6qyfS/3tb4M8a4up49WoYlyZGLcCUsTf1B2ZGVgHrsRgVNbqrIwIYAjaVc4Sg+zJWZqaVWFWCnB3T0/PodnqOnV312Y9taW02o1dtViq9dlbAq6OjAqyspIKkEEGfKbTuj7lFSTjJXTyaejXAxd9U/T6fDmYBTzbTMvm+G7FnNRBHcxLLDuWankCrueVlRG5dq7nOlwuI9NHP3lr9zzjamA7rU9n3Jacn8P25eBC0mFKIAgCAIBtdwASfQDc/4nIbsZXulr2ZDR9HwsYpxybqxmZe4Xl71cquyMR69hO3jg+fy0r5n1OWxNX0lRvdovBflz1DZuG7vh4xtZtXl+55vpp5EsyKWZ5X2seH783TdRwsZgmVk4OVRQzMUUXPRYle7gEoCxA5gEqDvsdp2U5KM03omv7I+Ig6lKUIuzaaXmigPtb6HNQ0bEytTGXjZeLiKlhWuu6rINPMLbY1bFqkXHQ908b7CyK+wUqFe+pY2FSSjZpvnl+MwmJ2JVw9OVTtqUYq+Sadt+WaVtd9+W+uLLv5HzB8j/AIlgZ8yRdGfUXXq2JXpGTZtquFUE+cnfMxU2Wu9CzEvaicEsG+/MdzYLbsmexmHdOXaS9l/w+H2PQ9kY9V6apyftxVtdUtJc3x58iykrjQCAIAgFdurzqbPh+lMHFKHVspC6FuLLh427Icp0O4d2ZWREb5WZLGbktJrssMJhvSu8vdX8vh9zP7X2i8LBRp27b46Rj8Vt73JebyVnCfSz0jNqh/8AsGsrZZRcxuoxrms7ua7HmcvLYkOaXJ5Ctjvkb8n/AE+K3TcVi+x+nS6rdyX33eJTbL2S636+JTaeaTveTf8AlLlwjv35ZFmfHnSnoWo47Yo0/FxLOBWnJw8ejHuobb5GVqkUOqnY9qwOjDyI9CKyGKqwd+03ybdjS19mYarHs+jSe5pJNdP6KudBPiTIwNYz/D1jA1WJk91AWKLqGJctDWVg+QFlfdQtsGcVY+//AFgSzx0VKmqi5dJK/wCeZm9iVJ0sRPDye6WWdu1BpXWeV78M8uGd/wCURuCJuqX2YjWNHzMYJyyaKzmYm3Hl71SrOqKW8h307mOT5fLc3mPUSsNV9HUT3aPwf5crNpYbvGHlG2azj+5Zrrp5mKFHBAI9CNx/iak8vTubpwBAEAQDtPCekLk5WHiON0yczFx3H8pbkVVMP7VyJ8zfZi3wTfRHdRh26kI8ZRXk5IzREf6mPPXTSAIB1/iPQa8yjIwrVD05NFuPYrAFWrsrat1YHyIKsRsf2nMXZpo+ZR7UXF77rqYW2xHrJqsHG2smu1T6rapKWKf8OCP6mxvfNHj1nH2XqsnfW6yOVpGr241teVRY9ORS4sqtrPF67B6Mp/2NiCGBIIYMQeGlJWaujsp1JU5KcHZrQyZdK/U3X4ipONdwq1fGQNkVL5JkVbhfe8cE/wDgWKq1e5NFjKD8
ttLPm8ThnSd17r0+35qej7N2hHFQs8prVfVcv6J4kIuBAKtdWnV8uj89I090fVeP/wCi8hXq05CvIcg26PmMpDCpgVqUrZaCGqrussLhPSe3P3f7/wCOf4s9tTaXd16On77/APXn48EU58OYl+RremrrRyHbJzdPbI9+LvZZjW21vUlgs5FMe4OqmshVrrscca9jtcSaVKXotydrcVr58zH04znioLFXd3G/a17L08E3u5vJEveGeobX/Cuq2YmttbbjX3NflUu7ZC1VW2OTlaZZuzDHrIbbGXZOFbV9qmwfLElh6Venelqsl4rc+fP6FtT2hicHiHDEu8W7u+ii8lKObtHL3fH/AC1tn1AdReJ4exVvJW/MyEJwcVWG9x2G1zkb8MVNwTbt83kqhmYCVVDDyqytot7/ADeanG46GFh2nm37q4/8c/qVr/4/fZ9k5Obm+J7+Xa430V2soVcrNuuW3LtT+RQUNZKjj3L2QHlRYqWOPqJRVJcvJJWRnth4epKpLE1FqnZ8XJ3b8MuG/LQvdKQ2ZqB/qAYXfFmkLjZWZiINkxszKx0H8JVkW1KP6VAJsIPtRT4pPqjyKtDsVJx4SkvJSdjq59HSIAgCAdp4T1dcbKw8tzsmNmYuQ5/hKsiq1j/SoTPma7UWuKa6o7qM+xUhLhKL8lJXM0RP+pjz100gCAIBjA6x/Y9ZpGq35KofcdSssy8ewA8Vvcl8rHJ3OzrazXAeQNVq8d+3Zx0mDrKpTS3rLy3P6HnG18I6FdzS9mWa/c9V9fPkQTJxRnf+AfHeRpOXj6pjHa/GsDhd+K2p6W0WHY/p31lqidiVDchsyqR8VIKpFxlo/wAv5EjD15UKiqw1X8revMy++DfFtOo4uNqNDcsfKprvrJ8iFZQeLD1Dod0KnzVlI/aZKcXCTi9UerUqkasFOLumk14M8T1L+0uzRdHzdRp8skKlGO2wPC+6xKUt2PkezzN3E7g8NtjvO7D01UqKL03+CzIe0MQ8Ph5VI66Lxbsv7Ks9D3ThTqG/iXOBvSvJsGHTae4L8lWDXZ2QzMzXMt7MoWzzNyW2PzPaYWeNxDj+nDLLPw4dPsZ7Y+CVb/ua3tO7tfitZPzyS5XJS6zOlu3XAmrYSh9Rpq7N2OzKozMYF3RUZyEXIqZ325lVtVyrMOFUjYPEql7MtP6f2J+1tmvE2qU/fWWusfo1/P8AVWfbjruoWabpFGrl/wD5Wq/UOyMhO3mV6QFxaU98BCuzW5dNxW2wcraqeZawku1pQjFVJOn7uWmna1y8uhmMdUqOhSjiPfTlr73o0rXfi1k96V7nq/YP0n6lr99OdqgysfS6qqKw2QbK8rKx6kWrHxcdG2toxlrUA3lU+Q71c3ta+rpr4qFJONOzlnpom9/N8vpkTMBsyriZKeITUEla+rSyUbapLyvzeZkT0fR6saqvFprSmilFrqqrUJXXWo2VEUABVUDbYSgbbd3qbyMVFWSskcucH0ag/wCoBhd8WauuTlZmWh3TIzMrIQ/yluRbap/tXBmwguzFLgkuiPIq0+3UnLjKT8nJ2Orn0dIgCAIBtdAQQfQjY/4nIauZXulr2nDWNHw8kvyyaKxh5e/Hl71SqozsF8h307eQB5fLcvkPQZbE0vR1Gt2q8H+WPUNm4nvGHjK92spfuWT66+ZLMilmIAgHm/aL4ExtVxL9PyaVvptRtkb1WwA9uyths1dqNsRYhDKf39Z905uElKLszor0YVoOE1dP86mH7R/DORdi5OeKz2sI4iZZIKtU+Q11dPJSvl+rS1ZBIKsyDY7krrXJKSjxvbyzPKY0ZuMprSNlLim21p4rPh1t6fA9ieq34Ka1RhW5OA7XKbMcC6ypq7DU/doT9cLyBPNK7ECglmT0nW60FLsN2fPnnroSI4KvKl6aMLxz0zeTavbW3hfy3Wq/4+fbVQKbPDd9wW7vWZGnK2wW2l17l9FTehsS0W5PA/M62uV5CqzhV4+i7+kS5Px4/T8z02wcXHsvDyed24+DzaXg7u3PLLSderP2f3arombi0KXyEFWVVWBu1jU2pc1SD93sqWxAP3dlkHC1FCqm9NOuRd7ToOvhpwjrk14xadv4K7dEPU5gYOI2iZ+RXiql1l2Hk2fJjtVae5ZVbaSUrsW42WB7O2jpYqg8k+exxuGnKXbgr8eOWXmUGxtpUqdP0FV9m12m9Gm72/8AFp8dfEmb22dZmlaXjv7nk42pag4K0U49q3U1t5fqZV1LFErTfl2g4st/8VCjnZXDo4Oc37ScVvv9L/iLXG7Xo0IfpyU57kndeLa0X8vRcq59OnsAzPFWY3iTVmezBa3uMbQOWo2qdhSibcUwa+IrPEBSq9pB/wBjV2GIrxoR9HT1/r/6M/s7A1MbU7ziHeN75/5tbuUF/Oml28h0oDfCAIBE/VL7TRo+j5uSr8cm6s4eJtx5e9XKyK6hvJuwncyCPP5aW8j6GVhqXpKiW7V+C/LFZtLE93w8pXzeUf3PJdNfIxQIgAAHoBsP8TUnl6VjdOAIAgCAIBNPSx1BHw5mE3c20zL4JmIoZjUQT28uusblmp5EMiDlZUTsHaulDDxWH9NHL3lp9i62Xj+61Pa9yWvJ/F9+XgZRNN1Ku+uvIqsS2m1FsqtrZXrsrYBkdHUlWVlIIYEggzNNNOzPR4yUkpRd081bRp7zkTg+jUQCH9Q8FeJjnNdVrmImmPx/QfTKXuqAVOXa2ZeTO5tAe29hWq1bpeS8lKdLs2cH2v3Zfn5kVjpYr0t1VXY4djNaaZ+OumWpGh9j2vaVi6pp+NVpep4+ouxQXY9ZzMnKybbGy8rVbNsHENdKMdiot2Raa0pbtjud/pac5RlK6a4PJJaJasivD4inCcIdmSle11m3JttyeStn/RJ/sG8A6no2LgaTaultiY+MwuuxmzUyDlFue4rek1XGxmd3yWspLvuwoTnskevONSTkr58bafm7dxJuDpVaNONOXZsln2b6+evjv4I6jVejTRLMp9TqTLw8xrRkV24eVZT7vkcuZtorKvUjM25KMj1+Z2RdzOxYuoo9l2a5rVcOJGnsnDubqxTjLVOMmrPilnG/k1yJxrXYAbkkADkdtyf5OwA3Pr5AD+APSQi5K7e1zod0nVrnzanu07KtZnuOMK3x7rWO7WPjuNlsY7sWoenmzMzB2YtLCljZ012XmuevUoMVsWhXk5puEnra1m+Nnl0tffmeY8Df8dum49iXZmZkZ4Q79gImJjv/AALQj23Mv/qt6BvRuQJU9lTaE5K0Vb+X9iNQ2BRg71JOfKyUemb/AJ/gtXhYSVIlNaLXVWqpXWiqqIigBURVACqoAAUAAASrbvmzTpJKy0PtByIBx9R1KuiuzItsSqmpGsttsZUrrrUFnd3YhVVVBJYkAATlJt2R8ykopyk7JZtvRJbzF31T9QR8R5gNPNdMxOSYaMGQ2kkdzLsrOxVruICo45V1AbhGsuQaXC4f0Mc/eev2PONqY7vVT2fcjpzfxfbl4kLSYUogCAIAgCAIBNvTz1VZvh0+7FTl6Wz8mxGfi1D
E72WYdhBFZYkuaGHasfc/os9lrQ8RhY1s9JcePj9/7LrAbUnhPYt2ocN68Pto+W+/fsv6ktG1oKuNmVrkEbnDyCKMtTsOQFTkd0LuB3KGtr39HMoquHqU/eWXFaG4wu0KGJX6cs+DykvJ6+KuuZJxEjFiaQBAEAQBAEAQBANQIBGHtR6ktG0UMuTmVtkAbjDxyt+Wx2PEGpG/SDcSO5kNTXv6uJJpYepV91ZcXoV2K2hQwy/UlnwWcn5bvF2XMoL1DdVWb4iPuwU4mlq/JcRX5NewO9dmZYABYVIDilR2q32P6rJXat7h8LGjnrLjw8Pv/Rh8ftSpi/Yt2YcL5vx+2i5kJSYUogCAIAgCAIAgCAbLqFYcWAZT6hgCD/R8pyOZ6HT/AGg6lQorp1PU6EXyVMfUdSoUD9gFpykAA/gCdUqUJaxXREuli69JWhUkv9n9Tl/FvWfreufetb/PnX3el8C6Hf6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/wA+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819Tiah7QdRvU13anqd6N5MmRqOpXqR+4K3ZTgg/wROyNKEdIrojoqYuvVVp1JP/Z/TU89TQqjioCgegAAA/oeU7SJzN84AgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgH/9k=',
401
+ },
402
+ },
403
+ { type: 'text', text: 'Describe this image.' },
404
+ ],
405
+ }),
406
+ ]);
407
+
408
+ await expect(dataUrlRes).resolves.toBeDefined();
409
+
410
+ const urlRes = invoke(chat, invocationType, [
411
+ new HumanMessage({
412
+ content: [
413
+ {
414
+ type: 'image_url',
415
+ image_url:
416
+ 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/30/RedDisc.svg/24px-RedDisc.svg.png',
417
+ },
418
+ { type: 'text', text: 'Describe this image.' },
419
+ ],
420
+ }),
421
+ ]);
422
+
423
+ await expect(urlRes).resolves.toBeDefined();
424
+
425
+ const invalidSchemeRes = invoke(chat, invocationType, [
426
+ new HumanMessage({
427
+ content: [
428
+ {
429
+ type: 'image_url',
430
+ image_url: 'file:///path/to/image.png',
431
+ },
432
+ { type: 'text', text: 'Describe this image.' },
433
+ ],
434
+ }),
435
+ ]);
436
+
437
+ await expect(invalidSchemeRes).rejects.toThrow(
438
+ [
439
+ 'Invalid image URL protocol: "file:". Anthropic only supports images as http, https, or base64-encoded data URLs on \'image_url\' content blocks.',
440
+ 'Example: data:image/png;base64,/9j/4AAQSk...',
441
+ 'Example: https://example.com/image.jpg',
442
+ ].join('\n\n')
443
+ );
444
+
445
+ const invalidUrlRes = invoke(chat, invocationType, [
446
+ new HumanMessage({
447
+ content: [
448
+ {
449
+ type: 'image_url',
450
+ image_url: "this isn't a valid URL",
451
+ },
452
+ { type: 'text', text: 'Describe this image.' },
453
+ ],
454
+ }),
455
+ ]);
456
+
457
+ await expect(invalidUrlRes).rejects.toThrow(
458
+ [
459
+ `Malformed image URL: "this isn't a valid URL". Content blocks of type 'image_url' must be a valid http, https, or base64-encoded data URL.`,
460
+ 'Example: data:image/png;base64,/9j/4AAQSk...',
461
+ 'Example: https://example.com/image.jpg',
462
+ ].join('\n\n')
463
+ );
464
+ }
465
+ );
466
+
467
+ test.each(['invoke', 'stream'])(
468
+ 'Test ChatAnthropic Anthropic Image Block, %s',
469
+ async (invocationType: string) => {
470
+ const chat = new ChatAnthropic({
471
+ modelName,
472
+ maxRetries: 0,
473
+ });
474
+ const base64Res = invoke(chat, invocationType, [
475
+ new HumanMessage({
476
+ content: [
477
+ {
478
+ type: 'image',
479
+ source: {
480
+ type: 'base64',
481
+ media_type: 'image/jpeg',
482
+ data: '/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAMCAggHCQgGCQgICAcICAgICAgICAYICAgHDAgHCAgICAgIBggICAgICAgICBYICAgICwkKCAgNDQoIDggICQgBAwQEBgUGCgYGCBALCg0QCg0NEA0KCg8LDQoKCgoLDgoQDQoLDQoKCg4NDQ0NDgsQDw0OCg4NDQ4NDQoJDg8OCP/AABEIALAAsAMBEQACEQEDEQH/xAAdAAEAAgEFAQAAAAAAAAAAAAAABwgJAQIEBQYD/8QANBAAAgIBAwIDBwQCAgIDAAAAAQIAAwQFERIIEwYhMQcUFyJVldQjQVGBcZEJMzJiFRYk/8QAGwEBAAMAAwEAAAAAAAAAAAAAAAQFBgEDBwL/xAA5EQACAQIDBQQJBAIBBQAAAAAAAQIDEQQhMQVBUWGREhRxgRMVIjJSU8HR8CNyobFCguEGJGKi4v/aAAwDAQACEQMRAD8ApfJplBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBAEAQBANl16qOTEKB6kkAD+z5Tkcj0On+z7Ub1FlOmanejeavj6dqV6kfsQ1OK4IP8AIM6pVYR1kuqJdLCV6qvCnJ/6v66nL+Ems/RNc+y63+BOvvFL411O/wBW4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6D4Saz9E1z7Lrf4Ed4pfGuo9W4r5T6HE1D2e6lQpsu0zU6EXzZ8jTtSoUD9yWuxUAA/kmdkasJaSXVHRVwlekrzpyX+r+mh56m9WHJSGU+hUgg/wBjynaRORvnAEAQBAEAQBAEAQCbennpVzfER95LHE0tX4tlsnJr2B2srw6yQLCpBQ3Me1W+4/VZLKlh4jFRo5ay4cPH7f0XWA2XUxft37MONs34ffRcy/Xsu6bdG0UK2Nh1tkAbHMyAt+Wx2HIi11/SDcQe3jrTXv6IJRVcRUqe88uC0Nxhdn0MMv0458XnJ+e7wVlyJPJkYsTSAIAgCAIAgCAIBqDAIx9qHTbo2tBmycOtcgjYZmOBRlqdjxJtQDuhdye3ette/qhkmliKlP3XlwehXYrZ9DEr9SOfFZS6rXwd1yKCdQ3Srm+HT7yGOXpbPxXLVOLUMTtXXmVgkVliQgvU9qx9h+kz11Ne4fFRrZaS4cfD7f2YfH7LqYT279qHHevH76PlvhKTClEAQBAEAQBAJp6WOn0+I80i7mumYnF8x1LIbSSe3iV2DYq13ElnQ8q6gdijWUuIeKxHoY5e89PuXWy8D3qp7S9iOvN/D9+XiZRNN06uiuvHqrSqmpFrqqrVUrrrUBUREUBVVVAAUAAATNNtu7PR4xUUoxVkskloktxyCZwfRj26jetHPtzrMXSM4Uabj7Vrfj10O2ZdsDbb3bqrCKEYmpeyED8Hs53LZVwvsPg4qN6kbt+OS8t5hdobYqOo44edorK6SzfmtFpz14H16f8Arkz6cmrD1e9crBvsFZy3ropvxC2yo7NTXXXbjhtuXcTmisz91hX2yr4KLjemrNbuPXeMDtuoqihiGnF/5ZJx55ZNceF76GQSUJuhAEAQBAEAhb239WWl+H391s7mXnbAnExu2WqUjdWyLHda6Qw2IXdrCCGFZX5pMo4WdXNZLiyoxm1KOFfZl7UuCtdeN2kvzcRB4d/5JMV7OOVpWRRSWAFmPk1ZTKN9uT1PRi+QHnsj2H12DHYGXLZzS9mV3zVvuVFL/qGDlapSaXFST6qyfS/3tb4M8a4up49WoYlyZGLcCUsTf1B2ZGVgHrsRgVNbqrIwIYAjaVc4Sg+zJWZqaVWFWCnB3T0/PodnqOnV312Y9taW02o1dtViq9dlbAq6OjAqyspIKkEEGfKbTuj7lFSTjJXTyaejXAxd9U/T6fDmYBTzbTMvm+G7FnNRBHcxLLDuWankCrueVlRG5dq7nOlwuI9NHP3lr9zzjamA7rU9n3Jacn8P25eBC0mFKIAgCAIBtdwASfQDc/4nIbsZXulr2ZDR9HwsYpxybqxmZe4Xl71cquyMR69hO3jg+fy0r5n1OWxNX0lRvdovBflz1DZuG7vh4xtZtXl+55vpp5EsyKWZ5X2seH783TdRwsZgmVk4OVRQzMUUXPRYle7gEoCxA5gEqDvsdp2U5KM03omv7I+Ig6lKUIuzaaXmigPtb6HNQ0bEytTGXjZeLiKlhWuu6rINPMLbY1bFqkXHQ908b7CyK+wUqFe+pY2FSSjZpvnl+MwmJ2JVw9OVTtqUYq+Sadt+WaVtd9+W+uLLv5HzB8j/AIlgZ8yRdGfUXXq2JXpGTZtquFUE+cnfMxU2Wu9CzEvaicEsG+/MdzYLbsmexmHdOXaS9l/w+H2PQ9kY9V6apyftxVtdUtJc3x58iykrjQCAIAgFdurzqbPh+lMHFKHVspC6FuLLh427Icp0O4d2ZWREb5WZLGbktJrssMJhvSu8vdX8vh9zP7X2i8LBRp27b46Rj8Vt73JebyVnCfSz0jNqh/8AsGsrZZRcxuoxrms7ua7HmcvLYkOaXJ5Ctjvkb8n/AE+K3TcVi+x+nS6rdyX33eJTbL2S636+JTaeaTveTf8AlLlwjv35ZFmfHnSnoWo47Yo0/FxLOBWnJw8ejHuobb5GVqkUOqnY9qwOjDyI9CKyGKqwd+03ybdjS19mYarHs+jSe5pJNdP6KudBPiTIwNYz/D1jA1WJk91AWKLqGJctDWVg+QFlfdQtsGcVY+//AFgSzx0VKmqi5dJK/wCeZm9iVJ0sRPDye6WWdu1BpXWeV78M8uGd/wCURuCJuqX2YjWNHzMYJyyaKzmYm3Hl71SrOqKW8h307mOT5fLc3mPUSsNV9HUT3aPwf5crNpYbvGHlG2azj+5Zrrp5mKFHBAI9CNx/iak8vTubpwBAEAQDtPCekLk5WHiON0yczFx3H8pbkVVMP7VyJ8zfZi3wTfRHdRh26kI8ZRXk5IzREf6mPPXTSAIB1/iPQa8yjIwrVD05NFuPYrAFWrsrat1YHyIKsRsf2nMXZpo+ZR7UXF77rqYW2xHrJqsHG2smu1T6rapKWKf8OCP6mxvfNHj1nH2XqsnfW6yOVpGr241teVRY9ORS4sqtrPF67B6Mp/2NiCGBIIYMQeGlJWaujsp1JU5KcHZrQyZdK/U3X4ipONdwq1fGQNkVL5JkVbhfe8cE/wDgWKq1e5NFjKD8ttLPm8ThnSd17r0+35qej7
N2hHFQs8prVfVcv6J4kIuBAKtdWnV8uj89I090fVeP/wCi8hXq05CvIcg26PmMpDCpgVqUrZaCGqrussLhPSe3P3f7/wCOf4s9tTaXd16On77/APXn48EU58OYl+RremrrRyHbJzdPbI9+LvZZjW21vUlgs5FMe4OqmshVrrscca9jtcSaVKXotydrcVr58zH04znioLFXd3G/a17L08E3u5vJEveGeobX/Cuq2YmttbbjX3NflUu7ZC1VW2OTlaZZuzDHrIbbGXZOFbV9qmwfLElh6Venelqsl4rc+fP6FtT2hicHiHDEu8W7u+ii8lKObtHL3fH/AC1tn1AdReJ4exVvJW/MyEJwcVWG9x2G1zkb8MVNwTbt83kqhmYCVVDDyqytot7/ADeanG46GFh2nm37q4/8c/qVr/4/fZ9k5Obm+J7+Xa430V2soVcrNuuW3LtT+RQUNZKjj3L2QHlRYqWOPqJRVJcvJJWRnth4epKpLE1FqnZ8XJ3b8MuG/LQvdKQ2ZqB/qAYXfFmkLjZWZiINkxszKx0H8JVkW1KP6VAJsIPtRT4pPqjyKtDsVJx4SkvJSdjq59HSIAgCAdp4T1dcbKw8tzsmNmYuQ5/hKsiq1j/SoTPma7UWuKa6o7qM+xUhLhKL8lJXM0RP+pjz100gCAIBjA6x/Y9ZpGq35KofcdSssy8ewA8Vvcl8rHJ3OzrazXAeQNVq8d+3Zx0mDrKpTS3rLy3P6HnG18I6FdzS9mWa/c9V9fPkQTJxRnf+AfHeRpOXj6pjHa/GsDhd+K2p6W0WHY/p31lqidiVDchsyqR8VIKpFxlo/wAv5EjD15UKiqw1X8revMy++DfFtOo4uNqNDcsfKprvrJ8iFZQeLD1Dod0KnzVlI/aZKcXCTi9UerUqkasFOLumk14M8T1L+0uzRdHzdRp8skKlGO2wPC+6xKUt2PkezzN3E7g8NtjvO7D01UqKL03+CzIe0MQ8Ph5VI66Lxbsv7Ks9D3ThTqG/iXOBvSvJsGHTae4L8lWDXZ2QzMzXMt7MoWzzNyW2PzPaYWeNxDj+nDLLPw4dPsZ7Y+CVb/ua3tO7tfitZPzyS5XJS6zOlu3XAmrYSh9Rpq7N2OzKozMYF3RUZyEXIqZ325lVtVyrMOFUjYPEql7MtP6f2J+1tmvE2qU/fWWusfo1/P8AVWfbjruoWabpFGrl/wD5Wq/UOyMhO3mV6QFxaU98BCuzW5dNxW2wcraqeZawku1pQjFVJOn7uWmna1y8uhmMdUqOhSjiPfTlr73o0rXfi1k96V7nq/YP0n6lr99OdqgysfS6qqKw2QbK8rKx6kWrHxcdG2toxlrUA3lU+Q71c3ta+rpr4qFJONOzlnpom9/N8vpkTMBsyriZKeITUEla+rSyUbapLyvzeZkT0fR6saqvFprSmilFrqqrUJXXWo2VEUABVUDbYSgbbd3qbyMVFWSskcucH0ag/wCoBhd8WauuTlZmWh3TIzMrIQ/yluRbap/tXBmwguzFLgkuiPIq0+3UnLjKT8nJ2Orn0dIgCAIBtdAQQfQjY/4nIauZXulr2nDWNHw8kvyyaKxh5e/Hl71SqozsF8h307eQB5fLcvkPQZbE0vR1Gt2q8H+WPUNm4nvGHjK92spfuWT66+ZLMilmIAgHm/aL4ExtVxL9PyaVvptRtkb1WwA9uyths1dqNsRYhDKf39Z905uElKLszor0YVoOE1dP86mH7R/DORdi5OeKz2sI4iZZIKtU+Q11dPJSvl+rS1ZBIKsyDY7krrXJKSjxvbyzPKY0ZuMprSNlLim21p4rPh1t6fA9ieq34Ka1RhW5OA7XKbMcC6ypq7DU/doT9cLyBPNK7ECglmT0nW60FLsN2fPnnroSI4KvKl6aMLxz0zeTavbW3hfy3Wq/4+fbVQKbPDd9wW7vWZGnK2wW2l17l9FTehsS0W5PA/M62uV5CqzhV4+i7+kS5Px4/T8z02wcXHsvDyed24+DzaXg7u3PLLSderP2f3arombi0KXyEFWVVWBu1jU2pc1SD93sqWxAP3dlkHC1FCqm9NOuRd7ToOvhpwjrk14xadv4K7dEPU5gYOI2iZ+RXiql1l2Hk2fJjtVae5ZVbaSUrsW42WB7O2jpYqg8k+exxuGnKXbgr8eOWXmUGxtpUqdP0FV9m12m9Gm72/8AFp8dfEmb22dZmlaXjv7nk42pag4K0U49q3U1t5fqZV1LFErTfl2g4st/8VCjnZXDo4Oc37ScVvv9L/iLXG7Xo0IfpyU57kndeLa0X8vRcq59OnsAzPFWY3iTVmezBa3uMbQOWo2qdhSibcUwa+IrPEBSq9pB/wBjV2GIrxoR9HT1/r/6M/s7A1MbU7ziHeN75/5tbuUF/Oml28h0oDfCAIBE/VL7TRo+j5uSr8cm6s4eJtx5e9XKyK6hvJuwncyCPP5aW8j6GVhqXpKiW7V+C/LFZtLE93w8pXzeUf3PJdNfIxQIgAAHoBsP8TUnl6VjdOAIAgCAIBNPSx1BHw5mE3c20zL4JmIoZjUQT28uusblmp5EMiDlZUTsHaulDDxWH9NHL3lp9i62Xj+61Pa9yWvJ/F9+XgZRNN1Ku+uvIqsS2m1FsqtrZXrsrYBkdHUlWVlIIYEggzNNNOzPR4yUkpRd081bRp7zkTg+jUQCH9Q8FeJjnNdVrmImmPx/QfTKXuqAVOXa2ZeTO5tAe29hWq1bpeS8lKdLs2cH2v3Zfn5kVjpYr0t1VXY4djNaaZ+OumWpGh9j2vaVi6pp+NVpep4+ouxQXY9ZzMnKybbGy8rVbNsHENdKMdiot2Raa0pbtjud/pac5RlK6a4PJJaJasivD4inCcIdmSle11m3JttyeStn/RJ/sG8A6no2LgaTaultiY+MwuuxmzUyDlFue4rek1XGxmd3yWspLvuwoTnskevONSTkr58bafm7dxJuDpVaNONOXZsln2b6+evjv4I6jVejTRLMp9TqTLw8xrRkV24eVZT7vkcuZtorKvUjM25KMj1+Z2RdzOxYuoo9l2a5rVcOJGnsnDubqxTjLVOMmrPilnG/k1yJxrXYAbkkADkdtyf5OwA3Pr5AD+APSQi5K7e1zod0nVrnzanu07KtZnuOMK3x7rWO7WPjuNlsY7sWoenmzMzB2YtLCljZ012XmuevUoMVsWhXk5puEnra1m+Nnl0tffmeY8Df8dum49iXZmZkZ4Q79gImJjv/AALQj23Mv/qt6BvRuQJU9lTaE5K0Vb+X9iNQ2BRg71JOfKyUemb/AJ/gtXhYSVIlNaLXVWqpXWiqqIigBURVACqoAAUAAASrbvmzTpJKy0PtByIBx9R1KuiuzItsSqmpGsttsZUrrrUFnd3YhVVVBJYkAATlJt2R8ykopyk7JZtvRJbzF31T9QR8R5gNPNdMxOSYaMGQ2kkdzLsrOxVruICo45V1AbhGsuQaXC4f0Mc/eev2PONqY7vVT2fcjpzfxfbl4kLSYUogCAIAgCAIBNvTz1VZvh0+7FTl6Wz8mxGfi1DE72WYdhBFZYkuaGHasfc/o
s9lrQ8RhY1s9JcePj9/7LrAbUnhPYt2ocN68Pto+W+/fsv6ktG1oKuNmVrkEbnDyCKMtTsOQFTkd0LuB3KGtr39HMoquHqU/eWXFaG4wu0KGJX6cs+DykvJ6+KuuZJxEjFiaQBAEAQBAEAQBANQIBGHtR6ktG0UMuTmVtkAbjDxyt+Wx2PEGpG/SDcSO5kNTXv6uJJpYepV91ZcXoV2K2hQwy/UlnwWcn5bvF2XMoL1DdVWb4iPuwU4mlq/JcRX5NewO9dmZYABYVIDilR2q32P6rJXat7h8LGjnrLjw8Pv/Rh8ftSpi/Yt2YcL5vx+2i5kJSYUogCAIAgCAIAgCAbLqFYcWAZT6hgCD/R8pyOZ6HT/AGg6lQorp1PU6EXyVMfUdSoUD9gFpykAA/gCdUqUJaxXREuli69JWhUkv9n9Tl/FvWfreufetb/PnX3el8C6Hf6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/z47vS+BdB6yxXzX1Hxb1n63rn3rW/wA+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819R8W9Z+t65961v8+O70vgXQessV819Tiah7QdRvU13anqd6N5MmRqOpXqR+4K3ZTgg/wROyNKEdIrojoqYuvVVp1JP/Z/TU89TQqjioCgegAAA/oeU7SJzN84AgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgCAIAgH/9k=',
483
+ },
484
+ },
485
+ {
486
+ type: 'text',
487
+ text: 'Describe this image',
488
+ },
489
+ ],
490
+ }),
491
+ ]);
492
+
493
+ await expect(base64Res).resolves.toBeDefined();
494
+
495
+ const urlRes = chat.invoke([
496
+ new HumanMessage({
497
+ content: [
498
+ {
499
+ type: 'image',
500
+ source: {
501
+ type: 'url',
502
+ url: 'https://upload.wikimedia.org/wikipedia/commons/thumb/3/30/RedDisc.svg/24px-RedDisc.svg.png',
503
+ },
504
+ },
505
+ ],
506
+ }),
507
+ ]);
508
+
509
+ await expect(urlRes).resolves.toBeDefined();
510
+ }
511
+ );
512
+ });
513
+
514
+ test('Stream tokens', async () => {
515
+ const model = new ChatAnthropic({
516
+ modelName,
517
+ temperature: 0,
518
+ maxTokens: 10,
519
+ });
520
+ let res: AIMessageChunk | null = null;
521
+ for await (const chunk of await model.stream(
522
+ 'Why is the sky blue? Be concise.'
523
+ )) {
524
+ if (!res) {
525
+ res = chunk;
526
+ } else {
527
+ res = res.concat(chunk);
528
+ }
529
+ }
530
+ // console.log(res);
531
+ expect(res?.usage_metadata).toBeDefined();
532
+ if (!res?.usage_metadata) {
533
+ return;
534
+ }
535
+ expect(res.usage_metadata.input_tokens).toBeGreaterThan(1);
536
+ expect(res.usage_metadata.output_tokens).toBeGreaterThan(1);
537
+ expect(res.usage_metadata.total_tokens).toBe(
538
+ res.usage_metadata.input_tokens + res.usage_metadata.output_tokens
539
+ );
540
+ });
541
+
542
+ test('id is supplied when invoking', async () => {
543
+ const model = new ChatAnthropic({ modelName });
544
+ const result = await model.invoke('Hello');
545
+ expect(result.id).toBeDefined();
546
+ expect(result.id).not.toEqual('');
547
+ });
548
+
549
+ test('id is supplied when streaming', async () => {
550
+ const model = new ChatAnthropic({ modelName });
551
+ let finalChunk: AIMessageChunk | undefined;
552
+ for await (const chunk of await model.stream('Hello')) {
553
+ finalChunk = !finalChunk ? chunk : concat(finalChunk, chunk);
554
+ }
555
+ expect(finalChunk).toBeDefined();
556
+ if (!finalChunk) return;
557
+ expect(finalChunk.id).toBeDefined();
558
+ expect(finalChunk.id).not.toEqual('');
559
+ });
560
+
561
+ const CACHED_TEXT = `## Components
562
+
563
+ LangChain provides standard, extendable interfaces and external integrations for various components useful for building with LLMs.
564
+ Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
565
+
566
+ ### Chat models
567
+
568
+ <span data-heading-keywords="chat model,chat models"></span>
569
+
570
+ Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
571
+ These are generally newer models (older models are generally \`LLMs\`, see below).
572
+ Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
573
+
574
+ Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.
575
+ This gives them the same interface as LLMs (and simpler to use).
576
+ When a string is passed in as input, it will be converted to a \`HumanMessage\` under the hood before being passed to the underlying model.
577
+
578
+ LangChain does not host any Chat Models, rather we rely on third party integrations.
579
+
580
+ We have some standardized parameters when constructing ChatModels:
581
+
582
+ - \`model\`: the name of the model
583
+
584
+ Chat Models also accept other parameters that are specific to that integration.
585
+
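+ For example, a chat model can be constructed with the standardized \`model\` parameter and invoked with either a plain string or explicit messages. A minimal sketch, assuming the \`@langchain/anthropic\` integration package and a placeholder model name:
+
+ \`\`\`typescript
+ // Minimal sketch; the package and model name here are assumptions.
+ import { ChatAnthropic } from "@langchain/anthropic";
+ import { HumanMessage } from "@langchain/core/messages";
+
+ const chatModel = new ChatAnthropic({ model: "claude-3-5-sonnet-latest" });
+
+ // A plain string is converted to a HumanMessage under the hood...
+ await chatModel.invoke("Why is the sky blue?");
+ // ...which is equivalent to passing the message explicitly.
+ await chatModel.invoke([new HumanMessage("Why is the sky blue?")]);
+ \`\`\`
+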
586
+ :::important
587
+ Some chat models have been fine-tuned for **tool calling** and provide a dedicated API for it.
588
+ Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.
589
+ Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
590
+ :::
591
+
592
+ For specifics on how to use chat models, see the [relevant how-to guides here](/docs/how_to/#chat-models).
593
+
594
+ #### Multimodality
595
+
596
+ Some chat models are multimodal, accepting images, audio and even video as inputs.
597
+ These are still less common, meaning model providers haven't standardized on the "best" way to define the API.
598
+ Multimodal outputs are even less common. As such, we've kept our multimodal abstractions fairly lightweight
599
+ and plan to further solidify the multimodal APIs and interaction patterns as the field matures.
600
+
601
+ In LangChain, most chat models that support multimodal inputs also accept those values in OpenAI's content blocks format.
602
+ So far this is restricted to image inputs. For models like Gemini which support video and other bytes input, the APIs also support the native, model-specific representations.
603
+
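+ As a rough illustration, an image can be passed using an OpenAI-style \`image_url\` content block (the data URL below is a placeholder, and the chat model instance is assumed to support image input):
+
+ \`\`\`typescript
+ // Sketch only: the image data and the multimodal model instance are assumptions.
+ import { HumanMessage } from "@langchain/core/messages";
+
+ const message = new HumanMessage({
+   content: [
+     { type: "text", text: "Describe this image" },
+     {
+       type: "image_url",
+       // A data URL (or https URL) pointing at the image.
+       image_url: { url: "data:image/jpeg;base64,..." },
+     },
+   ],
+ });
+
+ // await someMultimodalChatModel.invoke([message]);
+ \`\`\`
+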
604
+ For specifics on how to use multimodal models, see the [relevant how-to guides here](/docs/how_to/#multimodal).
605
+
606
+ ### LLMs
607
+
608
+ <span data-heading-keywords="llm,llms"></span>
609
+
610
+ :::caution
611
+ Pure text-in/text-out LLMs tend to be older or lower-level. Many popular models are best used as [chat completion models](/docs/concepts/#chat-models),
612
+ even for non-chat use cases.
613
+
614
+ You are probably looking for [the section above instead](/docs/concepts/#chat-models).
615
+ :::
616
+
617
+ Language models that take a string as input and return a string.
618
+ These are traditionally older models (newer models generally are [Chat Models](/docs/concepts/#chat-models), see above).
619
+
620
+ Although the underlying models are string in, string out, the LangChain wrappers also allow these models to take messages as input.
621
+ This gives them the same interface as [Chat Models](/docs/concepts/#chat-models).
622
+ When messages are passed in as input, they will be formatted into a string under the hood before being passed to the underlying model.
623
+
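+ For instance, assuming the \`@langchain/openai\` package and its completion-style \`OpenAI\` wrapper, both input forms look like this:
+
+ \`\`\`typescript
+ // Minimal sketch; the package and model name are assumptions.
+ import { OpenAI } from "@langchain/openai";
+ import { HumanMessage } from "@langchain/core/messages";
+
+ const llm = new OpenAI({ model: "gpt-3.5-turbo-instruct" });
+
+ // String in, string out.
+ await llm.invoke("Write a haiku about the sea.");
+ // Messages are formatted into a single string before the call.
+ await llm.invoke([new HumanMessage("Write a haiku about the sea.")]);
+ \`\`\`
+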
624
+ LangChain does not host any LLMs; rather, we rely on third-party integrations.
625
+
626
+ For specifics on how to use LLMs, see the [relevant how-to guides here](/docs/how_to/#llms).
627
+
628
+ ### Message types
629
+
630
+ Some language models take an array of messages as input and return a message.
631
+ There are a few different types of messages.
632
+ All messages have a \`role\`, \`content\`, and \`response_metadata\` property.
633
+
634
+ The \`role\` describes WHO is saying the message.
635
+ LangChain has different message classes for different roles.
636
+
637
+ The \`content\` property describes the content of the message.
638
+ This can be a few different things:
639
+
640
+ - A string (most models deal with this type of content)
641
+ - A List of objects (this is used for multi-modal input, where the object contains information about that input type and that input location)
642
+
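+ Both shapes can be constructed directly. For example:
+
+ \`\`\`typescript
+ // Sketch: the two shapes the content property can take.
+ import { HumanMessage } from "@langchain/core/messages";
+
+ // 1. A plain string.
+ const textMessage = new HumanMessage("What is the capital of France?");
+
+ // 2. A list of content blocks (used for multi-modal input).
+ const blockMessage = new HumanMessage({
+   content: [{ type: "text", text: "What is the capital of France?" }],
+ });
+ \`\`\`
+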
643
+ #### HumanMessage
644
+
645
+ This represents a message from the user.
646
+
647
+ #### AIMessage
648
+
649
+ This represents a message from the model. In addition to the \`content\` property, these messages also have:
650
+
651
+ **\`response_metadata\`**
652
+
653
+ The \`response_metadata\` property contains additional metadata about the response. The data here is often specific to each model provider.
654
+ This is where information like log-probs and token usage may be stored.
655
+
656
+ **\`tool_calls\`**
657
+
658
+ These represent a decision from a language model to call a tool. They are included as part of an \`AIMessage\` output.
659
+ They can be accessed from there with the \`.tool_calls\` property.
660
+
661
+ This property returns a list of \`ToolCall\`s. A \`ToolCall\` is an object with the following arguments:
662
+
663
+ - \`name\`: The name of the tool that should be called.
664
+ - \`args\`: The arguments to that tool.
665
+ - \`id\`: The id of that tool call.
666
+
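+ For example, the fields above can be read off an \`AIMessage\` returned by a tool-calling model. A sketch, assuming the \`@langchain/anthropic\` package and a hypothetical \`get_weather\` tool:
+
+ \`\`\`typescript
+ // Sketch: binding a hypothetical tool and reading tool calls off the reply.
+ import { ChatAnthropic } from "@langchain/anthropic";
+ import { tool } from "@langchain/core/tools";
+ import { z } from "zod";
+
+ const getWeather = tool(async ({ location }) => "Sunny in " + location, {
+   name: "get_weather",
+   description: "Get the weather for a location",
+   schema: z.object({ location: z.string() }),
+ });
+
+ const modelWithTools = new ChatAnthropic({
+   model: "claude-3-5-sonnet-latest",
+ }).bindTools([getWeather]);
+
+ const aiMessage = await modelWithTools.invoke("What is the weather in Paris?");
+ for (const toolCall of aiMessage.tool_calls ?? []) {
+   // Each ToolCall has a name, args, and id.
+   console.log(toolCall.name, toolCall.args, toolCall.id);
+ }
+ \`\`\`
+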
667
+ #### SystemMessage
668
+
669
+ This represents a system message, which tells the model how to behave. Not every model provider supports this.
670
+
671
+ #### ToolMessage
672
+
673
+ This represents the result of a tool call. In addition to \`role\` and \`content\`, this message has:
674
+
675
+ - a \`tool_call_id\` field which conveys the id of the call to the tool that was called to produce this result.
676
+ - an \`artifact\` field which can be used to pass along arbitrary artifacts of the tool execution which are useful to track but which should not be sent to the model.
677
+
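+ For example, a tool result can be sent back to the model as:
+
+ \`\`\`typescript
+ // Sketch: constructing a ToolMessage for a completed tool call.
+ // The id and weather result here are placeholders.
+ import { ToolMessage } from "@langchain/core/messages";
+
+ const toolMessage = new ToolMessage({
+   tool_call_id: "call_123", // id of the ToolCall this result answers
+   content: "The weather in Paris is sunny.",
+   // Arbitrary data to keep around without sending it to the model.
+   artifact: { tempC: 22, condition: "sunny" },
+ });
+ \`\`\`
+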
678
+ #### (Legacy) FunctionMessage
679
+
680
+ This is a legacy message type, corresponding to OpenAI's legacy function-calling API. \`ToolMessage\` should be used instead to correspond to the updated tool-calling API.
681
+
682
+ This represents the result of a function call. In addition to \`role\` and \`content\`, this message has a \`name\` parameter which conveys the name of the function that was called to produce this result.
683
+
684
+ ### Prompt templates
685
+
686
+ <span data-heading-keywords="prompt,prompttemplate,chatprompttemplate"></span>
687
+
688
+ Prompt templates help to translate user input and parameters into instructions for a language model.
689
+ This can be used to guide a model's response, helping it understand the context and generate relevant and coherent language-based output.
690
+
691
+ Prompt Templates take as input an object, where each key represents a variable in the prompt template to fill in.
692
+
693
+ Prompt Templates output a PromptValue. This PromptValue can be passed to an LLM or a ChatModel, and can also be cast to a string or an array of messages.
694
+ The reason this PromptValue exists is to make it easy to switch between strings and messages.
695
+
696
+ There are a few different types of prompt templates:
697
+
698
+ #### String PromptTemplates
699
+
700
+ These prompt templates are used to format a single string, and generally are used for simpler inputs.
701
+ For example, a common way to construct and use a PromptTemplate is as follows:
702
+
703
+ \`\`\`typescript
704
+ import { PromptTemplate } from "@langchain/core/prompts";
705
+
706
+ const promptTemplate = PromptTemplate.fromTemplate(
707
+ "Tell me a joke about {topic}"
708
+ );
709
+
710
+ await promptTemplate.invoke({ topic: "cats" });
711
+ \`\`\`
712
+
713
+ #### ChatPromptTemplates
714
+
715
+ These prompt templates are used to format an array of messages. These "templates" consist of an array of templates themselves.
716
+ For example, a common way to construct and use a ChatPromptTemplate is as follows:
717
+
718
+ \`\`\`typescript
719
+ import { ChatPromptTemplate } from "@langchain/core/prompts";
720
+
721
+ const promptTemplate = ChatPromptTemplate.fromMessages([
722
+ ["system", "You are a helpful assistant"],
723
+ ["user", "Tell me a joke about {topic}"],
724
+ ]);
725
+
726
+ await promptTemplate.invoke({ topic: "cats" });
727
+ \`\`\`
728
+
729
+ In the above example, this ChatPromptTemplate will construct two messages when called.
730
+ The first is a system message that has no variables to format.
731
+ The second is a HumanMessage, and will be formatted by the \`topic\` variable the user passes in.
732
+
733
+ #### MessagesPlaceholder
734
+
735
+ <span data-heading-keywords="messagesplaceholder"></span>
736
+
737
+ This prompt template is responsible for adding an array of messages in a particular place.
738
+ In the above ChatPromptTemplate, we saw how we could format two messages, each one a string.
739
+ But what if we wanted the user to pass in an array of messages that we would slot into a particular spot?
740
+ This is how you use MessagesPlaceholder.
741
+
742
+ \`\`\`typescript
743
+ import {
744
+ ChatPromptTemplate,
745
+ MessagesPlaceholder,
746
+ } from "@langchain/core/prompts";
747
+ import { HumanMessage } from "@langchain/core/messages";
748
+
749
+ const promptTemplate = ChatPromptTemplate.fromMessages([
750
+ ["system", "You are a helpful assistant"],
751
+ new MessagesPlaceholder("msgs"),
752
+ ]);
753
+
754
+ promptTemplate.invoke({ msgs: [new HumanMessage({ content: "hi!" })] });
755
+ \`\`\`
756
+
757
+ This will produce an array of two messages, the first one being a system message, and the second one being the HumanMessage we passed in.
758
+ If we had passed in 5 messages, then it would have produced 6 messages in total (the system message plus the 5 passed in).
759
+ This is useful for letting an array of messages be slotted into a particular spot.
760
+
761
+ An alternative way to accomplish the same thing without using the \`MessagesPlaceholder\` class explicitly is:
762
+
763
+ \`\`\`typescript
764
+ const promptTemplate = ChatPromptTemplate.fromMessages([
765
+ ["system", "You are a helpful assistant"],
766
+ ["placeholder", "{msgs}"], // <-- This is the changed part
767
+ ]);
768
+ \`\`\`
769
+
770
+ For specifics on how to use prompt templates, see the [relevant how-to guides here](/docs/how_to/#prompt-templates).
771
+
772
+ ### Example Selectors
773
+
774
+ One common prompting technique for achieving better performance is to include examples as part of the prompt.
775
+ This gives the language model concrete examples of how it should behave.
776
+ Sometimes these examples are hardcoded into the prompt, but for more advanced situations it may be nice to dynamically select them.
777
+ Example Selectors are classes responsible for selecting and then formatting examples into prompts.
778
+
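+ The underlying idea can be sketched without any particular selector class: pick a subset of examples based on the input, then format the selected examples into the prompt. The heuristic below is deliberately trivial and is not LangChain's API:
+
+ \`\`\`typescript
+ // Sketch of the idea only; LangChain ships dedicated example selector
+ // classes (e.g. semantic-similarity based selection) for real use.
+ const examples = [
+   { input: "happy", output: "sad" },
+   { input: "tall", output: "short" },
+   { input: "fast", output: "slow" },
+ ];
+
+ // A toy "selector": keep examples whose input starts with the same letter
+ // as the user input (a stand-in for a real relevance heuristic).
+ const selectExamples = (userInput: string) =>
+   examples.filter((ex) => ex.input[0] === userInput[0]);
+
+ const formatted = selectExamples("tiny")
+   .map((ex) => ex.input + " -> " + ex.output)
+   .join("; ");
+ \`\`\`
+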
779
+ For specifics on how to use example selectors, see the [relevant how-to guides here](/docs/how_to/#example-selectors).
780
+
781
+ ### Output parsers
782
+
783
+ <span data-heading-keywords="output parser"></span>
784
+
785
+ :::note
786
+
787
+ The information here refers to parsers that take a text output from a model and try to parse it into a more structured representation.
788
+ More and more models are supporting function (or tool) calling, which handles this automatically.
789
+ It is recommended to use function/tool calling rather than output parsing.
790
+ See documentation for that [here](/docs/concepts/#function-tool-calling).
791
+
792
+ :::
793
+
794
+ Responsible for taking the output of a model and transforming it to a more suitable format for downstream tasks.
795
+ Useful when you are using LLMs to generate structured data, or to normalize output from chat models and LLMs.
796
+
797
+ There are two main methods an output parser must implement:
798
+
799
+ - "Get format instructions": A method which returns a string containing instructions for how the output of a language model should be formatted.
800
+ - "Parse": A method which takes in a string (assumed to be the response from a language model) and parses it into some structure.
801
+
802
+ And then one optional one:
803
+
804
+ - "Parse with prompt": A method which takes in a string (assumed to be the response from a language model) and a prompt (assumed to be the prompt that generated such a response) and parses it into some structure. The prompt is largely provided in the event the OutputParser wants to retry or fix the output in some way, and needs information from the prompt to do so.
805
+
806
+ Output parsers accept a string or \`BaseMessage\` as input and can return an arbitrary type.
807
+
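+ For example, the built-in \`StringOutputParser\` implements the "parse" side by extracting the text content of a message; structured parsers additionally expose format instructions for use in the prompt. A sketch, assuming the \`@langchain/anthropic\` package:
+
+ \`\`\`typescript
+ // Minimal sketch; the chat model package and model name are assumptions.
+ import { ChatAnthropic } from "@langchain/anthropic";
+ import { StringOutputParser } from "@langchain/core/output_parsers";
+
+ const parser = new StringOutputParser();
+ const reply = await new ChatAnthropic({
+   model: "claude-3-5-sonnet-latest",
+ }).invoke("Tell me a joke about cats");
+
+ // "Parse": accepts the BaseMessage (or a string) and returns its text content.
+ const text = await parser.invoke(reply);
+ \`\`\`
+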
808
+ LangChain has many different types of output parsers. This is a list of output parsers LangChain supports. The table below has various pieces of information:
809
+
810
+ **Name**: The name of the output parser
811
+
812
+ **Supports Streaming**: Whether the output parser supports streaming.
813
+
814
+ **Input Type**: Expected input type. Most output parsers work on both strings and messages, but some (like OpenAI Functions) need a message with specific arguments.
815
+
816
+ **Output Type**: The output type of the object returned by the parser.
817
+
818
+ **Description**: Our commentary on this output parser and when to use it.
819
+
820
+ The current date is ${new Date().toISOString()}`;
821
+
822
+ test('system prompt caching', async () => {
823
+ const model = new ChatAnthropic({
824
+ modelName,
825
+ clientOptions: {
826
+ defaultHeaders: {
827
+ 'anthropic-beta': 'prompt-caching-2024-07-31',
828
+ },
829
+ },
830
+ });
831
+ const messages = [
832
+ new SystemMessage({
833
+ content: [
834
+ {
835
+ type: 'text',
836
+ text: `You are a pirate. Always respond in pirate dialect.\nUse the following as context when answering questions: ${CACHED_TEXT}`,
837
+ cache_control: { type: 'ephemeral' },
838
+ },
839
+ ],
840
+ }),
841
+ new HumanMessage({
842
+ content: 'What types of messages are supported in LangChain?',
843
+ }),
844
+ ];
845
+ const res = await model.invoke(messages);
846
+ expect(
847
+ res.usage_metadata?.input_token_details?.cache_creation
848
+ ).toBeGreaterThan(0);
849
+ expect(res.usage_metadata?.input_token_details?.cache_read).toBe(0);
850
+ const res2 = await model.invoke(messages);
851
+ expect(res2.usage_metadata?.input_token_details?.cache_creation).toBe(0);
852
+ expect(res2.usage_metadata?.input_token_details?.cache_read).toBeGreaterThan(
853
+ 0
854
+ );
855
+ const stream = await model.stream(messages);
856
+ let agg;
857
+ for await (const chunk of stream) {
858
+ agg = agg === undefined ? chunk : concat(agg, chunk);
859
+ }
860
+ expect(agg).toBeDefined();
861
+ expect(agg!.usage_metadata?.input_token_details?.cache_creation).toBe(0);
862
+ expect(agg!.usage_metadata?.input_token_details?.cache_read).toBeGreaterThan(
863
+ 0
864
+ );
865
+ });
866
+
867
+ // TODO: Add proper test with long tool content
868
+ test.skip('tool caching', async () => {
869
+ const model = new ChatAnthropic({
870
+ modelName,
871
+ clientOptions: {
872
+ defaultHeaders: {
873
+ 'anthropic-beta': 'prompt-caching-2024-07-31',
874
+ },
875
+ },
876
+ }).bindTools([
877
+ {
878
+ name: 'get_weather',
879
+ description: 'Get the weather for a specific location',
880
+ input_schema: {
881
+ type: 'object',
882
+ properties: {
883
+ location: {
884
+ type: 'string',
885
+ description: 'Location to get the weather for',
886
+ },
887
+ unit: {
888
+ type: 'string',
889
+ description: 'Temperature unit to return',
890
+ },
891
+ },
892
+ required: ['location'],
893
+ },
894
+ cache_control: { type: 'ephemeral' },
895
+ },
896
+ ]);
897
+ const messages = [
898
+ new HumanMessage({
899
+ content: 'What is the weather in Regensburg?',
900
+ }),
901
+ ];
902
+ const res = await model.invoke(messages);
903
+ console.log(res);
904
+ expect(
905
+ res.usage_metadata?.input_token_details?.cache_creation
906
+ ).toBeGreaterThan(0);
907
+ expect(res.usage_metadata?.input_token_details?.cache_read).toBe(0);
908
+ const res2 = await model.invoke(messages);
909
+ expect(res2.usage_metadata?.input_token_details?.cache_creation).toBe(0);
910
+ expect(res2.usage_metadata?.input_token_details?.cache_read).toBeGreaterThan(
911
+ 0
912
+ );
913
+ });
914
+
915
+ test.skip('Test ChatAnthropic with custom client', async () => {
916
+ const client = new AnthropicVertex();
917
+ const chat = new ChatAnthropic({
918
+ modelName,
919
+ maxRetries: 0,
920
+ createClient: () => client,
921
+ });
922
+ const message = new HumanMessage('Hello!');
923
+ const res = await chat.invoke([message]);
924
+ // console.log({ res });
925
+ expect(res.usage_metadata?.input_token_details).toBeDefined();
926
+ });
927
+
928
+ test('human message caching', async () => {
929
+ const model = new ChatAnthropic({
930
+ modelName,
931
+ });
932
+
933
+ const messages = [
934
+ new SystemMessage({
935
+ content: [
936
+ {
937
+ type: 'text',
938
+ text: `You are a scotsman. Always respond in scotsman dialect.\nUse the following as context when answering questions: ${CACHED_TEXT}`,
939
+ },
940
+ ],
941
+ }),
942
+ new HumanMessage({
943
+ content: [
944
+ {
945
+ type: 'text',
946
+ text: 'What types of messages are supported in LangChain?',
947
+ cache_control: { type: 'ephemeral' },
948
+ },
949
+ ],
950
+ }),
951
+ ];
952
+
953
+ const res = await model.invoke(messages);
954
+ expect(
955
+ res.usage_metadata?.input_token_details?.cache_creation
956
+ ).toBeGreaterThan(0);
957
+ expect(res.usage_metadata?.input_token_details?.cache_read).toBe(0);
958
+ const res2 = await model.invoke(messages);
959
+ expect(res2.usage_metadata?.input_token_details?.cache_creation).toBe(0);
960
+ expect(res2.usage_metadata?.input_token_details?.cache_read).toBeGreaterThan(
961
+ 0
962
+ );
963
+ });
964
+
965
+ test('Can accept PDF documents', async () => {
966
+ const model = new ChatAnthropic({
967
+ modelName: pdfModelName,
968
+ });
969
+
970
+ const pdfPath = './src/llm/anthropic/Jacob_Lee_Resume_2023.pdf';
971
+ const pdfBase64 = await fs.readFile(pdfPath, 'base64');
972
+
973
+ const response = await model.invoke([
974
+ ['system', 'Use the provided documents to answer the question'],
975
+ [
976
+ 'user',
977
+ [
978
+ {
979
+ type: 'document',
980
+ source: {
981
+ type: 'base64',
982
+ media_type: 'application/pdf',
983
+ data: pdfBase64,
984
+ },
985
+ },
986
+ {
987
+ type: 'text',
988
+ text: 'Summarize the contents of this PDF',
989
+ },
990
+ ],
991
+ ],
992
+ ]);
993
+
994
+ expect(response.content.length).toBeGreaterThan(10);
995
+ });
996
+
997
+ describe('Citations', () => {
998
+ test('document blocks', async () => {
999
+ const citationsModel = new ChatAnthropic({
1000
+ model: citationsModelName,
1001
+ });
1002
+ const messages = [
1003
+ {
1004
+ role: 'user',
1005
+ content: [
1006
+ {
1007
+ type: 'document',
1008
+ source: {
1009
+ type: 'text',
1010
+ media_type: 'text/plain',
1011
+ data: "The grass the user is asking about is bluegrass. The sky is orange because it's night.",
1012
+ },
1013
+ title: 'My Document',
1014
+ context: 'This is a trustworthy document.',
1015
+ citations: {
1016
+ enabled: true,
1017
+ },
1018
+ },
1019
+ {
1020
+ type: 'text',
1021
+ text: 'What color is the grass and sky?',
1022
+ },
1023
+ ],
1024
+ },
1025
+ ];
1026
+
1027
+ const response = await citationsModel.invoke(messages);
1028
+
1029
+ expect(response.content.length).toBeGreaterThan(2);
1030
+ expect(Array.isArray(response.content)).toBe(true);
1031
+ const blocksWithCitations = (response.content as any[]).filter(
1032
+ (block) => block.citations !== undefined
1033
+ );
1034
+ expect(blocksWithCitations.length).toEqual(2);
1035
+ expect(typeof blocksWithCitations[0].citations[0]).toEqual('object');
1036
+
1037
+ const stream = await citationsModel.stream(messages);
1038
+ let aggregated;
1039
+ let chunkHasCitation = false;
1040
+ for await (const chunk of stream) {
1041
+ aggregated = aggregated === undefined ? chunk : concat(aggregated, chunk);
1042
+ if (
1043
+ !chunkHasCitation &&
1044
+ Array.isArray(chunk.content) &&
1045
+ chunk.content.some((c: any) => c.citations !== undefined)
1046
+ ) {
1047
+ chunkHasCitation = true;
1048
+ }
1049
+ }
1050
+ expect(chunkHasCitation).toBe(true);
1051
+ expect(Array.isArray(aggregated?.content)).toBe(true);
1052
+ expect(aggregated?.content.length).toBeGreaterThan(2);
1053
+ expect(
1054
+ (aggregated?.content as any[]).some((c) => c.citations !== undefined)
1055
+ ).toBe(true);
1056
+ });
1057
+ describe('search result blocks', () => {
1058
+ const citationsModel = new ChatAnthropic({
1059
+ model: citationsModelName,
1060
+ clientOptions: {
1061
+ defaultHeaders: {
1062
+ 'anthropic-beta': 'search-results-2025-06-09',
1063
+ },
1064
+ },
1065
+ });
1066
+
1067
+ const messages = [
1068
+ {
1069
+ role: 'user',
1070
+ content: [
1071
+ {
1072
+ type: 'search_result',
1073
+ title: 'History of France',
1074
+ source: 'https://example.com/france-history',
1075
+ citations: { enabled: true },
1076
+ content: [
1077
+ {
1078
+ type: 'text',
1079
+ text: 'The capital of France is Paris.',
1080
+ },
1081
+ {
1082
+ type: 'text',
1083
+ text: 'The old capital of France was Lyon.',
1084
+ },
1085
+ ],
1086
+ },
1087
+ {
1088
+ type: 'search_result',
1089
+ title: 'Geography of France',
1090
+ source: 'https://example.com/france-geography',
1091
+ citations: { enabled: true },
1092
+ content: [
1093
+ {
1094
+ type: 'text',
1095
+ text: 'France is a country in Europe.',
1096
+ },
1097
+ {
1098
+ type: 'text',
1099
+ text: 'France borders Spain to the south.',
1100
+ },
1101
+ ],
1102
+ },
1103
+ {
1104
+ type: 'text',
1105
+ text: 'What is the capital of France and where is it located? You must cite your sources.',
1106
+ },
1107
+ ],
1108
+ },
1109
+ ];
1110
+
1111
+ test('without streaming', async () => {
1112
+ const response = await citationsModel.invoke(messages);
1113
+
1114
+ expect(Array.isArray(response.content)).toBe(true);
1115
+ expect(response.content.length).toBeGreaterThan(0);
1116
+
1117
+ // Check that we have cited content
1118
+ const blocksWithCitations = (response.content as any[]).filter(
1119
+ (block) => block.citations !== undefined
1120
+ );
1121
+ expect(blocksWithCitations.length).toBeGreaterThan(0);
1122
+
1123
+ // Verify citation structure
1124
+ const citation = blocksWithCitations[0].citations[0];
1125
+ expect(typeof citation).toBe('object');
1126
+ expect(citation.type).toBe('search_result_location');
1127
+ expect(citation.source).toBeDefined();
1128
+ });
1129
+ test('with streaming', async () => {
1130
+ // Test streaming
1131
+ const stream = await citationsModel.stream(messages);
1132
+ let aggregated;
1133
+ let chunkHasCitation = false;
1134
+ for await (const chunk of stream) {
1135
+ aggregated =
1136
+ aggregated === undefined ? chunk : concat(aggregated, chunk);
1137
+ if (
1138
+ !chunkHasCitation &&
1139
+ Array.isArray(chunk.content) &&
1140
+ chunk.content.some((c: any) => c.citations !== undefined)
1141
+ ) {
1142
+ chunkHasCitation = true;
1143
+ }
1144
+ }
1145
+ expect(chunkHasCitation).toBe(true);
1146
+ expect(Array.isArray(aggregated?.content)).toBe(true);
1147
+ expect(
1148
+ (aggregated?.content as any[]).some((c) => c.citations !== undefined)
1149
+ ).toBe(true);
1150
+ });
1151
+ });
1152
+
1153
+ test('search result blocks from tool', async () => {
1154
+ const ragTool = tool(
1155
+ (): ChatAnthropicContentBlock[] => [
1156
+ {
1157
+ type: 'search_result',
1158
+ title: 'History of France',
1159
+ source: 'https://example.com/france-history',
1160
+ citations: { enabled: true },
1161
+ content: [
1162
+ {
1163
+ type: 'text',
1164
+ text: 'The capital of France is Paris.',
1165
+ },
1166
+ {
1167
+ type: 'text',
1168
+ text: 'France was established as a republic in 1792.',
1169
+ },
1170
+ ],
1171
+ },
1172
+ {
1173
+ type: 'search_result',
1174
+ title: 'Geography of France',
1175
+ source: 'https://example.com/france-geography',
1176
+ citations: { enabled: true },
1177
+ content: [
1178
+ {
1179
+ type: 'text',
1180
+ text: 'France is located in Western Europe.',
1181
+ },
1182
+ {
1183
+ type: 'text',
1184
+ text: 'France has a population of approximately 67 million people.',
1185
+ },
1186
+ ],
1187
+ },
1188
+ ],
1189
+ {
1190
+ name: 'search_knowledge_base',
1191
+ description: 'Search the knowledge base for information about France',
1192
+ schema: z.object({
1193
+ query: z.string().describe('The search query'),
1194
+ }),
1195
+ }
1196
+ );
1197
+
1198
+ const citationsModel = new ChatAnthropic({
1199
+ model: citationsModelName,
1200
+ clientOptions: {
1201
+ defaultHeaders: {
1202
+ 'anthropic-beta': 'search-results-2025-06-09',
1203
+ },
1204
+ },
1205
+ }).bindTools([ragTool]);
1206
+
1207
+ const messages = [
1208
+ new HumanMessage(
1209
+ 'Search for information about France and tell me what you find with proper citations.'
1210
+ ),
1211
+ ];
1212
+
1213
+ const response = await citationsModel.invoke(messages);
1214
+ messages.push(response);
1215
+
1216
+ expect(Array.isArray(response.content)).toBe(true);
1217
+ expect(response.content.length).toBeGreaterThan(0);
1218
+
1219
+ // Check that the model called the tool
1220
+ expect(response.tool_calls?.length).toBeGreaterThan(0);
1221
+ expect(response.tool_calls?.[0].name).toBe('search_knowledge_base');
1222
+
1223
+ const toolResponse = await ragTool.invoke(response.tool_calls![0]);
1224
+ messages.push(toolResponse);
1225
+
1226
+ const response2 = await citationsModel.invoke(messages);
1227
+
1228
+ expect(Array.isArray(response2.content)).toBe(true);
1229
+ expect(response2.content.length).toBeGreaterThan(0);
1230
+ // Make sure that a citation exists somewhere in the content list
1231
+ const citationBlock = (response2.content as any[]).find(
1232
+ (block: any) =>
1233
+ Array.isArray(block.citations) && block.citations.length > 0
1234
+ );
1235
+ expect(citationBlock).toBeDefined();
1236
+ expect(citationBlock.citations[0].type).toBe('search_result_location');
1237
+ expect(citationBlock.citations[0].source).toBeDefined();
1238
+ });
1239
+ });
1240
+
1241
+ test('Test thinking blocks multiturn invoke', async () => {
1242
+ const model = new ChatAnthropic({
1243
+ model: extendedThinkingModelName,
1244
+ maxTokens: 5000,
1245
+ thinking: { type: 'enabled', budget_tokens: 2000 },
1246
+ });
1247
+
1248
+ async function doInvoke(messages: BaseMessage[]) {
1249
+ const response = await model.invoke(messages);
1250
+
1251
+ expect(Array.isArray(response.content)).toBe(true);
1252
+ const content = response.content as AnthropicMessageResponse[];
1253
+ expect(content.some((block) => 'thinking' in (block as any))).toBe(true);
1254
+
1255
+ for (const block of response.content) {
1256
+ expect(typeof block).toBe('object');
1257
+ if ((block as any).type === 'thinking') {
1258
+ expect(Object.keys(block).sort()).toEqual(
1259
+ ['type', 'thinking', 'signature'].sort()
1260
+ );
1261
+ expect((block as any).thinking).toBeTruthy();
1262
+ expect(typeof (block as any).thinking).toBe('string');
1263
+ expect((block as any).signature).toBeTruthy();
1264
+ expect(typeof (block as any).signature).toBe('string');
1265
+ }
1266
+ }
1267
+ return response;
1268
+ }
1269
+
1270
+ const invokeMessages = [new HumanMessage('Hello')];
1271
+
1272
+ invokeMessages.push(await doInvoke(invokeMessages));
1273
+ invokeMessages.push(new HumanMessage('What is 42+7?'));
1274
+
1275
+ // test a second time to make sure that we've got input translation working correctly
1276
+ await model.invoke(invokeMessages);
1277
+ });
1278
+
1279
+ test('Test thinking blocks multiturn streaming', async () => {
1280
+ const model = new ChatAnthropic({
1281
+ model: extendedThinkingModelName,
1282
+ maxTokens: 5000,
1283
+ thinking: { type: 'enabled', budget_tokens: 2000 },
1284
+ });
1285
+
1286
+ async function doStreaming(messages: BaseMessage[]) {
1287
+ let full: AIMessageChunk | null = null;
1288
+ for await (const chunk of await model.stream(messages)) {
1289
+ full = full ? concat(full, chunk) : chunk;
1290
+ }
1291
+ expect(full).toBeInstanceOf(AIMessageChunk);
1292
+ expect(Array.isArray(full?.content)).toBe(true);
1293
+ const content3 = full?.content as AnthropicMessageResponse[];
1294
+ expect(content3.some((block) => 'thinking' in (block as any))).toBe(true);
1295
+
1296
+ for (const block of full?.content || []) {
1297
+ expect(typeof block).toBe('object');
1298
+ if ((block as any).type === 'thinking') {
1299
+ expect(Object.keys(block).sort()).toEqual(
1300
+ ['type', 'thinking', 'signature', 'index'].sort()
1301
+ );
1302
+ expect((block as any).thinking).toBeTruthy();
1303
+ expect(typeof (block as any).thinking).toBe('string');
1304
+ expect((block as any).signature).toBeTruthy();
1305
+ expect(typeof (block as any).signature).toBe('string');
1306
+ }
1307
+ }
1308
+ return full as AIMessageChunk;
1309
+ }
1310
+
1311
+ const streamingMessages = [new HumanMessage('Hello')];
1312
+
1313
+ streamingMessages.push(await doStreaming(streamingMessages));
1314
+ streamingMessages.push(new HumanMessage('What is 42+7?'));
1315
+
1316
+ // test a second time to make sure that we've got input translation working correctly
1317
+ await doStreaming(streamingMessages);
1318
+ });
1319
+
1320
+ test('Test redacted thinking blocks multiturn invoke', async () => {
1321
+ const model = new ChatAnthropic({
1322
+ model: extendedThinkingModelName,
1323
+ maxTokens: 5000,
1324
+ thinking: { type: 'enabled', budget_tokens: 2000 },
1325
+ });
1326
+
1327
+ async function doInvoke(messages: BaseMessage[]) {
1328
+ const response = await model.invoke(messages);
1329
+ let hasReasoning = false;
1330
+
1331
+ for (const block of response.content) {
1332
+ expect(typeof block).toBe('object');
1333
+ if ((block as any).type === 'redacted_thinking') {
1334
+ hasReasoning = true;
1335
+ expect(Object.keys(block).sort()).toEqual(['type', 'data'].sort());
1336
+ expect((block as any).data).toBeTruthy();
1337
+ expect(typeof (block as any).data).toBe('string');
1338
+ }
1339
+ }
1340
+ expect(hasReasoning).toBe(true);
1341
+ return response;
1342
+ }
1343
+
1344
+ const invokeMessages = [
1345
+ new HumanMessage(
1346
+ 'ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB'
1347
+ ),
1348
+ ];
1349
+
1350
+ invokeMessages.push(await doInvoke(invokeMessages));
1351
+ invokeMessages.push(new HumanMessage('What is 42+7?'));
1352
+
1353
+ // test a second time to make sure that we've got input translation working correctly
1354
+ await doInvoke(invokeMessages);
1355
+ });
1356
+
1357
+ test('Test redacted thinking blocks multiturn streaming', async () => {
1358
+ const model = new ChatAnthropic({
1359
+ model: extendedThinkingModelName,
1360
+ maxTokens: 5000,
1361
+ thinking: { type: 'enabled', budget_tokens: 2000 },
1362
+ });
1363
+
1364
+ async function doStreaming(messages: BaseMessage[]) {
1365
+ let full: AIMessageChunk | null = null;
1366
+ for await (const chunk of await model.stream(messages)) {
1367
+ full = full ? concat(full, chunk) : chunk;
1368
+ }
1369
+ expect(full).toBeInstanceOf(AIMessageChunk);
1370
+ expect(Array.isArray(full?.content)).toBe(true);
1371
+ let streamHasReasoning = false;
1372
+
1373
+ for (const block of full?.content || []) {
1374
+ expect(typeof block).toBe('object');
1375
+ if ((block as any).type === 'redacted_thinking') {
1376
+ streamHasReasoning = true;
1377
+ expect(Object.keys(block).sort()).toEqual(
1378
+ ['type', 'data', 'index'].sort()
1379
+ );
1380
+ expect((block as any).data).toBeTruthy();
1381
+ expect(typeof (block as any).data).toBe('string');
1382
+ }
1383
+ }
1384
+ expect(streamHasReasoning).toBe(true);
1385
+ return full as AIMessageChunk;
1386
+ }
1387
+
1388
+ const streamingMessages = [
1389
+ new HumanMessage(
1390
+ 'ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB'
1391
+ ),
1392
+ ];
1393
+
1394
+ streamingMessages.push(await doStreaming(streamingMessages));
1395
+ streamingMessages.push(new HumanMessage('What is 42+7?'));
1396
+
1397
+ // test a second time to make sure that we've got input translation working correctly
1398
+ await doStreaming(streamingMessages);
1399
+ });
1400
+
1401
+ test('Can handle google function calling blocks in content', async () => {
1402
+ const chat = new ChatAnthropic({
1403
+ modelName: 'claude-3-7-sonnet-latest',
1404
+ maxRetries: 0,
1405
+ });
1406
+ const toolCallId = 'tool_call_id';
1407
+ const messages = [
1408
+ new SystemMessage("You're a helpful assistant"),
1409
+ new HumanMessage('What is the weather like in San Francisco?'),
1410
+ new AIMessage({
1411
+ content: [
1412
+ {
1413
+ // Pass a content block with the `functionCall` object that Google returns.
1414
+ functionCall: {
1415
+ args: {
1416
+ location: 'san francisco',
1417
+ },
1418
+ name: 'get_weather',
1419
+ },
1420
+ },
1421
+ ],
1422
+ tool_calls: [
1423
+ {
1424
+ id: toolCallId,
1425
+ name: 'get_weather',
1426
+ args: {
1427
+ location: 'san francisco',
1428
+ },
1429
+ },
1430
+ ],
1431
+ }),
1432
+ new ToolMessage({
1433
+ tool_call_id: toolCallId,
1434
+ content: 'The weather is sunny',
1435
+ }),
1436
+ new HumanMessage(
1437
+ 'Give me a one sentence description of what the sky looks like.'
1438
+ ),
1439
+ ];
1440
+ const res = await chat.invoke(messages);
1441
+ expect(res.content.length).toBeGreaterThan(1);
1442
+ });