@copilotkit/runtime 1.54.1 → 1.55.0-next.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (337)
  1. package/.eslintrc.js +4 -4
  2. package/CHANGELOG.md +125 -113
  3. package/dist/_virtual/_rolldown/runtime.mjs +25 -1
  4. package/dist/agent/index.cjs +654 -0
  5. package/dist/agent/index.cjs.map +1 -0
  6. package/dist/agent/index.d.cts +263 -0
  7. package/dist/agent/index.d.cts.map +1 -0
  8. package/dist/agent/index.d.mts +263 -0
  9. package/dist/agent/index.d.mts.map +1 -0
  10. package/dist/agent/index.mjs +646 -0
  11. package/dist/agent/index.mjs.map +1 -0
  12. package/dist/graphql/message-conversion/agui-to-gql.cjs.map +1 -1
  13. package/dist/graphql/message-conversion/agui-to-gql.mjs.map +1 -1
  14. package/dist/lib/integrations/nextjs/app-router.cjs +2 -2
  15. package/dist/lib/integrations/nextjs/app-router.cjs.map +1 -1
  16. package/dist/lib/integrations/nextjs/app-router.mjs +1 -1
  17. package/dist/lib/integrations/nextjs/app-router.mjs.map +1 -1
  18. package/dist/lib/integrations/node-http/index.cjs +2 -3
  19. package/dist/lib/integrations/node-http/index.cjs.map +1 -1
  20. package/dist/lib/integrations/node-http/index.mjs +1 -1
  21. package/dist/lib/integrations/node-http/index.mjs.map +1 -1
  22. package/dist/lib/runtime/agent-integrations/langgraph/agent.cjs +1 -1
  23. package/dist/lib/runtime/agent-integrations/langgraph/agent.d.cts +2 -2
  24. package/dist/lib/runtime/agent-integrations/langgraph/agent.d.cts.map +1 -1
  25. package/dist/lib/runtime/agent-integrations/langgraph/agent.d.mts +3 -3
  26. package/dist/lib/runtime/agent-integrations/langgraph/agent.d.mts.map +1 -1
  27. package/dist/lib/runtime/agent-integrations/langgraph/agent.mjs +1 -1
  28. package/dist/lib/runtime/copilot-runtime.cjs +7 -5
  29. package/dist/lib/runtime/copilot-runtime.cjs.map +1 -1
  30. package/dist/lib/runtime/copilot-runtime.d.cts +10 -8
  31. package/dist/lib/runtime/copilot-runtime.d.cts.map +1 -1
  32. package/dist/lib/runtime/copilot-runtime.d.mts +10 -8
  33. package/dist/lib/runtime/copilot-runtime.d.mts.map +1 -1
  34. package/dist/lib/runtime/copilot-runtime.mjs +7 -5
  35. package/dist/lib/runtime/copilot-runtime.mjs.map +1 -1
  36. package/dist/lib/runtime/telemetry-agent-runner.cjs +2 -2
  37. package/dist/lib/runtime/telemetry-agent-runner.cjs.map +1 -1
  38. package/dist/lib/runtime/telemetry-agent-runner.d.cts +2 -1
  39. package/dist/lib/runtime/telemetry-agent-runner.d.cts.map +1 -1
  40. package/dist/lib/runtime/telemetry-agent-runner.d.mts +2 -1
  41. package/dist/lib/runtime/telemetry-agent-runner.d.mts.map +1 -1
  42. package/dist/lib/runtime/telemetry-agent-runner.mjs +1 -1
  43. package/dist/lib/runtime/telemetry-agent-runner.mjs.map +1 -1
  44. package/dist/lib/telemetry-client.cjs +1 -1
  45. package/dist/lib/telemetry-client.mjs +1 -1
  46. package/dist/package.cjs +21 -4
  47. package/dist/package.mjs +21 -4
  48. package/dist/service-adapters/anthropic/anthropic-adapter.d.mts +1 -1
  49. package/dist/v2/index.cjs +41 -15
  50. package/dist/v2/index.d.cts +14 -2
  51. package/dist/v2/index.d.mts +14 -2
  52. package/dist/v2/index.mjs +13 -4
  53. package/dist/v2/runtime/endpoints/express-single.cjs +190 -0
  54. package/dist/v2/runtime/endpoints/express-single.cjs.map +1 -0
  55. package/dist/v2/runtime/endpoints/express-single.d.cts +16 -0
  56. package/dist/v2/runtime/endpoints/express-single.d.cts.map +1 -0
  57. package/dist/v2/runtime/endpoints/express-single.d.mts +16 -0
  58. package/dist/v2/runtime/endpoints/express-single.d.mts.map +1 -0
  59. package/dist/v2/runtime/endpoints/express-single.mjs +187 -0
  60. package/dist/v2/runtime/endpoints/express-single.mjs.map +1 -0
  61. package/dist/v2/runtime/endpoints/express-utils.cjs +119 -0
  62. package/dist/v2/runtime/endpoints/express-utils.cjs.map +1 -0
  63. package/dist/v2/runtime/endpoints/express-utils.mjs +117 -0
  64. package/dist/v2/runtime/endpoints/express-utils.mjs.map +1 -0
  65. package/dist/v2/runtime/endpoints/express.cjs +217 -0
  66. package/dist/v2/runtime/endpoints/express.cjs.map +1 -0
  67. package/dist/v2/runtime/endpoints/express.d.cts +16 -0
  68. package/dist/v2/runtime/endpoints/express.d.cts.map +1 -0
  69. package/dist/v2/runtime/endpoints/express.d.mts +16 -0
  70. package/dist/v2/runtime/endpoints/express.d.mts.map +1 -0
  71. package/dist/v2/runtime/endpoints/express.mjs +214 -0
  72. package/dist/v2/runtime/endpoints/express.mjs.map +1 -0
  73. package/dist/v2/runtime/endpoints/hono-single.cjs +141 -0
  74. package/dist/v2/runtime/endpoints/hono-single.cjs.map +1 -0
  75. package/dist/v2/runtime/endpoints/hono-single.d.cts +41 -0
  76. package/dist/v2/runtime/endpoints/hono-single.d.cts.map +1 -0
  77. package/dist/v2/runtime/endpoints/hono-single.d.mts +41 -0
  78. package/dist/v2/runtime/endpoints/hono-single.d.mts.map +1 -0
  79. package/dist/v2/runtime/endpoints/hono-single.mjs +140 -0
  80. package/dist/v2/runtime/endpoints/hono-single.mjs.map +1 -0
  81. package/dist/v2/runtime/endpoints/hono.cjs +248 -0
  82. package/dist/v2/runtime/endpoints/hono.cjs.map +1 -0
  83. package/dist/v2/runtime/endpoints/hono.d.cts +164 -0
  84. package/dist/v2/runtime/endpoints/hono.d.cts.map +1 -0
  85. package/dist/v2/runtime/endpoints/hono.d.mts +164 -0
  86. package/dist/v2/runtime/endpoints/hono.d.mts.map +1 -0
  87. package/dist/v2/runtime/endpoints/hono.mjs +247 -0
  88. package/dist/v2/runtime/endpoints/hono.mjs.map +1 -0
  89. package/dist/v2/runtime/endpoints/index.d.cts +5 -0
  90. package/dist/v2/runtime/endpoints/index.d.mts +5 -0
  91. package/dist/v2/runtime/endpoints/single-route-helpers.cjs +68 -0
  92. package/dist/v2/runtime/endpoints/single-route-helpers.cjs.map +1 -0
  93. package/dist/v2/runtime/endpoints/single-route-helpers.mjs +65 -0
  94. package/dist/v2/runtime/endpoints/single-route-helpers.mjs.map +1 -0
  95. package/dist/v2/runtime/handlers/get-runtime-info.cjs +51 -0
  96. package/dist/v2/runtime/handlers/get-runtime-info.cjs.map +1 -0
  97. package/dist/v2/runtime/handlers/get-runtime-info.mjs +51 -0
  98. package/dist/v2/runtime/handlers/get-runtime-info.mjs.map +1 -0
  99. package/dist/v2/runtime/handlers/handle-connect.cjs +49 -0
  100. package/dist/v2/runtime/handlers/handle-connect.cjs.map +1 -0
  101. package/dist/v2/runtime/handlers/handle-connect.mjs +49 -0
  102. package/dist/v2/runtime/handlers/handle-connect.mjs.map +1 -0
  103. package/dist/v2/runtime/handlers/handle-run.cjs +61 -0
  104. package/dist/v2/runtime/handlers/handle-run.cjs.map +1 -0
  105. package/dist/v2/runtime/handlers/handle-run.mjs +61 -0
  106. package/dist/v2/runtime/handlers/handle-run.mjs.map +1 -0
  107. package/dist/v2/runtime/handlers/handle-stop.cjs +47 -0
  108. package/dist/v2/runtime/handlers/handle-stop.cjs.map +1 -0
  109. package/dist/v2/runtime/handlers/handle-stop.mjs +46 -0
  110. package/dist/v2/runtime/handlers/handle-stop.mjs.map +1 -0
  111. package/dist/v2/runtime/handlers/handle-transcribe.cjs +112 -0
  112. package/dist/v2/runtime/handlers/handle-transcribe.cjs.map +1 -0
  113. package/dist/v2/runtime/handlers/handle-transcribe.mjs +111 -0
  114. package/dist/v2/runtime/handlers/handle-transcribe.mjs.map +1 -0
  115. package/dist/v2/runtime/handlers/header-utils.cjs +26 -0
  116. package/dist/v2/runtime/handlers/header-utils.cjs.map +1 -0
  117. package/dist/v2/runtime/handlers/header-utils.mjs +25 -0
  118. package/dist/v2/runtime/handlers/header-utils.mjs.map +1 -0
  119. package/dist/v2/runtime/handlers/intelligence/connect.cjs +37 -0
  120. package/dist/v2/runtime/handlers/intelligence/connect.cjs.map +1 -0
  121. package/dist/v2/runtime/handlers/intelligence/connect.mjs +37 -0
  122. package/dist/v2/runtime/handlers/intelligence/connect.mjs.map +1 -0
  123. package/dist/v2/runtime/handlers/intelligence/run.cjs +89 -0
  124. package/dist/v2/runtime/handlers/intelligence/run.cjs.map +1 -0
  125. package/dist/v2/runtime/handlers/intelligence/run.mjs +88 -0
  126. package/dist/v2/runtime/handlers/intelligence/run.mjs.map +1 -0
  127. package/dist/v2/runtime/handlers/intelligence/thread-names.cjs +146 -0
  128. package/dist/v2/runtime/handlers/intelligence/thread-names.cjs.map +1 -0
  129. package/dist/v2/runtime/handlers/intelligence/thread-names.mjs +145 -0
  130. package/dist/v2/runtime/handlers/intelligence/thread-names.mjs.map +1 -0
  131. package/dist/v2/runtime/handlers/intelligence/threads.cjs +159 -0
  132. package/dist/v2/runtime/handlers/intelligence/threads.cjs.map +1 -0
  133. package/dist/v2/runtime/handlers/intelligence/threads.mjs +154 -0
  134. package/dist/v2/runtime/handlers/intelligence/threads.mjs.map +1 -0
  135. package/dist/v2/runtime/handlers/shared/agent-utils.cjs +74 -0
  136. package/dist/v2/runtime/handlers/shared/agent-utils.cjs.map +1 -0
  137. package/dist/v2/runtime/handlers/shared/agent-utils.mjs +70 -0
  138. package/dist/v2/runtime/handlers/shared/agent-utils.mjs.map +1 -0
  139. package/dist/v2/runtime/handlers/shared/intelligence-utils.cjs +21 -0
  140. package/dist/v2/runtime/handlers/shared/intelligence-utils.cjs.map +1 -0
  141. package/dist/v2/runtime/handlers/shared/intelligence-utils.mjs +20 -0
  142. package/dist/v2/runtime/handlers/shared/intelligence-utils.mjs.map +1 -0
  143. package/dist/v2/runtime/handlers/shared/json-response.cjs +12 -0
  144. package/dist/v2/runtime/handlers/shared/json-response.cjs.map +1 -0
  145. package/dist/v2/runtime/handlers/shared/json-response.mjs +10 -0
  146. package/dist/v2/runtime/handlers/shared/json-response.mjs.map +1 -0
  147. package/dist/v2/runtime/handlers/shared/resolve-intelligence-user.cjs +20 -0
  148. package/dist/v2/runtime/handlers/shared/resolve-intelligence-user.cjs.map +1 -0
  149. package/dist/v2/runtime/handlers/shared/resolve-intelligence-user.mjs +20 -0
  150. package/dist/v2/runtime/handlers/shared/resolve-intelligence-user.mjs.map +1 -0
  151. package/dist/v2/runtime/handlers/shared/sse-response.cjs +69 -0
  152. package/dist/v2/runtime/handlers/shared/sse-response.cjs.map +1 -0
  153. package/dist/v2/runtime/handlers/shared/sse-response.mjs +68 -0
  154. package/dist/v2/runtime/handlers/shared/sse-response.mjs.map +1 -0
  155. package/dist/v2/runtime/handlers/sse/connect.cjs +18 -0
  156. package/dist/v2/runtime/handlers/sse/connect.cjs.map +1 -0
  157. package/dist/v2/runtime/handlers/sse/connect.mjs +18 -0
  158. package/dist/v2/runtime/handlers/sse/connect.mjs.map +1 -0
  159. package/dist/v2/runtime/handlers/sse/run.cjs +18 -0
  160. package/dist/v2/runtime/handlers/sse/run.cjs.map +1 -0
  161. package/dist/v2/runtime/handlers/sse/run.mjs +18 -0
  162. package/dist/v2/runtime/handlers/sse/run.mjs.map +1 -0
  163. package/dist/v2/runtime/index.d.cts +13 -0
  164. package/dist/v2/runtime/index.d.mts +14 -0
  165. package/dist/v2/runtime/intelligence-platform/client.cjs +333 -0
  166. package/dist/v2/runtime/intelligence-platform/client.cjs.map +1 -0
  167. package/dist/v2/runtime/intelligence-platform/client.d.cts +336 -0
  168. package/dist/v2/runtime/intelligence-platform/client.d.cts.map +1 -0
  169. package/dist/v2/runtime/intelligence-platform/client.d.mts +336 -0
  170. package/dist/v2/runtime/intelligence-platform/client.d.mts.map +1 -0
  171. package/dist/v2/runtime/intelligence-platform/client.mjs +331 -0
  172. package/dist/v2/runtime/intelligence-platform/client.mjs.map +1 -0
  173. package/dist/v2/runtime/intelligence-platform/index.d.mts +2 -0
  174. package/dist/v2/runtime/middleware-sse-parser.cjs +138 -0
  175. package/dist/v2/runtime/middleware-sse-parser.cjs.map +1 -0
  176. package/dist/v2/runtime/middleware-sse-parser.d.cts +22 -0
  177. package/dist/v2/runtime/middleware-sse-parser.d.cts.map +1 -0
  178. package/dist/v2/runtime/middleware-sse-parser.d.mts +22 -0
  179. package/dist/v2/runtime/middleware-sse-parser.d.mts.map +1 -0
  180. package/dist/v2/runtime/middleware-sse-parser.mjs +137 -0
  181. package/dist/v2/runtime/middleware-sse-parser.mjs.map +1 -0
  182. package/dist/v2/runtime/middleware.cjs +35 -0
  183. package/dist/v2/runtime/middleware.cjs.map +1 -0
  184. package/dist/v2/runtime/middleware.d.cts +32 -0
  185. package/dist/v2/runtime/middleware.d.cts.map +1 -0
  186. package/dist/v2/runtime/middleware.d.mts +32 -0
  187. package/dist/v2/runtime/middleware.d.mts.map +1 -0
  188. package/dist/v2/runtime/middleware.mjs +33 -0
  189. package/dist/v2/runtime/middleware.mjs.map +1 -0
  190. package/dist/v2/runtime/runner/agent-runner.cjs +8 -0
  191. package/dist/v2/runtime/runner/agent-runner.cjs.map +1 -0
  192. package/dist/v2/runtime/runner/agent-runner.d.cts +32 -0
  193. package/dist/v2/runtime/runner/agent-runner.d.cts.map +1 -0
  194. package/dist/v2/runtime/runner/agent-runner.d.mts +32 -0
  195. package/dist/v2/runtime/runner/agent-runner.d.mts.map +1 -0
  196. package/dist/v2/runtime/runner/agent-runner.mjs +7 -0
  197. package/dist/v2/runtime/runner/agent-runner.mjs.map +1 -0
  198. package/dist/v2/runtime/runner/in-memory.cjs +223 -0
  199. package/dist/v2/runtime/runner/in-memory.cjs.map +1 -0
  200. package/dist/v2/runtime/runner/in-memory.d.cts +15 -0
  201. package/dist/v2/runtime/runner/in-memory.d.cts.map +1 -0
  202. package/dist/v2/runtime/runner/in-memory.d.mts +15 -0
  203. package/dist/v2/runtime/runner/in-memory.d.mts.map +1 -0
  204. package/dist/v2/runtime/runner/in-memory.mjs +222 -0
  205. package/dist/v2/runtime/runner/in-memory.mjs.map +1 -0
  206. package/dist/v2/runtime/runner/index.d.cts +6 -0
  207. package/dist/v2/runtime/runner/index.d.mts +6 -0
  208. package/dist/v2/runtime/runner/index.mjs +7 -0
  209. package/dist/v2/runtime/runner/intelligence.cjs +246 -0
  210. package/dist/v2/runtime/runner/intelligence.cjs.map +1 -0
  211. package/dist/v2/runtime/runner/intelligence.d.cts +57 -0
  212. package/dist/v2/runtime/runner/intelligence.d.cts.map +1 -0
  213. package/dist/v2/runtime/runner/intelligence.d.mts +57 -0
  214. package/dist/v2/runtime/runner/intelligence.d.mts.map +1 -0
  215. package/dist/v2/runtime/runner/intelligence.mjs +245 -0
  216. package/dist/v2/runtime/runner/intelligence.mjs.map +1 -0
  217. package/dist/v2/runtime/runtime.cjs +101 -0
  218. package/dist/v2/runtime/runtime.cjs.map +1 -0
  219. package/dist/v2/runtime/runtime.d.cts +132 -0
  220. package/dist/v2/runtime/runtime.d.cts.map +1 -0
  221. package/dist/v2/runtime/runtime.d.mts +133 -0
  222. package/dist/v2/runtime/runtime.d.mts.map +1 -0
  223. package/dist/v2/runtime/runtime.mjs +97 -0
  224. package/dist/v2/runtime/runtime.mjs.map +1 -0
  225. package/dist/v2/runtime/telemetry/scarf-client.cjs +32 -0
  226. package/dist/v2/runtime/telemetry/scarf-client.cjs.map +1 -0
  227. package/dist/v2/runtime/telemetry/scarf-client.mjs +32 -0
  228. package/dist/v2/runtime/telemetry/scarf-client.mjs.map +1 -0
  229. package/dist/v2/runtime/telemetry/telemetry-client.cjs +35 -0
  230. package/dist/v2/runtime/telemetry/telemetry-client.cjs.map +1 -0
  231. package/dist/v2/runtime/telemetry/telemetry-client.mjs +35 -0
  232. package/dist/v2/runtime/telemetry/telemetry-client.mjs.map +1 -0
  233. package/dist/v2/runtime/transcription-service/transcription-service.cjs +8 -0
  234. package/dist/v2/runtime/transcription-service/transcription-service.cjs.map +1 -0
  235. package/dist/v2/runtime/transcription-service/transcription-service.d.cts +15 -0
  236. package/dist/v2/runtime/transcription-service/transcription-service.d.cts.map +1 -0
  237. package/dist/v2/runtime/transcription-service/transcription-service.d.mts +15 -0
  238. package/dist/v2/runtime/transcription-service/transcription-service.d.mts.map +1 -0
  239. package/dist/v2/runtime/transcription-service/transcription-service.mjs +7 -0
  240. package/dist/v2/runtime/transcription-service/transcription-service.mjs.map +1 -0
  241. package/package.json +24 -7
  242. package/src/agent/__tests__/ai-sdk-v6-compat.test.ts +116 -0
  243. package/src/agent/__tests__/basic-agent.test.ts +1698 -0
  244. package/src/agent/__tests__/config-tools-execution.test.ts +516 -0
  245. package/src/agent/__tests__/mcp-clients.test.ts +260 -0
  246. package/src/agent/__tests__/property-overrides.test.ts +598 -0
  247. package/src/agent/__tests__/standard-schema-tools.test.ts +313 -0
  248. package/src/agent/__tests__/standard-schema-types.test.ts +158 -0
  249. package/src/agent/__tests__/state-tools.test.ts +436 -0
  250. package/src/agent/__tests__/test-helpers.ts +193 -0
  251. package/src/agent/__tests__/utils.test.ts +536 -0
  252. package/src/agent/__tests__/zod-regression.test.ts +350 -0
  253. package/src/agent/index.ts +1329 -0
  254. package/src/graphql/message-conversion/agui-to-gql.test.ts +1 -1
  255. package/src/graphql/message-conversion/agui-to-gql.ts +1 -1
  256. package/src/graphql/message-conversion/gql-to-agui.ts +1 -1
  257. package/src/graphql/message-conversion/roundtrip-conversion.test.ts +1 -1
  258. package/src/lib/integrations/nextjs/app-router.ts +2 -2
  259. package/src/lib/integrations/node-http/index.ts +2 -2
  260. package/src/lib/runtime/copilot-runtime.ts +3 -5
  261. package/src/lib/runtime/telemetry-agent-runner.ts +1 -1
  262. package/src/service-adapters/conversion.test.ts +1 -1
  263. package/src/service-adapters/conversion.ts +1 -28
  264. package/src/v2/index.ts +5 -2
  265. package/src/v2/runtime/__tests__/cors-credentials.test.ts +320 -0
  266. package/src/v2/runtime/__tests__/express-abort-signal.test.ts +25 -0
  267. package/src/v2/runtime/__tests__/express-body-order.test.ts +76 -0
  268. package/src/v2/runtime/__tests__/express-single-sse.test.ts +122 -0
  269. package/src/v2/runtime/__tests__/get-runtime-info.test.ts +141 -0
  270. package/src/v2/runtime/__tests__/handle-connect.test.ts +423 -0
  271. package/src/v2/runtime/__tests__/handle-run.test.ts +910 -0
  272. package/src/v2/runtime/__tests__/handle-threads.test.ts +388 -0
  273. package/src/v2/runtime/__tests__/handle-transcribe.test.ts +301 -0
  274. package/src/v2/runtime/__tests__/header-utils.test.ts +88 -0
  275. package/src/v2/runtime/__tests__/in-process-agent-runner-messages.test.ts +230 -0
  276. package/src/v2/runtime/__tests__/in-process-agent-runner.test.ts +1030 -0
  277. package/src/v2/runtime/__tests__/middleware-express.test.ts +206 -0
  278. package/src/v2/runtime/__tests__/middleware-single-express.test.ts +211 -0
  279. package/src/v2/runtime/__tests__/middleware-single.test.ts +225 -0
  280. package/src/v2/runtime/__tests__/middleware-sse-parser.test.ts +187 -0
  281. package/src/v2/runtime/__tests__/middleware.test.ts +251 -0
  282. package/src/v2/runtime/__tests__/routing-express.test.ts +174 -0
  283. package/src/v2/runtime/__tests__/routing-single-express.test.ts +168 -0
  284. package/src/v2/runtime/__tests__/routing-single.test.ts +193 -0
  285. package/src/v2/runtime/__tests__/routing.test.ts +257 -0
  286. package/src/v2/runtime/__tests__/runtime.test.ts +123 -0
  287. package/src/v2/runtime/__tests__/telemetry.test.ts +167 -0
  288. package/src/v2/runtime/__tests__/thread-names.test.ts +188 -0
  289. package/src/v2/runtime/endpoints/express-single.ts +231 -0
  290. package/src/v2/runtime/endpoints/express-utils.ts +182 -0
  291. package/src/v2/runtime/endpoints/express.ts +275 -0
  292. package/src/v2/runtime/endpoints/hono-single.ts +212 -0
  293. package/src/v2/runtime/endpoints/hono.ts +314 -0
  294. package/src/v2/runtime/endpoints/index.ts +4 -0
  295. package/src/v2/runtime/endpoints/single-route-helpers.ts +125 -0
  296. package/src/v2/runtime/express.ts +2 -0
  297. package/src/v2/runtime/handler.ts +3 -0
  298. package/src/v2/runtime/handlers/get-runtime-info.ts +79 -0
  299. package/src/v2/runtime/handlers/handle-connect.ts +76 -0
  300. package/src/v2/runtime/handlers/handle-run.ts +89 -0
  301. package/src/v2/runtime/handlers/handle-stop.ts +76 -0
  302. package/src/v2/runtime/handlers/handle-threads.ts +7 -0
  303. package/src/v2/runtime/handlers/handle-transcribe.ts +256 -0
  304. package/src/v2/runtime/handlers/header-utils.ts +24 -0
  305. package/src/v2/runtime/handlers/intelligence/connect.ts +65 -0
  306. package/src/v2/runtime/handlers/intelligence/run.ts +152 -0
  307. package/src/v2/runtime/handlers/intelligence/thread-names.ts +246 -0
  308. package/src/v2/runtime/handlers/intelligence/threads.ts +233 -0
  309. package/src/v2/runtime/handlers/shared/agent-utils.ts +136 -0
  310. package/src/v2/runtime/handlers/shared/intelligence-utils.ts +21 -0
  311. package/src/v2/runtime/handlers/shared/json-response.ts +6 -0
  312. package/src/v2/runtime/handlers/shared/resolve-intelligence-user.ts +25 -0
  313. package/src/v2/runtime/handlers/shared/sse-response.ts +100 -0
  314. package/src/v2/runtime/handlers/sse/connect.ts +24 -0
  315. package/src/v2/runtime/handlers/sse/run.ts +27 -0
  316. package/src/v2/runtime/index.ts +20 -0
  317. package/src/v2/runtime/intelligence-platform/__tests__/client.test.ts +605 -0
  318. package/src/v2/runtime/intelligence-platform/client.ts +659 -0
  319. package/src/v2/runtime/intelligence-platform/index.ts +10 -0
  320. package/src/v2/runtime/middleware-sse-parser.ts +200 -0
  321. package/src/v2/runtime/middleware.ts +115 -0
  322. package/src/v2/runtime/runner/__tests__/finalize-events.test.ts +109 -0
  323. package/src/v2/runtime/runner/__tests__/in-memory-runner.e2e.test.ts +775 -0
  324. package/src/v2/runtime/runner/__tests__/in-memory-runner.test.ts +363 -0
  325. package/src/v2/runtime/runner/__tests__/intelligence-runner.test.ts +981 -0
  326. package/src/v2/runtime/runner/agent-runner.ts +36 -0
  327. package/src/v2/runtime/runner/in-memory.ts +381 -0
  328. package/src/v2/runtime/runner/index.ts +4 -0
  329. package/src/v2/runtime/runner/intelligence.ts +429 -0
  330. package/src/v2/runtime/runtime.ts +260 -0
  331. package/src/v2/runtime/telemetry/events.ts +35 -0
  332. package/src/v2/runtime/telemetry/index.ts +7 -0
  333. package/src/v2/runtime/telemetry/scarf-client.ts +39 -0
  334. package/src/v2/runtime/telemetry/telemetry-client.ts +70 -0
  335. package/src/v2/runtime/transcription-service/transcription-service.ts +11 -0
  336. package/tsconfig.json +9 -2
  337. package/tsdown.config.ts +1 -0
@@ -0,0 +1,1698 @@
1
+ import { describe, it, expect, vi, beforeEach, afterEach } from "vitest";
2
+ import { z } from "zod";
3
+ import { BasicAgent, defineTool, type ToolDefinition } from "../index";
4
+ import {
5
+ EventType,
6
+ type BaseEvent,
7
+ type ReasoningStartEvent,
8
+ type RunAgentInput,
9
+ } from "@ag-ui/client";
10
+ import { streamText } from "ai";
11
+ import {
12
+ mockStreamTextResponse,
13
+ textStart,
14
+ textDelta,
15
+ finish,
16
+ abort,
17
+ error,
18
+ collectEvents,
19
+ toolCallStreamingStart,
20
+ toolCallDelta,
21
+ toolCall,
22
+ toolResult,
23
+ reasoningStart,
24
+ reasoningDelta,
25
+ reasoningEnd,
26
+ } from "./test-helpers";
27
+
28
+ // Mock the ai module
29
+ vi.mock("ai", () => ({
30
+ streamText: vi.fn(),
31
+ tool: vi.fn((config) => config),
32
+ }));
33
+
34
+ // Mock the SDK clients
35
+ vi.mock("@ai-sdk/openai", () => ({
36
+ createOpenAI: vi.fn(() => (modelId: string) => ({
37
+ modelId,
38
+ provider: "openai",
39
+ })),
40
+ }));
41
+
42
+ vi.mock("@ai-sdk/anthropic", () => ({
43
+ createAnthropic: vi.fn(() => (modelId: string) => ({
44
+ modelId,
45
+ provider: "anthropic",
46
+ })),
47
+ }));
48
+
49
+ vi.mock("@ai-sdk/google", () => ({
50
+ createGoogleGenerativeAI: vi.fn(() => (modelId: string) => ({
51
+ modelId,
52
+ provider: "google",
53
+ })),
54
+ }));
55
+
56
+ describe("BasicAgent", () => {
57
+ const originalEnv = process.env;
58
+
59
+ beforeEach(() => {
60
+ vi.clearAllMocks();
61
+ process.env = { ...originalEnv };
62
+ process.env.OPENAI_API_KEY = "test-key";
63
+ process.env.ANTHROPIC_API_KEY = "test-key";
64
+ process.env.GOOGLE_API_KEY = "test-key";
65
+ });
66
+
67
+ afterEach(() => {
68
+ process.env = originalEnv;
69
+ });
70
+
71
+ describe("Basic Event Emission", () => {
72
+ it("should emit RUN_STARTED and RUN_FINISHED events", async () => {
73
+ const agent = new BasicAgent({
74
+ model: "openai/gpt-4o",
75
+ });
76
+
77
+ vi.mocked(streamText).mockReturnValue(
78
+ mockStreamTextResponse([textDelta("Hello"), finish()]) as any,
79
+ );
80
+
81
+ const input: RunAgentInput = {
82
+ threadId: "thread1",
83
+ runId: "run1",
84
+ messages: [],
85
+ tools: [],
86
+ context: [],
87
+ state: {},
88
+ };
89
+
90
+ const events = await collectEvents(agent["run"](input));
91
+
92
+ expect(events[0]).toMatchObject({
93
+ type: EventType.RUN_STARTED,
94
+ threadId: "thread1",
95
+ runId: "run1",
96
+ });
97
+
98
+ expect(events[events.length - 1]).toMatchObject({
99
+ type: EventType.RUN_FINISHED,
100
+ threadId: "thread1",
101
+ runId: "run1",
102
+ });
103
+ });
104
+
105
+ it("should emit TEXT_MESSAGE_CHUNK events for text deltas", async () => {
106
+ const agent = new BasicAgent({
107
+ model: "openai/gpt-4o",
108
+ });
109
+
110
+ vi.mocked(streamText).mockReturnValue(
111
+ mockStreamTextResponse([
112
+ textDelta("Hello"),
113
+ textDelta(" world"),
114
+ finish(),
115
+ ]) as any,
116
+ );
117
+
118
+ const input: RunAgentInput = {
119
+ threadId: "thread1",
120
+ runId: "run1",
121
+ messages: [],
122
+ tools: [],
123
+ context: [],
124
+ state: {},
125
+ };
126
+
127
+ const events = await collectEvents(agent["run"](input));
128
+
129
+ const textEvents = events.filter(
130
+ (e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK,
131
+ );
132
+ expect(textEvents).toHaveLength(2);
133
+ expect(textEvents[0]).toMatchObject({
134
+ type: EventType.TEXT_MESSAGE_CHUNK,
135
+ role: "assistant",
136
+ delta: "Hello",
137
+ });
138
+ expect(textEvents[1]).toMatchObject({
139
+ type: EventType.TEXT_MESSAGE_CHUNK,
140
+ delta: " world",
141
+ });
142
+ });
143
+
144
+ it("should generate unique messageId when provider returns id '0'", async () => {
145
+ const agent = new BasicAgent({
146
+ model: "openai/gpt-4o",
147
+ });
148
+
149
+ vi.mocked(streamText).mockReturnValue(
150
+ mockStreamTextResponse([
151
+ textStart("0"), // Simulate Google Gemini returning "0"
152
+ textDelta("First message"),
153
+ finish(),
154
+ ]) as any,
155
+ );
156
+
157
+ const input: RunAgentInput = {
158
+ threadId: "thread1",
159
+ runId: "run1",
160
+ messages: [],
161
+ tools: [],
162
+ context: [],
163
+ state: {},
164
+ };
165
+
166
+ const events = await collectEvents(agent["run"](input));
167
+
168
+ const textEvents = events.filter(
169
+ (e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK,
170
+ );
171
+ expect(textEvents).toHaveLength(1);
172
+
173
+ // Verify that messageId is NOT "0" - should be a UUID
174
+ expect(textEvents[0].messageId).not.toBe("0");
175
+ expect(textEvents[0].messageId).toMatch(
176
+ /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/,
177
+ );
178
+ });
179
+
180
+ it("should use provider-supplied messageId when it's not '0'", async () => {
181
+ const agent = new BasicAgent({
182
+ model: "openai/gpt-4o",
183
+ });
184
+
185
+ const validId = "msg_abc123";
186
+ vi.mocked(streamText).mockReturnValue(
187
+ mockStreamTextResponse([
188
+ textStart(validId), // Valid ID from provider
189
+ textDelta("Test message"),
190
+ finish(),
191
+ ]) as any,
192
+ );
193
+
194
+ const input: RunAgentInput = {
195
+ threadId: "thread1",
196
+ runId: "run1",
197
+ messages: [],
198
+ tools: [],
199
+ context: [],
200
+ state: {},
201
+ };
202
+
203
+ const events = await collectEvents(agent["run"](input));
204
+
205
+ const textEvents = events.filter(
206
+ (e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK,
207
+ );
208
+ expect(textEvents).toHaveLength(1);
209
+
210
+ // Verify that the valid ID from provider is used
211
+ expect(textEvents[0].messageId).toBe(validId);
212
+ });
213
+ });
214
+
215
+ describe("Tool Call Events", () => {
216
+ it("should emit tool call lifecycle events", async () => {
217
+ const agent = new BasicAgent({
218
+ model: "openai/gpt-4o",
219
+ });
220
+
221
+ vi.mocked(streamText).mockReturnValue(
222
+ mockStreamTextResponse([
223
+ toolCallStreamingStart("call1", "testTool"),
224
+ toolCallDelta("call1", '{"arg'),
225
+ toolCallDelta("call1", '":"val"}'),
226
+ toolCall("call1", "testTool", { arg: "val" }),
227
+ toolResult("call1", "testTool", { result: "success" }),
228
+ finish(),
229
+ ]) as any,
230
+ );
231
+
232
+ const input: RunAgentInput = {
233
+ threadId: "thread1",
234
+ runId: "run1",
235
+ messages: [],
236
+ tools: [],
237
+ context: [],
238
+ state: {},
239
+ };
240
+
241
+ const events = await collectEvents(agent["run"](input));
242
+
243
+ // Check for TOOL_CALL_START
244
+ const startEvent = events.find(
245
+ (e: any) => e.type === EventType.TOOL_CALL_START,
246
+ );
247
+ expect(startEvent).toMatchObject({
248
+ type: EventType.TOOL_CALL_START,
249
+ toolCallId: "call1",
250
+ toolCallName: "testTool",
251
+ });
252
+
253
+ // Check for TOOL_CALL_ARGS
254
+ const argsEvents = events.filter(
255
+ (e: any) => e.type === EventType.TOOL_CALL_ARGS,
256
+ );
257
+ expect(argsEvents).toHaveLength(2);
258
+
259
+ // Check for TOOL_CALL_END
260
+ const endEvent = events.find(
261
+ (e: any) => e.type === EventType.TOOL_CALL_END,
262
+ );
263
+ expect(endEvent).toMatchObject({
264
+ type: EventType.TOOL_CALL_END,
265
+ toolCallId: "call1",
266
+ });
267
+
268
+ // Check for TOOL_CALL_RESULT
269
+ const resultEvent = events.find(
270
+ (e: any) => e.type === EventType.TOOL_CALL_RESULT,
271
+ );
272
+ expect(resultEvent).toMatchObject({
273
+ type: EventType.TOOL_CALL_RESULT,
274
+ role: "tool",
275
+ toolCallId: "call1",
276
+ });
277
+ });
278
+ });
279
+
280
+ describe("Prompt Building", () => {
281
+ it("should not add system message when no prompt, context, or state", async () => {
282
+ const agent = new BasicAgent({
283
+ model: "openai/gpt-4o",
284
+ });
285
+
286
+ vi.mocked(streamText).mockReturnValue(
287
+ mockStreamTextResponse([finish()]) as any,
288
+ );
289
+
290
+ const input: RunAgentInput = {
291
+ threadId: "thread1",
292
+ runId: "run1",
293
+ messages: [{ id: "1", role: "user", content: "Hello" }],
294
+ tools: [],
295
+ context: [],
296
+ state: {},
297
+ };
298
+
299
+ await collectEvents(agent["run"](input));
300
+
301
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
302
+ expect(callArgs.messages).toHaveLength(1);
303
+ expect(callArgs.messages[0].role).toBe("user");
304
+ });
305
+
306
+ it("should prepend system message with config prompt", async () => {
307
+ const agent = new BasicAgent({
308
+ model: "openai/gpt-4o",
309
+ prompt: "You are a helpful assistant.",
310
+ });
311
+
312
+ vi.mocked(streamText).mockReturnValue(
313
+ mockStreamTextResponse([finish()]) as any,
314
+ );
315
+
316
+ const input: RunAgentInput = {
317
+ threadId: "thread1",
318
+ runId: "run1",
319
+ messages: [{ id: "1", role: "user", content: "Hello" }],
320
+ tools: [],
321
+ context: [],
322
+ state: {},
323
+ };
324
+
325
+ await collectEvents(agent["run"](input));
326
+
327
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
328
+ expect(callArgs.messages).toHaveLength(2);
329
+ expect(callArgs.messages[0]).toMatchObject({
330
+ role: "system",
331
+ content: "You are a helpful assistant.",
332
+ });
333
+ });
334
+
335
+ it("should include context in system message", async () => {
336
+ const agent = new BasicAgent({
337
+ model: "openai/gpt-4o",
338
+ });
339
+
340
+ vi.mocked(streamText).mockReturnValue(
341
+ mockStreamTextResponse([finish()]) as any,
342
+ );
343
+
344
+ const input: RunAgentInput = {
345
+ threadId: "thread1",
346
+ runId: "run1",
347
+ messages: [],
348
+ tools: [],
349
+ context: [
350
+ { description: "User Name", value: "John Doe" },
351
+ { description: "Location", value: "New York" },
352
+ ],
353
+ state: {},
354
+ };
355
+
356
+ await collectEvents(agent["run"](input));
357
+
358
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
359
+ const systemMessage = callArgs.messages[0];
360
+ expect(systemMessage.role).toBe("system");
361
+ expect(systemMessage.content).toContain("Context from the application");
362
+ expect(systemMessage.content).toContain("User Name");
363
+ expect(systemMessage.content).toContain("John Doe");
364
+ expect(systemMessage.content).toContain("Location");
365
+ expect(systemMessage.content).toContain("New York");
366
+ });
367
+
368
+ it("should include state in system message", async () => {
369
+ const agent = new BasicAgent({
370
+ model: "openai/gpt-4o",
371
+ });
372
+
373
+ vi.mocked(streamText).mockReturnValue(
374
+ mockStreamTextResponse([finish()]) as any,
375
+ );
376
+
377
+ const input: RunAgentInput = {
378
+ threadId: "thread1",
379
+ runId: "run1",
380
+ messages: [],
381
+ tools: [],
382
+ context: [],
383
+ state: { counter: 0, items: ["a", "b"] },
384
+ };
385
+
386
+ await collectEvents(agent["run"](input));
387
+
388
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
389
+ const systemMessage = callArgs.messages[0];
390
+ expect(systemMessage.role).toBe("system");
391
+ expect(systemMessage.content).toContain("Application State");
392
+ expect(systemMessage.content).toContain("AGUISendStateSnapshot");
393
+ expect(systemMessage.content).toContain("AGUISendStateDelta");
394
+ expect(systemMessage.content).toContain('"counter": 0');
395
+ expect(systemMessage.content).toContain('"items"');
396
+ });
397
+
398
+ it("should combine prompt, context, and state", async () => {
399
+ const agent = new BasicAgent({
400
+ model: "openai/gpt-4o",
401
+ prompt: "You are helpful.",
402
+ });
403
+
404
+ vi.mocked(streamText).mockReturnValue(
405
+ mockStreamTextResponse([finish()]) as any,
406
+ );
407
+
408
+ const input: RunAgentInput = {
409
+ threadId: "thread1",
410
+ runId: "run1",
411
+ messages: [],
412
+ tools: [],
413
+ context: [{ description: "Context", value: "Data" }],
414
+ state: { value: 1 },
415
+ };
416
+
417
+ await collectEvents(agent["run"](input));
418
+
419
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
420
+ const systemMessage = callArgs.messages[0];
421
+ expect(systemMessage.content).toContain("You are helpful.");
422
+ expect(systemMessage.content).toContain("Context from the application");
423
+ expect(systemMessage.content).toContain("Application State");
424
+
425
+ // Check order: prompt, then context, then state
426
+ const promptIndex = systemMessage.content.indexOf("You are helpful.");
427
+ const contextIndex = systemMessage.content.indexOf(
428
+ "Context from the application",
429
+ );
430
+ const stateIndex = systemMessage.content.indexOf("Application State");
431
+
432
+ expect(promptIndex).toBeLessThan(contextIndex);
433
+ expect(contextIndex).toBeLessThan(stateIndex);
434
+ });
435
+ });
436
+
437
+ describe("Forward System/Developer Messages", () => {
438
+ it("should ignore system messages by default", async () => {
439
+ const agent = new BasicAgent({
440
+ model: "openai/gpt-4o",
441
+ });
442
+
443
+ vi.mocked(streamText).mockReturnValue(
444
+ mockStreamTextResponse([finish()]) as any,
445
+ );
446
+
447
+ const input: RunAgentInput = {
448
+ threadId: "thread1",
449
+ runId: "run1",
450
+ messages: [
451
+ { id: "sys1", role: "system", content: "System instruction" },
452
+ { id: "user1", role: "user", content: "Hello" },
453
+ ],
454
+ tools: [],
455
+ context: [],
456
+ state: {},
457
+ };
458
+
459
+ await collectEvents(agent["run"](input));
460
+
461
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
462
+ // Should only have the user message, system message ignored
463
+ expect(callArgs.messages).toHaveLength(1);
464
+ expect(callArgs.messages[0].role).toBe("user");
465
+ });
466
+
467
+ it("should ignore developer messages by default", async () => {
468
+ const agent = new BasicAgent({
469
+ model: "openai/gpt-4o",
470
+ });
471
+
472
+ vi.mocked(streamText).mockReturnValue(
473
+ mockStreamTextResponse([finish()]) as any,
474
+ );
475
+
476
+ const input: RunAgentInput = {
477
+ threadId: "thread1",
478
+ runId: "run1",
479
+ messages: [
480
+ { id: "dev1", role: "developer", content: "Developer hint" },
481
+ { id: "user1", role: "user", content: "Hello" },
482
+ ],
483
+ tools: [],
484
+ context: [],
485
+ state: {},
486
+ };
487
+
488
+ await collectEvents(agent["run"](input));
489
+
490
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
491
+ // Should only have the user message, developer message ignored
492
+ expect(callArgs.messages).toHaveLength(1);
493
+ expect(callArgs.messages[0].role).toBe("user");
494
+ });
495
+
496
+ it("should forward system messages when forwardSystemMessages is true", async () => {
497
+ const agent = new BasicAgent({
498
+ model: "openai/gpt-4o",
499
+ forwardSystemMessages: true,
500
+ });
501
+
502
+ vi.mocked(streamText).mockReturnValue(
503
+ mockStreamTextResponse([finish()]) as any,
504
+ );
505
+
506
+ const input: RunAgentInput = {
507
+ threadId: "thread1",
508
+ runId: "run1",
509
+ messages: [
510
+ { id: "sys1", role: "system", content: "System instruction" },
511
+ { id: "user1", role: "user", content: "Hello" },
512
+ ],
513
+ tools: [],
514
+ context: [],
515
+ state: {},
516
+ };
517
+
518
+ await collectEvents(agent["run"](input));
519
+
520
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
521
+ expect(callArgs.messages).toHaveLength(2);
522
+ expect(callArgs.messages[0]).toMatchObject({
523
+ role: "system",
524
+ content: "System instruction",
525
+ });
526
+ expect(callArgs.messages[1].role).toBe("user");
527
+ });
528
+
529
+ it("should forward developer messages as system when forwardDeveloperMessages is true", async () => {
530
+ const agent = new BasicAgent({
531
+ model: "openai/gpt-4o",
532
+ forwardDeveloperMessages: true,
533
+ });
534
+
535
+ vi.mocked(streamText).mockReturnValue(
536
+ mockStreamTextResponse([finish()]) as any,
537
+ );
538
+
539
+ const input: RunAgentInput = {
540
+ threadId: "thread1",
541
+ runId: "run1",
542
+ messages: [
543
+ { id: "dev1", role: "developer", content: "Developer hint" },
544
+ { id: "user1", role: "user", content: "Hello" },
545
+ ],
546
+ tools: [],
547
+ context: [],
548
+ state: {},
549
+ };
550
+
551
+ await collectEvents(agent["run"](input));
552
+
553
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
554
+ expect(callArgs.messages).toHaveLength(2);
555
+ // Developer messages are converted to system role
556
+ expect(callArgs.messages[0]).toMatchObject({
557
+ role: "system",
558
+ content: "Developer hint",
559
+ });
560
+ expect(callArgs.messages[1].role).toBe("user");
561
+ });
562
+
563
+ it("should forward both system and developer messages when both flags are true", async () => {
564
+ const agent = new BasicAgent({
565
+ model: "openai/gpt-4o",
566
+ forwardSystemMessages: true,
567
+ forwardDeveloperMessages: true,
568
+ });
569
+
570
+ vi.mocked(streamText).mockReturnValue(
571
+ mockStreamTextResponse([finish()]) as any,
572
+ );
573
+
574
+ const input: RunAgentInput = {
575
+ threadId: "thread1",
576
+ runId: "run1",
577
+ messages: [
578
+ { id: "sys1", role: "system", content: "System instruction" },
579
+ { id: "dev1", role: "developer", content: "Developer hint" },
580
+ { id: "user1", role: "user", content: "Hello" },
581
+ ],
582
+ tools: [],
583
+ context: [],
584
+ state: {},
585
+ };
586
+
587
+ await collectEvents(agent["run"](input));
588
+
589
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
590
+ expect(callArgs.messages).toHaveLength(3);
591
+ expect(callArgs.messages[0]).toMatchObject({
592
+ role: "system",
593
+ content: "System instruction",
594
+ });
595
+ expect(callArgs.messages[1]).toMatchObject({
596
+ role: "system",
597
+ content: "Developer hint",
598
+ });
599
+ expect(callArgs.messages[2].role).toBe("user");
600
+ });
601
+
602
+ it("should place config prompt before forwarded system/developer messages", async () => {
603
+ const agent = new BasicAgent({
604
+ model: "openai/gpt-4o",
605
+ prompt: "You are a helpful assistant.",
606
+ forwardSystemMessages: true,
607
+ forwardDeveloperMessages: true,
608
+ });
609
+
610
+ vi.mocked(streamText).mockReturnValue(
611
+ mockStreamTextResponse([finish()]) as any,
612
+ );
613
+
614
+ const input: RunAgentInput = {
615
+ threadId: "thread1",
616
+ runId: "run1",
617
+ messages: [
618
+ { id: "sys1", role: "system", content: "System instruction" },
619
+ { id: "dev1", role: "developer", content: "Developer hint" },
620
+ { id: "user1", role: "user", content: "Hello" },
621
+ ],
622
+ tools: [],
623
+ context: [],
624
+ state: {},
625
+ };
626
+
627
+ await collectEvents(agent["run"](input));
628
+
629
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
630
+ // Config prompt is prepended as first system message
631
+ expect(callArgs.messages).toHaveLength(4);
632
+ expect(callArgs.messages[0]).toMatchObject({
633
+ role: "system",
634
+ content: "You are a helpful assistant.",
635
+ });
636
+ expect(callArgs.messages[1]).toMatchObject({
637
+ role: "system",
638
+ content: "System instruction",
639
+ });
640
+ expect(callArgs.messages[2]).toMatchObject({
641
+ role: "system",
642
+ content: "Developer hint",
643
+ });
644
+ expect(callArgs.messages[3].role).toBe("user");
645
+ });
646
+ });
647
+
648
+ describe("Tool Configuration", () => {
649
+ it("should include tools from config", async () => {
650
+ const tool1 = defineTool({
651
+ name: "configTool",
652
+ description: "A config tool",
653
+ parameters: z.object({ input: z.string() }),
654
+ execute: async () => ({ result: "ok" }),
655
+ });
656
+
657
+ const agent = new BasicAgent({
658
+ model: "openai/gpt-4o",
659
+ tools: [tool1],
660
+ });
661
+
662
+ vi.mocked(streamText).mockReturnValue(
663
+ mockStreamTextResponse([finish()]) as any,
664
+ );
665
+
666
+ const input: RunAgentInput = {
667
+ threadId: "thread1",
668
+ runId: "run1",
669
+ messages: [],
670
+ tools: [],
671
+ context: [],
672
+ state: {},
673
+ };
674
+
675
+ await collectEvents(agent["run"](input));
676
+
677
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
678
+ expect(callArgs.tools).toHaveProperty("configTool");
679
+ });
680
+
681
+ it("should merge config tools with input tools", async () => {
682
+ const configTool = defineTool({
683
+ name: "configTool",
684
+ description: "From config",
685
+ parameters: z.object({}),
686
+ execute: async () => ({ result: "ok" }),
687
+ });
688
+
689
+ const agent = new BasicAgent({
690
+ model: "openai/gpt-4o",
691
+ tools: [configTool],
692
+ });
693
+
694
+ vi.mocked(streamText).mockReturnValue(
695
+ mockStreamTextResponse([finish()]) as any,
696
+ );
697
+
698
+ const input: RunAgentInput = {
699
+ threadId: "thread1",
700
+ runId: "run1",
701
+ messages: [],
702
+ tools: [
703
+ {
704
+ name: "inputTool",
705
+ description: "From input",
706
+ parameters: { type: "object", properties: {} },
707
+ },
708
+ ],
709
+ context: [],
710
+ state: {},
711
+ };
712
+
713
+ await collectEvents(agent["run"](input));
714
+
715
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
716
+ expect(callArgs.tools).toHaveProperty("configTool");
717
+ expect(callArgs.tools).toHaveProperty("inputTool");
718
+ });
719
+
720
+ it("should always include state update tools", async () => {
721
+ const agent = new BasicAgent({
722
+ model: "openai/gpt-4o",
723
+ });
724
+
725
+ vi.mocked(streamText).mockReturnValue(
726
+ mockStreamTextResponse([finish()]) as any,
727
+ );
728
+
729
+ const input: RunAgentInput = {
730
+ threadId: "thread1",
731
+ runId: "run1",
732
+ messages: [],
733
+ tools: [],
734
+ context: [],
735
+ state: {},
736
+ };
737
+
738
+ await collectEvents(agent["run"](input));
739
+
740
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
741
+ expect(callArgs.tools).toHaveProperty("AGUISendStateSnapshot");
742
+ expect(callArgs.tools).toHaveProperty("AGUISendStateDelta");
743
+ });
744
+ });
745
+
746
+ describe("Property Overrides", () => {
747
+ it("should respect overridable properties", async () => {
748
+ const agent = new BasicAgent({
749
+ model: "openai/gpt-4o",
750
+ temperature: 0.5,
751
+ overridableProperties: ["temperature"],
752
+ });
753
+
754
+ vi.mocked(streamText).mockReturnValue(
755
+ mockStreamTextResponse([finish()]) as any,
756
+ );
757
+
758
+ const input: RunAgentInput = {
759
+ threadId: "thread1",
760
+ runId: "run1",
761
+ messages: [],
762
+ tools: [],
763
+ context: [],
764
+ state: {},
765
+ forwardedProps: { temperature: 0.9 },
766
+ };
767
+
768
+ await collectEvents(agent["run"](input));
769
+
770
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
771
+ expect(callArgs.temperature).toBe(0.9);
772
+ });
773
+
774
+ it("should ignore non-overridable properties", async () => {
775
+ const agent = new BasicAgent({
776
+ model: "openai/gpt-4o",
777
+ temperature: 0.5,
778
+ overridableProperties: [], // No properties can be overridden
779
+ });
780
+
781
+ vi.mocked(streamText).mockReturnValue(
782
+ mockStreamTextResponse([finish()]) as any,
783
+ );
784
+
785
+ const input: RunAgentInput = {
786
+ threadId: "thread1",
787
+ runId: "run1",
788
+ messages: [],
789
+ tools: [],
790
+ context: [],
791
+ state: {},
792
+ forwardedProps: { temperature: 0.9 },
793
+ };
794
+
795
+ await collectEvents(agent["run"](input));
796
+
797
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
798
+ expect(callArgs.temperature).toBe(0.5); // Original value, not overridden
799
+ });
800
+ });
801
+
802
+ describe("Error Handling", () => {
803
+ it("should emit RUN_ERROR event on failure", async () => {
804
+ const agent = new BasicAgent({
805
+ model: "openai/gpt-4o",
806
+ });
807
+
808
+ vi.mocked(streamText).mockImplementation(() => {
809
+ throw new Error("Test error");
810
+ });
811
+
812
+ const input: RunAgentInput = {
813
+ threadId: "thread1",
814
+ runId: "run1",
815
+ messages: [],
816
+ tools: [],
817
+ context: [],
818
+ state: {},
819
+ };
820
+
821
+ try {
822
+ await collectEvents(agent["run"](input));
823
+ expect.fail("Should have thrown");
824
+ } catch (error: any) {
825
+ // Error is expected - check that we got a RUN_ERROR event
826
+ // Note: The error is thrown after emitting the event
827
+ expect(error.message).toContain("Test error");
828
+ }
829
+ });
830
+ });
831
+
832
+ describe("Reasoning Event Emission", () => {
833
+ it("should emit full reasoning lifecycle events", async () => {
834
+ const agent = new BasicAgent({
835
+ model: "openai/gpt-4o",
836
+ });
837
+
838
+ vi.mocked(streamText).mockReturnValue(
839
+ mockStreamTextResponse([
840
+ reasoningStart(),
841
+ reasoningDelta("Let me think..."),
842
+ reasoningDelta(" about this."),
843
+ reasoningEnd(),
844
+ finish(),
845
+ ]) as any,
846
+ );
847
+
848
+ const input: RunAgentInput = {
849
+ threadId: "thread1",
850
+ runId: "run1",
851
+ messages: [],
852
+ tools: [],
853
+ context: [],
854
+ state: {},
855
+ };
856
+
857
+ const events = await collectEvents(agent["run"](input));
858
+
859
+ // Verify event order
860
+ const eventTypes = events.map((e: any) => e.type);
861
+ expect(eventTypes[0]).toBe(EventType.RUN_STARTED);
862
+
863
+ const reasoningStartIdx = eventTypes.indexOf(EventType.REASONING_START);
864
+ const reasoningMsgStartIdx = eventTypes.indexOf(
865
+ EventType.REASONING_MESSAGE_START,
866
+ );
867
+ const reasoningContentIndices = eventTypes.reduce(
868
+ (acc: number[], type: string, idx: number) =>
869
+ type === EventType.REASONING_MESSAGE_CONTENT ? [...acc, idx] : acc,
870
+ [],
871
+ );
872
+ const reasoningMsgEndIdx = eventTypes.indexOf(
873
+ EventType.REASONING_MESSAGE_END,
874
+ );
875
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
876
+
877
+ expect(reasoningStartIdx).toBeGreaterThan(0);
878
+ expect(reasoningMsgStartIdx).toBeGreaterThan(reasoningStartIdx);
879
+ expect(reasoningContentIndices).toHaveLength(2);
880
+ expect(reasoningContentIndices[0]).toBeGreaterThan(reasoningMsgStartIdx);
881
+ expect(reasoningMsgEndIdx).toBeGreaterThan(
882
+ reasoningContentIndices[reasoningContentIndices.length - 1],
883
+ );
884
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
885
+
886
+ // Verify consistent messageId across all reasoning events
887
+ const reasoningEvents = events.filter((e: any) =>
888
+ [
889
+ EventType.REASONING_START,
890
+ EventType.REASONING_MESSAGE_START,
891
+ EventType.REASONING_MESSAGE_CONTENT,
892
+ EventType.REASONING_MESSAGE_END,
893
+ EventType.REASONING_END,
894
+ ].includes(e.type),
895
+ );
896
+ const messageIds = reasoningEvents.map((e: any) => e.messageId);
897
+ expect(new Set(messageIds).size).toBe(1);
898
+
899
+ // Verify REASONING_MESSAGE_START has role "reasoning"
900
+ const msgStartEvent = events.find(
901
+ (e: any) => e.type === EventType.REASONING_MESSAGE_START,
902
+ );
903
+ expect(msgStartEvent).toMatchObject({ role: "reasoning" });
904
+
905
+ // Verify content deltas
906
+ const contentEvents = events.filter(
907
+ (e: any) => e.type === EventType.REASONING_MESSAGE_CONTENT,
908
+ );
909
+ expect(contentEvents[0]).toMatchObject({ delta: "Let me think..." });
910
+ expect(contentEvents[1]).toMatchObject({ delta: " about this." });
911
+
912
+ // Verify last event is RUN_FINISHED
913
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
914
+ });
915
+
916
+ it("should emit reasoning events followed by text events", async () => {
917
+ const agent = new BasicAgent({
918
+ model: "openai/gpt-4o",
919
+ });
920
+
921
+ vi.mocked(streamText).mockReturnValue(
922
+ mockStreamTextResponse([
923
+ reasoningStart(),
924
+ reasoningDelta("thinking"),
925
+ reasoningEnd(),
926
+ textDelta("Hello"),
927
+ finish(),
928
+ ]) as any,
929
+ );
930
+
931
+ const input: RunAgentInput = {
932
+ threadId: "thread1",
933
+ runId: "run1",
934
+ messages: [],
935
+ tools: [],
936
+ context: [],
937
+ state: {},
938
+ };
939
+
940
+ const events = await collectEvents(agent["run"](input));
941
+ const eventTypes = events.map((e: any) => e.type);
942
+
943
+ // Reasoning events should come before text events
944
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
945
+ const textChunkIdx = eventTypes.indexOf(EventType.TEXT_MESSAGE_CHUNK);
946
+ expect(reasoningEndIdx).toBeLessThan(textChunkIdx);
947
+
948
+ // Reasoning messageId should differ from text messageId
949
+ const reasoningEvent = events.find(
950
+ (e: any) => e.type === EventType.REASONING_START,
951
+ );
952
+ const textEvent = events.find(
953
+ (e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK,
954
+ );
955
+ expect(reasoningEvent.messageId).not.toBe(textEvent.messageId);
956
+ });
957
+
958
+ it("should use provider-supplied reasoning id", async () => {
959
+ const agent = new BasicAgent({
960
+ model: "openai/gpt-4o",
961
+ });
962
+
963
+ vi.mocked(streamText).mockReturnValue(
964
+ mockStreamTextResponse([
965
+ reasoningStart("reasoning-msg-123"),
966
+ reasoningDelta("content"),
967
+ reasoningEnd(),
968
+ finish(),
969
+ ]) as any,
970
+ );
971
+
972
+ const input: RunAgentInput = {
973
+ threadId: "thread1",
974
+ runId: "run1",
975
+ messages: [],
976
+ tools: [],
977
+ context: [],
978
+ state: {},
979
+ };
980
+
981
+ const events = await collectEvents(agent["run"](input));
982
+
983
+ const reasoningEvents = events.filter((e: any) =>
984
+ [
985
+ EventType.REASONING_START,
986
+ EventType.REASONING_MESSAGE_START,
987
+ EventType.REASONING_MESSAGE_CONTENT,
988
+ EventType.REASONING_MESSAGE_END,
989
+ EventType.REASONING_END,
990
+ ].includes(e.type),
991
+ );
992
+
993
+ for (const event of reasoningEvents) {
994
+ expect(event.messageId).toBe("reasoning-msg-123");
995
+ }
996
+ });
997
+
998
+ it("should generate unique reasoningMessageId when provider returns id '0'", async () => {
999
+ const agent = new BasicAgent({
1000
+ model: "openai/gpt-4o",
1001
+ });
1002
+
1003
+ vi.mocked(streamText).mockReturnValue(
1004
+ mockStreamTextResponse([
1005
+ reasoningStart("0"),
1006
+ reasoningDelta("content"),
1007
+ reasoningEnd(),
1008
+ finish(),
1009
+ ]) as any,
1010
+ );
1011
+
1012
+ const input: RunAgentInput = {
1013
+ threadId: "thread1",
1014
+ runId: "run1",
1015
+ messages: [],
1016
+ tools: [],
1017
+ context: [],
1018
+ state: {},
1019
+ };
1020
+
1021
+ const events = await collectEvents(agent["run"](input));
1022
+
1023
+ const reasoningEvent = events.find(
1024
+ (e: any) => e.type === EventType.REASONING_START,
1025
+ );
1026
+ expect(reasoningEvent.messageId).not.toBe("0");
1027
+ expect(reasoningEvent.messageId).toMatch(
1028
+ /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/,
1029
+ );
1030
+ });
1031
+
1032
+ it("should handle empty reasoning content", async () => {
1033
+ const agent = new BasicAgent({
1034
+ model: "openai/gpt-4o",
1035
+ });
1036
+
1037
+ vi.mocked(streamText).mockReturnValue(
1038
+ mockStreamTextResponse([
1039
+ reasoningStart(),
1040
+ reasoningDelta(""),
1041
+ reasoningEnd(),
1042
+ finish(),
1043
+ ]) as any,
1044
+ );
1045
+
1046
+ const input: RunAgentInput = {
1047
+ threadId: "thread1",
1048
+ runId: "run1",
1049
+ messages: [],
1050
+ tools: [],
1051
+ context: [],
1052
+ state: {},
1053
+ };
1054
+
1055
+ const events = await collectEvents(agent["run"](input));
1056
+
1057
+ // Empty delta must NOT be emitted — EventSchemas rejects delta: ""
1058
+ const contentEvents = events.filter(
1059
+ (e: any) => e.type === EventType.REASONING_MESSAGE_CONTENT,
1060
+ );
1061
+ expect(contentEvents).toHaveLength(0);
1062
+
1063
+ // Full lifecycle should still complete
1064
+ const eventTypes = events.map((e: any) => e.type);
1065
+ expect(eventTypes).toContain(EventType.REASONING_START);
1066
+ expect(eventTypes).toContain(EventType.REASONING_MESSAGE_START);
1067
+ expect(eventTypes).toContain(EventType.REASONING_MESSAGE_END);
1068
+ expect(eventTypes).toContain(EventType.REASONING_END);
1069
+ expect(eventTypes).toContain(EventType.RUN_FINISHED);
1070
+ });
1071
+
1072
+ it("should handle reasoning-only stream (no text output)", async () => {
1073
+ const agent = new BasicAgent({
1074
+ model: "openai/gpt-4o",
1075
+ });
1076
+
1077
+ vi.mocked(streamText).mockReturnValue(
1078
+ mockStreamTextResponse([
1079
+ reasoningStart(),
1080
+ reasoningDelta("Deep thought"),
1081
+ reasoningEnd(),
1082
+ finish(),
1083
+ ]),
1084
+ );
1085
+
1086
+ const input: RunAgentInput = {
1087
+ threadId: "thread1",
1088
+ runId: "run1",
1089
+ messages: [],
1090
+ tools: [],
1091
+ context: [],
1092
+ state: {},
1093
+ };
1094
+
1095
+ const events = await collectEvents(agent["run"](input));
1096
+
1097
+ // No TEXT_MESSAGE_CHUNK events
1098
+ const textEvents = events.filter(
1099
+ (e: any) => e.type === EventType.TEXT_MESSAGE_CHUNK,
1100
+ );
1101
+ expect(textEvents).toHaveLength(0);
1102
+
1103
+ // Reasoning events are present
1104
+ const reasoningContentEvents = events.filter(
1105
+ (e: any) => e.type === EventType.REASONING_MESSAGE_CONTENT,
1106
+ );
1107
+ expect(reasoningContentEvents).toHaveLength(1);
1108
+ expect(reasoningContentEvents[0]).toMatchObject({
1109
+ delta: "Deep thought",
1110
+ });
1111
+ });
1112
+
1113
+ it("should skip empty reasoning deltas and continue stream", async () => {
1114
+ const agent = new BasicAgent({
1115
+ model: "openai/gpt-4o",
1116
+ });
1117
+
1118
+ vi.mocked(streamText).mockReturnValue(
1119
+ mockStreamTextResponse([
1120
+ reasoningStart(),
1121
+ reasoningDelta(""),
1122
+ reasoningEnd(),
1123
+ finish(),
1124
+ ]),
1125
+ );
1126
+
1127
+ const input: RunAgentInput = {
1128
+ threadId: "thread1",
1129
+ runId: "run1",
1130
+ messages: [],
1131
+ tools: [],
1132
+ context: [],
1133
+ state: {},
1134
+ };
1135
+
1136
+ const events = await collectEvents(agent["run"](input));
1137
+
1138
+ // No REASONING_MESSAGE_CONTENT events — empty delta skipped
1139
+ const contentEvents = events.filter(
1140
+ (e) => e.type === EventType.REASONING_MESSAGE_CONTENT,
1141
+ );
1142
+ expect(contentEvents).toHaveLength(0);
1143
+
1144
+ // Stream still completes with RUN_FINISHED
1145
+ const eventTypes = events.map((e) => e.type);
1146
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1147
+ });
1148
+
1149
+ it("should auto-close reasoning when SDK omits reasoning-end before tool call", async () => {
1150
+ const agent = new BasicAgent({
1151
+ model: "openai/gpt-4o",
1152
+ });
1153
+
1154
+ vi.mocked(streamText).mockReturnValue(
1155
+ mockStreamTextResponse([
1156
+ reasoningStart(),
1157
+ reasoningDelta("Thinking..."),
1158
+ // NO reasoningEnd() — simulates @ai-sdk/anthropic behaviour
1159
+ toolCallStreamingStart("call1", "testTool"),
1160
+ toolCallDelta("call1", '{"arg":"val"}'),
1161
+ toolCall("call1", "testTool", { arg: "val" }),
1162
+ toolResult("call1", "testTool", { result: "success" }),
1163
+ finish(),
1164
+ ]),
1165
+ );
1166
+
1167
+ const input: RunAgentInput = {
1168
+ threadId: "thread1",
1169
+ runId: "run1",
1170
+ messages: [],
1171
+ tools: [],
1172
+ context: [],
1173
+ state: {},
1174
+ };
1175
+
1176
+ const events = await collectEvents(agent["run"](input));
1177
+ const eventTypes = events.map((e) => e.type);
1178
+
1179
+ // REASONING_MESSAGE_END must appear before REASONING_END, which must appear before TOOL_CALL_START
1180
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1181
+ EventType.REASONING_MESSAGE_END,
1182
+ );
1183
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1184
+ const toolCallStartIdx = eventTypes.indexOf(EventType.TOOL_CALL_START);
1185
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1186
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1187
+ expect(reasoningEndIdx).toBeLessThan(toolCallStartIdx);
1188
+
1189
+ // Each close event must appear exactly once (guard against double-emit)
1190
+ expect(
1191
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1192
+ ).toHaveLength(1);
1193
+ expect(
1194
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1195
+ ).toHaveLength(1);
1196
+
1197
+ // Stream still completes with RUN_FINISHED
1198
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1199
+ });
1200
+
1201
+ it("should auto-close reasoning when SDK omits reasoning-end before text", async () => {
1202
+ const agent = new BasicAgent({
1203
+ model: "openai/gpt-4o",
1204
+ });
1205
+
1206
+ vi.mocked(streamText).mockReturnValue(
1207
+ mockStreamTextResponse([
1208
+ reasoningStart(),
1209
+ reasoningDelta("Let me think"),
1210
+ // NO reasoningEnd() — simulates @ai-sdk/anthropic behaviour
1211
+ textStart(),
1212
+ textDelta("Answer"),
1213
+ finish(),
1214
+ ]),
1215
+ );
1216
+
1217
+ const input: RunAgentInput = {
1218
+ threadId: "thread1",
1219
+ runId: "run1",
1220
+ messages: [],
1221
+ tools: [],
1222
+ context: [],
1223
+ state: {},
1224
+ };
1225
+
1226
+ const events = await collectEvents(agent["run"](input));
1227
+ const eventTypes = events.map((e) => e.type);
1228
+
1229
+ // REASONING_MESSAGE_END must appear before REASONING_END, which must appear before TEXT_MESSAGE_CHUNK
1230
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1231
+ EventType.REASONING_MESSAGE_END,
1232
+ );
1233
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1234
+ const textChunkIdx = eventTypes.indexOf(EventType.TEXT_MESSAGE_CHUNK);
1235
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1236
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1237
+ expect(reasoningEndIdx).toBeLessThan(textChunkIdx);
1238
+
1239
+ // Each close event must appear exactly once (guard against double-emit)
1240
+ expect(
1241
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1242
+ ).toHaveLength(1);
1243
+ expect(
1244
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1245
+ ).toHaveLength(1);
1246
+
1247
+ // Stream still completes with RUN_FINISHED
1248
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1249
+ });
1250
+
1251
+ it("should auto-close reasoning when SDK omits reasoning-end before finish", async () => {
1252
+ const agent = new BasicAgent({
1253
+ model: "openai/gpt-4o",
1254
+ });
1255
+
1256
+ vi.mocked(streamText).mockReturnValue(
1257
+ mockStreamTextResponse([
1258
+ reasoningStart(),
1259
+ reasoningDelta("Deep thought"),
1260
+ // NO reasoningEnd() — simulates @ai-sdk/anthropic behaviour
1261
+ finish(),
1262
+ ]),
1263
+ );
1264
+
1265
+ const input: RunAgentInput = {
1266
+ threadId: "thread1",
1267
+ runId: "run1",
1268
+ messages: [],
1269
+ tools: [],
1270
+ context: [],
1271
+ state: {},
1272
+ };
1273
+
1274
+ const events = await collectEvents(agent["run"](input));
1275
+ const eventTypes = events.map((e) => e.type);
1276
+
1277
+ // REASONING_MESSAGE_END must appear before REASONING_END (auto-closed by finish case)
1278
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1279
+ EventType.REASONING_MESSAGE_END,
1280
+ );
1281
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1282
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1283
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1284
+
1285
+ // Each close event must appear exactly once (guard against double-emit)
1286
+ expect(
1287
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1288
+ ).toHaveLength(1);
1289
+ expect(
1290
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1291
+ ).toHaveLength(1);
1292
+
1293
+ // Stream still completes with RUN_FINISHED
1294
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1295
+ });
1296
+
1297
+ it("should auto-close reasoning when stream aborts mid-reasoning", async () => {
1298
+ const agent = new BasicAgent({
1299
+ model: "openai/gpt-4o",
1300
+ });
1301
+
1302
+ vi.mocked(streamText).mockReturnValue(
1303
+ mockStreamTextResponse([
1304
+ reasoningStart(),
1305
+ reasoningDelta("Thinking..."),
1306
+ // NO reasoningEnd() — stream aborts before SDK can close reasoning
1307
+ abort(),
1308
+ ]),
1309
+ );
1310
+
1311
+ const input: RunAgentInput = {
1312
+ threadId: "thread1",
1313
+ runId: "run1",
1314
+ messages: [],
1315
+ tools: [],
1316
+ context: [],
1317
+ state: {},
1318
+ };
1319
+
1320
+ const events = await collectEvents(agent["run"](input));
1321
+ const eventTypes = events.map((e) => e.type);
1322
+
1323
+ // REASONING_MESSAGE_END must appear before REASONING_END, both before RUN_FINISHED
1324
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1325
+ EventType.REASONING_MESSAGE_END,
1326
+ );
1327
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1328
+ const runFinishedIdx = eventTypes.indexOf(EventType.RUN_FINISHED);
1329
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1330
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1331
+ expect(runFinishedIdx).toBeGreaterThan(reasoningEndIdx);
1332
+
1333
+ // Each close event must appear exactly once (guard against double-emit)
1334
+ expect(
1335
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1336
+ ).toHaveLength(1);
1337
+ expect(
1338
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1339
+ ).toHaveLength(1);
1340
+
1341
+ // Stream still completes with RUN_FINISHED
1342
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1343
+ });
1344
+
1345
+ it("should auto-close reasoning when stream errors mid-reasoning", async () => {
1346
+ const agent = new BasicAgent({
1347
+ model: "openai/gpt-4o",
1348
+ });
1349
+
1350
+ vi.mocked(streamText).mockReturnValue(
1351
+ mockStreamTextResponse([
1352
+ reasoningStart(),
1353
+ reasoningDelta("Thinking..."),
1354
+ // NO reasoningEnd() — stream errors before SDK can close reasoning
1355
+ error("stream failed"),
1356
+ ]),
1357
+ );
1358
+
1359
+ const input: RunAgentInput = {
1360
+ threadId: "thread1",
1361
+ runId: "run1",
1362
+ messages: [],
1363
+ tools: [],
1364
+ context: [],
1365
+ state: {},
1366
+ };
1367
+
1368
+ // subscriber.error() causes collectEvents to reject, so collect manually
1369
+ const events: BaseEvent[] = [];
1370
+ await new Promise<void>((resolve) => {
1371
+ agent["run"](input).subscribe({
1372
+ next: (e) => events.push(e),
1373
+ error: () => resolve(), // error is expected
1374
+ complete: () => resolve(),
1375
+ });
1376
+ });
1377
+
1378
+ const eventTypes = events.map((e) => e.type);
1379
+
1380
+ // REASONING_MESSAGE_END must appear before REASONING_END, both before RUN_ERROR
1381
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1382
+ EventType.REASONING_MESSAGE_END,
1383
+ );
1384
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1385
+ const runErrorIdx = eventTypes.indexOf(EventType.RUN_ERROR);
1386
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1387
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1388
+ expect(runErrorIdx).toBeGreaterThan(reasoningEndIdx);
1389
+
1390
+ // Each close event must appear exactly once (guard against double-emit)
1391
+ expect(
1392
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1393
+ ).toHaveLength(1);
1394
+ expect(
1395
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1396
+ ).toHaveLength(1);
1397
+ });
1398
+
1399
+ it("should auto-close reasoning for consecutive blocks with no reasoning-end between them", async () => {
1400
+ const agent = new BasicAgent({
1401
+ model: "openai/gpt-4o",
1402
+ });
1403
+
1404
+ vi.mocked(streamText).mockReturnValue(
1405
+ mockStreamTextResponse([
1406
+ reasoningStart(),
1407
+ reasoningDelta("First thought"),
1408
+ // NO reasoningEnd() — second block starts immediately
1409
+ reasoningStart(),
1410
+ reasoningDelta("Second thought"),
1411
+ reasoningEnd(),
1412
+ finish(),
1413
+ ]),
1414
+ );
1415
+
1416
+ const input: RunAgentInput = {
1417
+ threadId: "thread1",
1418
+ runId: "run1",
1419
+ messages: [],
1420
+ tools: [],
1421
+ context: [],
1422
+ state: {},
1423
+ };
1424
+
1425
+ const events = await collectEvents(agent["run"](input));
1426
+ const eventTypes = events.map((e) => e.type);
1427
+
1428
+ // Both reasoning blocks must be properly closed — two complete lifecycles
1429
+ expect(
1430
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1431
+ ).toHaveLength(2);
1432
+ expect(
1433
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1434
+ ).toHaveLength(2);
1435
+
1436
+ // First block's REASONING_END must appear before second block's REASONING_START
1437
+ const firstReasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1438
+ const secondReasoningStartIdx = eventTypes.lastIndexOf(
1439
+ EventType.REASONING_START,
1440
+ );
1441
+ expect(firstReasoningEndIdx).toBeLessThan(secondReasoningStartIdx);
1442
+
1443
+ // The two blocks must use distinct messageIds
1444
+ const startEvents = events.filter(
1445
+ (e): e is ReasoningStartEvent => e.type === EventType.REASONING_START,
1446
+ );
1447
+ expect(startEvents).toHaveLength(2);
1448
+ expect(startEvents[0].messageId).not.toBe(startEvents[1].messageId);
1449
+
1450
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1451
+ });
1452
+
1453
+ it("should close reasoning when an exception is thrown mid-stream", async () => {
1454
+ const agent = new BasicAgent({
1455
+ model: "openai/gpt-4o",
1456
+ });
1457
+
1458
+ // Simulate the fullStream generator throwing mid-iteration (not a stream error event)
1459
+ const throwingStream = {
1460
+ fullStream: (async function* () {
1461
+ yield reasoningStart();
1462
+ yield reasoningDelta("Thinking...");
1463
+ throw new Error("unexpected network failure");
1464
+ })(),
1465
+ };
1466
+ vi.mocked(streamText).mockReturnValue(
1467
+ throwingStream as unknown as ReturnType<typeof streamText>,
1468
+ );
1469
+
1470
+ const input: RunAgentInput = {
1471
+ threadId: "thread1",
1472
+ runId: "run1",
1473
+ messages: [],
1474
+ tools: [],
1475
+ context: [],
1476
+ state: {},
1477
+ };
1478
+
1479
+ // subscriber.error() causes collectEvents to reject, so collect manually
1480
+ const events: BaseEvent[] = [];
1481
+ await new Promise<void>((resolve) => {
1482
+ agent["run"](input).subscribe({
1483
+ next: (e) => events.push(e),
1484
+ error: () => resolve(), // error is expected
1485
+ complete: () => resolve(),
1486
+ });
1487
+ });
1488
+
1489
+ const eventTypes = events.map((e) => e.type);
1490
+
1491
+ // Reasoning must be closed before RUN_ERROR despite the exception path
1492
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1493
+ EventType.REASONING_MESSAGE_END,
1494
+ );
1495
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1496
+ const runErrorIdx = eventTypes.indexOf(EventType.RUN_ERROR);
1497
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1498
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1499
+ expect(runErrorIdx).toBeGreaterThan(reasoningEndIdx);
1500
+
1501
+ expect(
1502
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1503
+ ).toHaveLength(1);
1504
+ expect(
1505
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1506
+ ).toHaveLength(1);
1507
+ });
1508
+
1509
+ it("should close reasoning and emit RUN_FINISHED when stream exhausts without terminal event", async () => {
1510
+ const agent = new BasicAgent({
1511
+ model: "openai/gpt-4o",
1512
+ });
1513
+
1514
+ // Stream ends with no finish/abort/error — exercises !terminalEventEmitted fallback
1515
+ vi.mocked(streamText).mockReturnValue(
1516
+ mockStreamTextResponse([
1517
+ reasoningStart(),
1518
+ reasoningDelta("Thinking..."),
1519
+ // deliberate: no finish(), no abort(), no error()
1520
+ ]),
1521
+ );
1522
+
1523
+ const input: RunAgentInput = {
1524
+ threadId: "thread1",
1525
+ runId: "run1",
1526
+ messages: [],
1527
+ tools: [],
1528
+ context: [],
1529
+ state: {},
1530
+ };
1531
+
1532
+ const events = await collectEvents(agent["run"](input));
1533
+ const eventTypes = events.map((e) => e.type);
1534
+
1535
+ // Reasoning must be closed before RUN_FINISHED via fallback
1536
+ const reasoningMsgEndIdx = eventTypes.indexOf(
1537
+ EventType.REASONING_MESSAGE_END,
1538
+ );
1539
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1540
+ const runFinishedIdx = eventTypes.indexOf(EventType.RUN_FINISHED);
1541
+ expect(reasoningMsgEndIdx).toBeGreaterThan(0);
1542
+ expect(reasoningEndIdx).toBeGreaterThan(reasoningMsgEndIdx);
1543
+ expect(runFinishedIdx).toBeGreaterThan(reasoningEndIdx);
1544
+
1545
+ expect(
1546
+ eventTypes.filter((t) => t === EventType.REASONING_MESSAGE_END),
1547
+ ).toHaveLength(1);
1548
+ expect(
1549
+ eventTypes.filter((t) => t === EventType.REASONING_END),
1550
+ ).toHaveLength(1);
1551
+
1552
+ expect(eventTypes[eventTypes.length - 1]).toBe(EventType.RUN_FINISHED);
1553
+ });
1554
+
1555
+ it("should handle reasoning interleaved with tool calls", async () => {
1556
+ const agent = new BasicAgent({
1557
+ model: "openai/gpt-4o",
1558
+ });
1559
+
1560
+ vi.mocked(streamText).mockReturnValue(
1561
+ mockStreamTextResponse([
1562
+ reasoningStart(),
1563
+ reasoningDelta("I need to call a tool"),
1564
+ reasoningEnd(),
1565
+ toolCallStreamingStart("call1", "testTool"),
1566
+ toolCallDelta("call1", '{"arg":"val"}'),
1567
+ toolCall("call1", "testTool", { arg: "val" }),
1568
+ toolResult("call1", "testTool", { result: "success" }),
1569
+ finish(),
1570
+ ]) as any,
1571
+ );
1572
+
1573
+ const input: RunAgentInput = {
1574
+ threadId: "thread1",
1575
+ runId: "run1",
1576
+ messages: [],
1577
+ tools: [],
1578
+ context: [],
1579
+ state: {},
1580
+ };
1581
+
1582
+ const events = await collectEvents(agent["run"](input));
1583
+ const eventTypes = events.map((e) => e.type);
1584
+
1585
+ // Reasoning events precede tool call events
1586
+ const reasoningEndIdx = eventTypes.indexOf(EventType.REASONING_END);
1587
+ const toolCallStartIdx = eventTypes.indexOf(EventType.TOOL_CALL_START);
1588
+ expect(reasoningEndIdx).toBeLessThan(toolCallStartIdx);
1589
+
1590
+ // Both lifecycles complete
1591
+ expect(eventTypes).toContain(EventType.REASONING_START);
1592
+ expect(eventTypes).toContain(EventType.REASONING_END);
1593
+ expect(eventTypes).toContain(EventType.TOOL_CALL_START);
1594
+ expect(eventTypes).toContain(EventType.TOOL_CALL_END);
1595
+ });
1596
+ });
1597
+
1598
+ describe("Provider Options", () => {
1599
+ it("should pass providerOptions to streamText", async () => {
1600
+ const agent = new BasicAgent({
1601
+ model: "openai/gpt-4o",
1602
+ providerOptions: {
1603
+ openai: { reasoningEffort: "high", reasoningSummary: "detailed" },
1604
+ },
1605
+ });
1606
+
1607
+ vi.mocked(streamText).mockReturnValue(
1608
+ mockStreamTextResponse([finish()]) as any,
1609
+ );
1610
+
1611
+ const input: RunAgentInput = {
1612
+ threadId: "thread1",
1613
+ runId: "run1",
1614
+ messages: [],
1615
+ tools: [],
1616
+ context: [],
1617
+ state: {},
1618
+ };
1619
+
1620
+ await collectEvents(agent["run"](input));
1621
+
1622
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
1623
+ expect(callArgs.providerOptions).toEqual({
1624
+ openai: { reasoningEffort: "high", reasoningSummary: "detailed" },
1625
+ });
1626
+ });
1627
+
1628
+ it("should allow providerOptions override via forwardedProps when overridable", async () => {
1629
+ const agent = new BasicAgent({
1630
+ model: "openai/gpt-4o",
1631
+ providerOptions: {
1632
+ openai: { reasoningEffort: "low" },
1633
+ },
1634
+ overridableProperties: ["providerOptions"],
1635
+ });
1636
+
1637
+ vi.mocked(streamText).mockReturnValue(
1638
+ mockStreamTextResponse([finish()]) as any,
1639
+ );
1640
+
1641
+ const input: RunAgentInput = {
1642
+ threadId: "thread1",
1643
+ runId: "run1",
1644
+ messages: [],
1645
+ tools: [],
1646
+ context: [],
1647
+ state: {},
1648
+ forwardedProps: {
1649
+ providerOptions: {
1650
+ openai: { reasoningEffort: "high" },
1651
+ },
1652
+ },
1653
+ };
1654
+
1655
+ await collectEvents(agent["run"](input));
1656
+
1657
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
1658
+ expect(callArgs.providerOptions).toEqual({
1659
+ openai: { reasoningEffort: "high" },
1660
+ });
1661
+ });
1662
+
1663
+ it("should NOT allow providerOptions override when not in overridableProperties", async () => {
1664
+ const agent = new BasicAgent({
1665
+ model: "openai/gpt-4o",
1666
+ providerOptions: {
1667
+ openai: { reasoningEffort: "low" },
1668
+ },
1669
+ overridableProperties: [],
1670
+ });
1671
+
1672
+ vi.mocked(streamText).mockReturnValue(
1673
+ mockStreamTextResponse([finish()]) as any,
1674
+ );
1675
+
1676
+ const input: RunAgentInput = {
1677
+ threadId: "thread1",
1678
+ runId: "run1",
1679
+ messages: [],
1680
+ tools: [],
1681
+ context: [],
1682
+ state: {},
1683
+ forwardedProps: {
1684
+ providerOptions: {
1685
+ openai: { reasoningEffort: "high" },
1686
+ },
1687
+ },
1688
+ };
1689
+
1690
+ await collectEvents(agent["run"](input));
1691
+
1692
+ const callArgs = vi.mocked(streamText).mock.calls[0][0];
1693
+ expect(callArgs.providerOptions).toEqual({
1694
+ openai: { reasoningEffort: "low" },
1695
+ });
1696
+ });
1697
+ });
1698
+ });