modelmix 2.2.8 → 2.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (542)
  1. package/README.md +19 -6
  2. package/demo/demo.mjs +24 -16
  3. package/demo/lmstudio.mjs +1 -1
  4. package/demo/node_modules/.package-lock.json +0 -47
  5. package/demo/package-lock.json +1 -50
  6. package/demo/package.json +1 -3
  7. package/demo/prompt.md +25 -0
  8. package/demo/stream.mjs +3 -3
  9. package/index.js +88 -2
  10. package/package.json +4 -2
  11. package/demo/node_modules/debug/LICENSE +0 -20
  12. package/demo/node_modules/debug/README.md +0 -481
  13. package/demo/node_modules/debug/node_modules/ms/index.js +0 -162
  14. package/demo/node_modules/debug/node_modules/ms/license.md +0 -21
  15. package/demo/node_modules/debug/node_modules/ms/package.json +0 -37
  16. package/demo/node_modules/debug/node_modules/ms/readme.md +0 -60
  17. package/demo/node_modules/debug/package.json +0 -59
  18. package/demo/node_modules/debug/src/browser.js +0 -269
  19. package/demo/node_modules/debug/src/common.js +0 -274
  20. package/demo/node_modules/debug/src/index.js +0 -10
  21. package/demo/node_modules/debug/src/node.js +0 -263
  22. package/demo/node_modules/lemonlog/README.md +0 -133
  23. package/demo/node_modules/lemonlog/demo/demo.js +0 -31
  24. package/demo/node_modules/lemonlog/index.js +0 -94
  25. package/demo/node_modules/lemonlog/package.json +0 -31
  26. package/demo/node_modules/openai/CHANGELOG.md +0 -1176
  27. package/demo/node_modules/openai/LICENSE +0 -201
  28. package/demo/node_modules/openai/README.md +0 -616
  29. package/demo/node_modules/openai/_shims/MultipartBody.d.ts +0 -9
  30. package/demo/node_modules/openai/_shims/MultipartBody.d.ts.map +0 -1
  31. package/demo/node_modules/openai/_shims/MultipartBody.js +0 -16
  32. package/demo/node_modules/openai/_shims/MultipartBody.js.map +0 -1
  33. package/demo/node_modules/openai/_shims/MultipartBody.mjs +0 -12
  34. package/demo/node_modules/openai/_shims/MultipartBody.mjs.map +0 -1
  35. package/demo/node_modules/openai/_shims/README.md +0 -46
  36. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts +0 -5
  37. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts.map +0 -1
  38. package/demo/node_modules/openai/_shims/auto/runtime-bun.js +0 -21
  39. package/demo/node_modules/openai/_shims/auto/runtime-bun.js.map +0 -1
  40. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs +0 -2
  41. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs.map +0 -1
  42. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts +0 -5
  43. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts.map +0 -1
  44. package/demo/node_modules/openai/_shims/auto/runtime-node.js +0 -21
  45. package/demo/node_modules/openai/_shims/auto/runtime-node.js.map +0 -1
  46. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs +0 -2
  47. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs.map +0 -1
  48. package/demo/node_modules/openai/_shims/auto/runtime.d.ts +0 -5
  49. package/demo/node_modules/openai/_shims/auto/runtime.d.ts.map +0 -1
  50. package/demo/node_modules/openai/_shims/auto/runtime.js +0 -21
  51. package/demo/node_modules/openai/_shims/auto/runtime.js.map +0 -1
  52. package/demo/node_modules/openai/_shims/auto/runtime.mjs +0 -2
  53. package/demo/node_modules/openai/_shims/auto/runtime.mjs.map +0 -1
  54. package/demo/node_modules/openai/_shims/auto/types-node.d.ts +0 -5
  55. package/demo/node_modules/openai/_shims/auto/types-node.d.ts.map +0 -1
  56. package/demo/node_modules/openai/_shims/auto/types-node.js +0 -21
  57. package/demo/node_modules/openai/_shims/auto/types-node.js.map +0 -1
  58. package/demo/node_modules/openai/_shims/auto/types-node.mjs +0 -2
  59. package/demo/node_modules/openai/_shims/auto/types-node.mjs.map +0 -1
  60. package/demo/node_modules/openai/_shims/auto/types.d.ts +0 -101
  61. package/demo/node_modules/openai/_shims/auto/types.js +0 -3
  62. package/demo/node_modules/openai/_shims/auto/types.mjs +0 -3
  63. package/demo/node_modules/openai/_shims/bun-runtime.d.ts +0 -6
  64. package/demo/node_modules/openai/_shims/bun-runtime.d.ts.map +0 -1
  65. package/demo/node_modules/openai/_shims/bun-runtime.js +0 -14
  66. package/demo/node_modules/openai/_shims/bun-runtime.js.map +0 -1
  67. package/demo/node_modules/openai/_shims/bun-runtime.mjs +0 -10
  68. package/demo/node_modules/openai/_shims/bun-runtime.mjs.map +0 -1
  69. package/demo/node_modules/openai/_shims/index.d.ts +0 -81
  70. package/demo/node_modules/openai/_shims/index.js +0 -13
  71. package/demo/node_modules/openai/_shims/index.mjs +0 -7
  72. package/demo/node_modules/openai/_shims/manual-types.d.ts +0 -12
  73. package/demo/node_modules/openai/_shims/manual-types.js +0 -3
  74. package/demo/node_modules/openai/_shims/manual-types.mjs +0 -3
  75. package/demo/node_modules/openai/_shims/node-runtime.d.ts +0 -3
  76. package/demo/node_modules/openai/_shims/node-runtime.d.ts.map +0 -1
  77. package/demo/node_modules/openai/_shims/node-runtime.js +0 -90
  78. package/demo/node_modules/openai/_shims/node-runtime.js.map +0 -1
  79. package/demo/node_modules/openai/_shims/node-runtime.mjs +0 -56
  80. package/demo/node_modules/openai/_shims/node-runtime.mjs.map +0 -1
  81. package/demo/node_modules/openai/_shims/node-types.d.ts +0 -42
  82. package/demo/node_modules/openai/_shims/node-types.js +0 -3
  83. package/demo/node_modules/openai/_shims/node-types.mjs +0 -3
  84. package/demo/node_modules/openai/_shims/registry.d.ts +0 -37
  85. package/demo/node_modules/openai/_shims/registry.d.ts.map +0 -1
  86. package/demo/node_modules/openai/_shims/registry.js +0 -41
  87. package/demo/node_modules/openai/_shims/registry.js.map +0 -1
  88. package/demo/node_modules/openai/_shims/registry.mjs +0 -37
  89. package/demo/node_modules/openai/_shims/registry.mjs.map +0 -1
  90. package/demo/node_modules/openai/_shims/web-runtime.d.ts +0 -5
  91. package/demo/node_modules/openai/_shims/web-runtime.d.ts.map +0 -1
  92. package/demo/node_modules/openai/_shims/web-runtime.js +0 -78
  93. package/demo/node_modules/openai/_shims/web-runtime.js.map +0 -1
  94. package/demo/node_modules/openai/_shims/web-runtime.mjs +0 -71
  95. package/demo/node_modules/openai/_shims/web-runtime.mjs.map +0 -1
  96. package/demo/node_modules/openai/_shims/web-types.d.ts +0 -83
  97. package/demo/node_modules/openai/_shims/web-types.js +0 -3
  98. package/demo/node_modules/openai/_shims/web-types.mjs +0 -3
  99. package/demo/node_modules/openai/bin/cli +0 -49
  100. package/demo/node_modules/openai/core.d.ts +0 -239
  101. package/demo/node_modules/openai/core.d.ts.map +0 -1
  102. package/demo/node_modules/openai/core.js +0 -879
  103. package/demo/node_modules/openai/core.js.map +0 -1
  104. package/demo/node_modules/openai/core.mjs +0 -848
  105. package/demo/node_modules/openai/core.mjs.map +0 -1
  106. package/demo/node_modules/openai/error.d.ts +0 -57
  107. package/demo/node_modules/openai/error.d.ts.map +0 -1
  108. package/demo/node_modules/openai/error.js +0 -148
  109. package/demo/node_modules/openai/error.js.map +0 -1
  110. package/demo/node_modules/openai/error.mjs +0 -132
  111. package/demo/node_modules/openai/error.mjs.map +0 -1
  112. package/demo/node_modules/openai/index.d.mts +0 -267
  113. package/demo/node_modules/openai/index.d.ts +0 -267
  114. package/demo/node_modules/openai/index.d.ts.map +0 -1
  115. package/demo/node_modules/openai/index.js +0 -262
  116. package/demo/node_modules/openai/index.js.map +0 -1
  117. package/demo/node_modules/openai/index.mjs +0 -232
  118. package/demo/node_modules/openai/index.mjs.map +0 -1
  119. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts +0 -74
  120. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts.map +0 -1
  121. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js +0 -246
  122. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js.map +0 -1
  123. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs +0 -242
  124. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs.map +0 -1
  125. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +0 -114
  126. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts.map +0 -1
  127. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js +0 -519
  128. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js.map +0 -1
  129. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs +0 -515
  130. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs.map +0 -1
  131. package/demo/node_modules/openai/lib/AssistantStream.d.ts +0 -58
  132. package/demo/node_modules/openai/lib/AssistantStream.d.ts.map +0 -1
  133. package/demo/node_modules/openai/lib/AssistantStream.js +0 -548
  134. package/demo/node_modules/openai/lib/AssistantStream.js.map +0 -1
  135. package/demo/node_modules/openai/lib/AssistantStream.mjs +0 -521
  136. package/demo/node_modules/openai/lib/AssistantStream.mjs.map +0 -1
  137. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts +0 -2
  138. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts.map +0 -1
  139. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js +0 -2177
  140. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js.map +0 -1
  141. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs +0 -2172
  142. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs.map +0 -1
  143. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts +0 -19
  144. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts.map +0 -1
  145. package/demo/node_modules/openai/lib/ChatCompletionRunner.js +0 -34
  146. package/demo/node_modules/openai/lib/ChatCompletionRunner.js.map +0 -1
  147. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs +0 -30
  148. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs.map +0 -1
  149. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts +0 -149
  150. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts.map +0 -1
  151. package/demo/node_modules/openai/lib/ChatCompletionStream.js +0 -312
  152. package/demo/node_modules/openai/lib/ChatCompletionStream.js.map +0 -1
  153. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs +0 -308
  154. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs.map +0 -1
  155. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +0 -22
  156. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts.map +0 -1
  157. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js +0 -32
  158. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js.map +0 -1
  159. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs +0 -28
  160. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs.map +0 -1
  161. package/demo/node_modules/openai/lib/RunnableFunction.d.ts +0 -95
  162. package/demo/node_modules/openai/lib/RunnableFunction.d.ts.map +0 -1
  163. package/demo/node_modules/openai/lib/RunnableFunction.js +0 -35
  164. package/demo/node_modules/openai/lib/RunnableFunction.js.map +0 -1
  165. package/demo/node_modules/openai/lib/RunnableFunction.mjs +0 -29
  166. package/demo/node_modules/openai/lib/RunnableFunction.mjs.map +0 -1
  167. package/demo/node_modules/openai/lib/Util.d.ts +0 -5
  168. package/demo/node_modules/openai/lib/Util.d.ts.map +0 -1
  169. package/demo/node_modules/openai/lib/Util.js +0 -26
  170. package/demo/node_modules/openai/lib/Util.js.map +0 -1
  171. package/demo/node_modules/openai/lib/Util.mjs +0 -22
  172. package/demo/node_modules/openai/lib/Util.mjs.map +0 -1
  173. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts +0 -6
  174. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts.map +0 -1
  175. package/demo/node_modules/openai/lib/chatCompletionUtils.js +0 -20
  176. package/demo/node_modules/openai/lib/chatCompletionUtils.js.map +0 -1
  177. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs +0 -13
  178. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs.map +0 -1
  179. package/demo/node_modules/openai/lib/jsonschema.d.ts +0 -106
  180. package/demo/node_modules/openai/lib/jsonschema.d.ts.map +0 -1
  181. package/demo/node_modules/openai/lib/jsonschema.js +0 -11
  182. package/demo/node_modules/openai/lib/jsonschema.js.map +0 -1
  183. package/demo/node_modules/openai/lib/jsonschema.mjs +0 -10
  184. package/demo/node_modules/openai/lib/jsonschema.mjs.map +0 -1
  185. package/demo/node_modules/openai/package.json +0 -105
  186. package/demo/node_modules/openai/pagination.d.ts +0 -37
  187. package/demo/node_modules/openai/pagination.d.ts.map +0 -1
  188. package/demo/node_modules/openai/pagination.js +0 -64
  189. package/demo/node_modules/openai/pagination.js.map +0 -1
  190. package/demo/node_modules/openai/pagination.mjs +0 -59
  191. package/demo/node_modules/openai/pagination.mjs.map +0 -1
  192. package/demo/node_modules/openai/resource.d.ts +0 -6
  193. package/demo/node_modules/openai/resource.d.ts.map +0 -1
  194. package/demo/node_modules/openai/resource.js +0 -11
  195. package/demo/node_modules/openai/resource.js.map +0 -1
  196. package/demo/node_modules/openai/resource.mjs +0 -7
  197. package/demo/node_modules/openai/resource.mjs.map +0 -1
  198. package/demo/node_modules/openai/resources/audio/audio.d.ts +0 -20
  199. package/demo/node_modules/openai/resources/audio/audio.d.ts.map +0 -1
  200. package/demo/node_modules/openai/resources/audio/audio.js +0 -46
  201. package/demo/node_modules/openai/resources/audio/audio.js.map +0 -1
  202. package/demo/node_modules/openai/resources/audio/audio.mjs +0 -19
  203. package/demo/node_modules/openai/resources/audio/audio.mjs.map +0 -1
  204. package/demo/node_modules/openai/resources/audio/index.d.ts +0 -5
  205. package/demo/node_modules/openai/resources/audio/index.d.ts.map +0 -1
  206. package/demo/node_modules/openai/resources/audio/index.js +0 -13
  207. package/demo/node_modules/openai/resources/audio/index.js.map +0 -1
  208. package/demo/node_modules/openai/resources/audio/index.mjs +0 -6
  209. package/demo/node_modules/openai/resources/audio/index.mjs.map +0 -1
  210. package/demo/node_modules/openai/resources/audio/speech.d.ts +0 -42
  211. package/demo/node_modules/openai/resources/audio/speech.d.ts.map +0 -1
  212. package/demo/node_modules/openai/resources/audio/speech.js +0 -17
  213. package/demo/node_modules/openai/resources/audio/speech.js.map +0 -1
  214. package/demo/node_modules/openai/resources/audio/speech.mjs +0 -13
  215. package/demo/node_modules/openai/resources/audio/speech.mjs.map +0 -1
  216. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts +0 -71
  217. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts.map +0 -1
  218. package/demo/node_modules/openai/resources/audio/transcriptions.js +0 -18
  219. package/demo/node_modules/openai/resources/audio/transcriptions.js.map +0 -1
  220. package/demo/node_modules/openai/resources/audio/transcriptions.mjs +0 -14
  221. package/demo/node_modules/openai/resources/audio/transcriptions.mjs.map +0 -1
  222. package/demo/node_modules/openai/resources/audio/translations.d.ts +0 -50
  223. package/demo/node_modules/openai/resources/audio/translations.d.ts.map +0 -1
  224. package/demo/node_modules/openai/resources/audio/translations.js +0 -18
  225. package/demo/node_modules/openai/resources/audio/translations.js.map +0 -1
  226. package/demo/node_modules/openai/resources/audio/translations.mjs +0 -14
  227. package/demo/node_modules/openai/resources/audio/translations.mjs.map +0 -1
  228. package/demo/node_modules/openai/resources/batches.d.ts +0 -189
  229. package/demo/node_modules/openai/resources/batches.d.ts.map +0 -1
  230. package/demo/node_modules/openai/resources/batches.js +0 -65
  231. package/demo/node_modules/openai/resources/batches.js.map +0 -1
  232. package/demo/node_modules/openai/resources/batches.mjs +0 -37
  233. package/demo/node_modules/openai/resources/batches.mjs.map +0 -1
  234. package/demo/node_modules/openai/resources/beta/assistants.d.ts +0 -1048
  235. package/demo/node_modules/openai/resources/beta/assistants.d.ts.map +0 -1
  236. package/demo/node_modules/openai/resources/beta/assistants.js +0 -89
  237. package/demo/node_modules/openai/resources/beta/assistants.js.map +0 -1
  238. package/demo/node_modules/openai/resources/beta/assistants.mjs +0 -61
  239. package/demo/node_modules/openai/resources/beta/assistants.mjs.map +0 -1
  240. package/demo/node_modules/openai/resources/beta/beta.d.ts +0 -53
  241. package/demo/node_modules/openai/resources/beta/beta.d.ts.map +0 -1
  242. package/demo/node_modules/openai/resources/beta/beta.js +0 -51
  243. package/demo/node_modules/openai/resources/beta/beta.js.map +0 -1
  244. package/demo/node_modules/openai/resources/beta/beta.mjs +0 -24
  245. package/demo/node_modules/openai/resources/beta/beta.mjs.map +0 -1
  246. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts +0 -9
  247. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts.map +0 -1
  248. package/demo/node_modules/openai/resources/beta/chat/chat.js +0 -40
  249. package/demo/node_modules/openai/resources/beta/chat/chat.js.map +0 -1
  250. package/demo/node_modules/openai/resources/beta/chat/chat.mjs +0 -13
  251. package/demo/node_modules/openai/resources/beta/chat/chat.mjs.map +0 -1
  252. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts +0 -37
  253. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts.map +0 -1
  254. package/demo/node_modules/openai/resources/beta/chat/completions.js +0 -39
  255. package/demo/node_modules/openai/resources/beta/chat/completions.js.map +0 -1
  256. package/demo/node_modules/openai/resources/beta/chat/completions.mjs +0 -30
  257. package/demo/node_modules/openai/resources/beta/chat/completions.mjs.map +0 -1
  258. package/demo/node_modules/openai/resources/beta/chat/index.d.ts +0 -3
  259. package/demo/node_modules/openai/resources/beta/chat/index.d.ts.map +0 -1
  260. package/demo/node_modules/openai/resources/beta/chat/index.js +0 -9
  261. package/demo/node_modules/openai/resources/beta/chat/index.js.map +0 -1
  262. package/demo/node_modules/openai/resources/beta/chat/index.mjs +0 -4
  263. package/demo/node_modules/openai/resources/beta/chat/index.mjs.map +0 -1
  264. package/demo/node_modules/openai/resources/beta/index.d.ts +0 -6
  265. package/demo/node_modules/openai/resources/beta/index.d.ts.map +0 -1
  266. package/demo/node_modules/openai/resources/beta/index.js +0 -17
  267. package/demo/node_modules/openai/resources/beta/index.js.map +0 -1
  268. package/demo/node_modules/openai/resources/beta/index.mjs +0 -7
  269. package/demo/node_modules/openai/resources/beta/index.mjs.map +0 -1
  270. package/demo/node_modules/openai/resources/beta/threads/index.d.ts +0 -4
  271. package/demo/node_modules/openai/resources/beta/threads/index.d.ts.map +0 -1
  272. package/demo/node_modules/openai/resources/beta/threads/index.js +0 -13
  273. package/demo/node_modules/openai/resources/beta/threads/index.js.map +0 -1
  274. package/demo/node_modules/openai/resources/beta/threads/index.mjs +0 -5
  275. package/demo/node_modules/openai/resources/beta/threads/index.mjs.map +0 -1
  276. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts +0 -552
  277. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts.map +0 -1
  278. package/demo/node_modules/openai/resources/beta/threads/messages.js +0 -89
  279. package/demo/node_modules/openai/resources/beta/threads/messages.js.map +0 -1
  280. package/demo/node_modules/openai/resources/beta/threads/messages.mjs +0 -61
  281. package/demo/node_modules/openai/resources/beta/threads/messages.mjs.map +0 -1
  282. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts +0 -3
  283. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts.map +0 -1
  284. package/demo/node_modules/openai/resources/beta/threads/runs/index.js +0 -11
  285. package/demo/node_modules/openai/resources/beta/threads/runs/index.js.map +0 -1
  286. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs +0 -4
  287. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs.map +0 -1
  288. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts +0 -1194
  289. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts.map +0 -1
  290. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js +0 -190
  291. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js.map +0 -1
  292. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs +0 -162
  293. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs.map +0 -1
  294. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts +0 -520
  295. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts.map +0 -1
  296. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js +0 -60
  297. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js.map +0 -1
  298. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs +0 -32
  299. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs.map +0 -1
  300. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts +0 -1240
  301. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts.map +0 -1
  302. package/demo/node_modules/openai/resources/beta/threads/threads.js +0 -108
  303. package/demo/node_modules/openai/resources/beta/threads/threads.js.map +0 -1
  304. package/demo/node_modules/openai/resources/beta/threads/threads.mjs +0 -81
  305. package/demo/node_modules/openai/resources/beta/threads/threads.mjs.map +0 -1
  306. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts +0 -142
  307. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts.map +0 -1
  308. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js +0 -129
  309. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js.map +0 -1
  310. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs +0 -125
  311. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs.map +0 -1
  312. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts +0 -154
  313. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts.map +0 -1
  314. package/demo/node_modules/openai/resources/beta/vector-stores/files.js +0 -149
  315. package/demo/node_modules/openai/resources/beta/vector-stores/files.js.map +0 -1
  316. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs +0 -121
  317. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs.map +0 -1
  318. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts +0 -4
  319. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts.map +0 -1
  320. package/demo/node_modules/openai/resources/beta/vector-stores/index.js +0 -13
  321. package/demo/node_modules/openai/resources/beta/vector-stores/index.js.map +0 -1
  322. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs +0 -5
  323. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs.map +0 -1
  324. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts +0 -233
  325. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts.map +0 -1
  326. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js +0 -99
  327. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js.map +0 -1
  328. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs +0 -71
  329. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs.map +0 -1
  330. package/demo/node_modules/openai/resources/chat/chat.d.ts +0 -42
  331. package/demo/node_modules/openai/resources/chat/chat.d.ts.map +0 -1
  332. package/demo/node_modules/openai/resources/chat/chat.js +0 -40
  333. package/demo/node_modules/openai/resources/chat/chat.js.map +0 -1
  334. package/demo/node_modules/openai/resources/chat/chat.mjs +0 -13
  335. package/demo/node_modules/openai/resources/chat/chat.mjs.map +0 -1
  336. package/demo/node_modules/openai/resources/chat/completions.d.ts +0 -845
  337. package/demo/node_modules/openai/resources/chat/completions.d.ts.map +0 -1
  338. package/demo/node_modules/openai/resources/chat/completions.js +0 -14
  339. package/demo/node_modules/openai/resources/chat/completions.js.map +0 -1
  340. package/demo/node_modules/openai/resources/chat/completions.mjs +0 -10
  341. package/demo/node_modules/openai/resources/chat/completions.mjs.map +0 -1
  342. package/demo/node_modules/openai/resources/chat/index.d.ts +0 -3
  343. package/demo/node_modules/openai/resources/chat/index.d.ts.map +0 -1
  344. package/demo/node_modules/openai/resources/chat/index.js +0 -9
  345. package/demo/node_modules/openai/resources/chat/index.js.map +0 -1
  346. package/demo/node_modules/openai/resources/chat/index.mjs +0 -4
  347. package/demo/node_modules/openai/resources/chat/index.mjs.map +0 -1
  348. package/demo/node_modules/openai/resources/completions.d.ts +0 -272
  349. package/demo/node_modules/openai/resources/completions.d.ts.map +0 -1
  350. package/demo/node_modules/openai/resources/completions.js +0 -14
  351. package/demo/node_modules/openai/resources/completions.js.map +0 -1
  352. package/demo/node_modules/openai/resources/completions.mjs +0 -10
  353. package/demo/node_modules/openai/resources/completions.mjs.map +0 -1
  354. package/demo/node_modules/openai/resources/embeddings.d.ts +0 -103
  355. package/demo/node_modules/openai/resources/embeddings.d.ts.map +0 -1
  356. package/demo/node_modules/openai/resources/embeddings.js +0 -17
  357. package/demo/node_modules/openai/resources/embeddings.js.map +0 -1
  358. package/demo/node_modules/openai/resources/embeddings.mjs +0 -13
  359. package/demo/node_modules/openai/resources/embeddings.mjs.map +0 -1
  360. package/demo/node_modules/openai/resources/files.d.ts +0 -140
  361. package/demo/node_modules/openai/resources/files.d.ts.map +0 -1
  362. package/demo/node_modules/openai/resources/files.js +0 -120
  363. package/demo/node_modules/openai/resources/files.js.map +0 -1
  364. package/demo/node_modules/openai/resources/files.mjs +0 -92
  365. package/demo/node_modules/openai/resources/files.mjs.map +0 -1
  366. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +0 -19
  367. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts.map +0 -1
  368. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js +0 -42
  369. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js.map +0 -1
  370. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs +0 -15
  371. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs.map +0 -1
  372. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts +0 -3
  373. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts.map +0 -1
  374. package/demo/node_modules/openai/resources/fine-tuning/index.js +0 -11
  375. package/demo/node_modules/openai/resources/fine-tuning/index.js.map +0 -1
  376. package/demo/node_modules/openai/resources/fine-tuning/index.mjs +0 -4
  377. package/demo/node_modules/openai/resources/fine-tuning/index.mjs.map +0 -1
  378. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +0 -69
  379. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts.map +0 -1
  380. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js +0 -47
  381. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js.map +0 -1
  382. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs +0 -19
  383. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs.map +0 -1
  384. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +0 -3
  385. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts.map +0 -1
  386. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js +0 -12
  387. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js.map +0 -1
  388. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs +0 -4
  389. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs.map +0 -1
  390. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +0 -362
  391. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts.map +0 -1
  392. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js +0 -93
  393. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js.map +0 -1
  394. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs +0 -64
  395. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs.map +0 -1
  396. package/demo/node_modules/openai/resources/images.d.ts +0 -174
  397. package/demo/node_modules/openai/resources/images.d.ts.map +0 -1
  398. package/demo/node_modules/openai/resources/images.js +0 -30
  399. package/demo/node_modules/openai/resources/images.js.map +0 -1
  400. package/demo/node_modules/openai/resources/images.mjs +0 -26
  401. package/demo/node_modules/openai/resources/images.mjs.map +0 -1
  402. package/demo/node_modules/openai/resources/index.d.ts +0 -13
  403. package/demo/node_modules/openai/resources/index.d.ts.map +0 -1
  404. package/demo/node_modules/openai/resources/index.js +0 -44
  405. package/demo/node_modules/openai/resources/index.js.map +0 -1
  406. package/demo/node_modules/openai/resources/index.mjs +0 -14
  407. package/demo/node_modules/openai/resources/index.mjs.map +0 -1
  408. package/demo/node_modules/openai/resources/models.d.ts +0 -58
  409. package/demo/node_modules/openai/resources/models.d.ts.map +0 -1
  410. package/demo/node_modules/openai/resources/models.js +0 -64
  411. package/demo/node_modules/openai/resources/models.js.map +0 -1
  412. package/demo/node_modules/openai/resources/models.mjs +0 -36
  413. package/demo/node_modules/openai/resources/models.mjs.map +0 -1
  414. package/demo/node_modules/openai/resources/moderations.d.ts +0 -176
  415. package/demo/node_modules/openai/resources/moderations.d.ts.map +0 -1
  416. package/demo/node_modules/openai/resources/moderations.js +0 -17
  417. package/demo/node_modules/openai/resources/moderations.js.map +0 -1
  418. package/demo/node_modules/openai/resources/moderations.mjs +0 -13
  419. package/demo/node_modules/openai/resources/moderations.mjs.map +0 -1
  420. package/demo/node_modules/openai/resources/shared.d.ts +0 -39
  421. package/demo/node_modules/openai/resources/shared.d.ts.map +0 -1
  422. package/demo/node_modules/openai/resources/shared.js +0 -4
  423. package/demo/node_modules/openai/resources/shared.js.map +0 -1
  424. package/demo/node_modules/openai/resources/shared.mjs +0 -3
  425. package/demo/node_modules/openai/resources/shared.mjs.map +0 -1
  426. package/demo/node_modules/openai/shims/node.d.ts +0 -29
  427. package/demo/node_modules/openai/shims/node.d.ts.map +0 -1
  428. package/demo/node_modules/openai/shims/node.js +0 -31
  429. package/demo/node_modules/openai/shims/node.js.map +0 -1
  430. package/demo/node_modules/openai/shims/node.mjs +0 -5
  431. package/demo/node_modules/openai/shims/node.mjs.map +0 -1
  432. package/demo/node_modules/openai/shims/web.d.ts +0 -26
  433. package/demo/node_modules/openai/shims/web.d.ts.map +0 -1
  434. package/demo/node_modules/openai/shims/web.js +0 -31
  435. package/demo/node_modules/openai/shims/web.js.map +0 -1
  436. package/demo/node_modules/openai/shims/web.mjs +0 -5
  437. package/demo/node_modules/openai/shims/web.mjs.map +0 -1
  438. package/demo/node_modules/openai/src/_shims/MultipartBody.ts +0 -9
  439. package/demo/node_modules/openai/src/_shims/README.md +0 -46
  440. package/demo/node_modules/openai/src/_shims/auto/runtime-bun.ts +0 -4
  441. package/demo/node_modules/openai/src/_shims/auto/runtime-node.ts +0 -4
  442. package/demo/node_modules/openai/src/_shims/auto/runtime.ts +0 -4
  443. package/demo/node_modules/openai/src/_shims/auto/types-node.ts +0 -4
  444. package/demo/node_modules/openai/src/_shims/auto/types.d.ts +0 -101
  445. package/demo/node_modules/openai/src/_shims/auto/types.js +0 -3
  446. package/demo/node_modules/openai/src/_shims/auto/types.mjs +0 -3
  447. package/demo/node_modules/openai/src/_shims/bun-runtime.ts +0 -14
  448. package/demo/node_modules/openai/src/_shims/index.d.ts +0 -81
  449. package/demo/node_modules/openai/src/_shims/index.js +0 -13
  450. package/demo/node_modules/openai/src/_shims/index.mjs +0 -7
  451. package/demo/node_modules/openai/src/_shims/manual-types.d.ts +0 -12
  452. package/demo/node_modules/openai/src/_shims/manual-types.js +0 -3
  453. package/demo/node_modules/openai/src/_shims/manual-types.mjs +0 -3
  454. package/demo/node_modules/openai/src/_shims/node-runtime.ts +0 -83
  455. package/demo/node_modules/openai/src/_shims/node-types.d.ts +0 -42
  456. package/demo/node_modules/openai/src/_shims/node-types.js +0 -3
  457. package/demo/node_modules/openai/src/_shims/node-types.mjs +0 -3
  458. package/demo/node_modules/openai/src/_shims/registry.ts +0 -65
  459. package/demo/node_modules/openai/src/_shims/web-runtime.ts +0 -103
  460. package/demo/node_modules/openai/src/_shims/web-types.d.ts +0 -83
  461. package/demo/node_modules/openai/src/_shims/web-types.js +0 -3
  462. package/demo/node_modules/openai/src/_shims/web-types.mjs +0 -3
  463. package/demo/node_modules/openai/src/core.ts +0 -1162
  464. package/demo/node_modules/openai/src/error.ts +0 -158
  465. package/demo/node_modules/openai/src/index.ts +0 -502
  466. package/demo/node_modules/openai/src/lib/.keep +0 -4
  467. package/demo/node_modules/openai/src/lib/AbstractAssistantStreamRunner.ts +0 -340
  468. package/demo/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts +0 -682
  469. package/demo/node_modules/openai/src/lib/AssistantStream.ts +0 -723
  470. package/demo/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts +0 -2328
  471. package/demo/node_modules/openai/src/lib/ChatCompletionRunner.ts +0 -68
  472. package/demo/node_modules/openai/src/lib/ChatCompletionStream.ts +0 -494
  473. package/demo/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts +0 -68
  474. package/demo/node_modules/openai/src/lib/RunnableFunction.ts +0 -134
  475. package/demo/node_modules/openai/src/lib/Util.ts +0 -23
  476. package/demo/node_modules/openai/src/lib/chatCompletionUtils.ts +0 -28
  477. package/demo/node_modules/openai/src/lib/jsonschema.ts +0 -148
  478. package/demo/node_modules/openai/src/pagination.ts +0 -98
  479. package/demo/node_modules/openai/src/resource.ts +0 -11
  480. package/demo/node_modules/openai/src/resources/audio/audio.ts +0 -23
  481. package/demo/node_modules/openai/src/resources/audio/index.ts +0 -6
  482. package/demo/node_modules/openai/src/resources/audio/speech.ts +0 -52
  483. package/demo/node_modules/openai/src/resources/audio/transcriptions.ts +0 -84
  484. package/demo/node_modules/openai/src/resources/audio/translations.ts +0 -61
  485. package/demo/node_modules/openai/src/resources/batches.ts +0 -252
  486. package/demo/node_modules/openai/src/resources/beta/assistants.ts +0 -1315
  487. package/demo/node_modules/openai/src/resources/beta/beta.ts +0 -56
  488. package/demo/node_modules/openai/src/resources/beta/chat/chat.ts +0 -12
  489. package/demo/node_modules/openai/src/resources/beta/chat/completions.ts +0 -106
  490. package/demo/node_modules/openai/src/resources/beta/chat/index.ts +0 -4
  491. package/demo/node_modules/openai/src/resources/beta/index.ts +0 -48
  492. package/demo/node_modules/openai/src/resources/beta/threads/index.ts +0 -72
  493. package/demo/node_modules/openai/src/resources/beta/threads/messages.ts +0 -706
  494. package/demo/node_modules/openai/src/resources/beta/threads/runs/index.ts +0 -44
  495. package/demo/node_modules/openai/src/resources/beta/threads/runs/runs.ts +0 -1627
  496. package/demo/node_modules/openai/src/resources/beta/threads/runs/steps.ts +0 -641
  497. package/demo/node_modules/openai/src/resources/beta/threads/threads.ts +0 -1536
  498. package/demo/node_modules/openai/src/resources/beta/vector-stores/file-batches.ts +0 -293
  499. package/demo/node_modules/openai/src/resources/beta/vector-stores/files.ts +0 -284
  500. package/demo/node_modules/openai/src/resources/beta/vector-stores/index.ts +0 -25
  501. package/demo/node_modules/openai/src/resources/beta/vector-stores/vector-stores.ts +0 -318
  502. package/demo/node_modules/openai/src/resources/chat/chat.ts +0 -67
  503. package/demo/node_modules/openai/src/resources/chat/completions.ts +0 -996
  504. package/demo/node_modules/openai/src/resources/chat/index.ts +0 -33
  505. package/demo/node_modules/openai/src/resources/completions.ts +0 -329
  506. package/demo/node_modules/openai/src/resources/embeddings.ts +0 -125
  507. package/demo/node_modules/openai/src/resources/files.ts +0 -214
  508. package/demo/node_modules/openai/src/resources/fine-tuning/fine-tuning.ts +0 -22
  509. package/demo/node_modules/openai/src/resources/fine-tuning/index.ts +0 -16
  510. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/checkpoints.ts +0 -108
  511. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/index.ts +0 -21
  512. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/jobs.ts +0 -458
  513. package/demo/node_modules/openai/src/resources/images.ts +0 -215
  514. package/demo/node_modules/openai/src/resources/index.ts +0 -45
  515. package/demo/node_modules/openai/src/resources/models.ts +0 -76
  516. package/demo/node_modules/openai/src/resources/moderations.ts +0 -214
  517. package/demo/node_modules/openai/src/resources/shared.ts +0 -47
  518. package/demo/node_modules/openai/src/shims/node.ts +0 -50
  519. package/demo/node_modules/openai/src/shims/web.ts +0 -50
  520. package/demo/node_modules/openai/src/streaming.ts +0 -508
  521. package/demo/node_modules/openai/src/tsconfig.json +0 -11
  522. package/demo/node_modules/openai/src/uploads.ts +0 -248
  523. package/demo/node_modules/openai/src/version.ts +0 -1
  524. package/demo/node_modules/openai/streaming.d.ts +0 -41
  525. package/demo/node_modules/openai/streaming.d.ts.map +0 -1
  526. package/demo/node_modules/openai/streaming.js +0 -433
  527. package/demo/node_modules/openai/streaming.js.map +0 -1
  528. package/demo/node_modules/openai/streaming.mjs +0 -426
  529. package/demo/node_modules/openai/streaming.mjs.map +0 -1
  530. package/demo/node_modules/openai/uploads.d.ts +0 -75
  531. package/demo/node_modules/openai/uploads.d.ts.map +0 -1
  532. package/demo/node_modules/openai/uploads.js +0 -165
  533. package/demo/node_modules/openai/uploads.js.map +0 -1
  534. package/demo/node_modules/openai/uploads.mjs +0 -152
  535. package/demo/node_modules/openai/uploads.mjs.map +0 -1
  536. package/demo/node_modules/openai/version.d.ts +0 -2
  537. package/demo/node_modules/openai/version.d.ts.map +0 -1
  538. package/demo/node_modules/openai/version.js +0 -5
  539. package/demo/node_modules/openai/version.js.map +0 -1
  540. package/demo/node_modules/openai/version.mjs +0 -2
  541. package/demo/node_modules/openai/version.mjs.map +0 -1
  542. package/demo/watson.png +0 -0
@@ -1,2172 +0,0 @@
1
- import OpenAI from 'openai';
2
- import { APIConnectionError } from 'openai/error';
3
- import { PassThrough } from 'stream';
4
- import { ParsingToolFunction, ChatCompletionStreamingRunner, } from 'openai/resources/beta/chat/completions';
5
- import { Response } from 'node-fetch';
6
- import { isAssistantMessage } from "./chatCompletionUtils.mjs";
7
- /**
8
- * Creates a mock `fetch` function and a `handleRequest` function for intercepting `fetch` calls.
9
- *
10
- * You call `handleRequest` with a callback function that handles the next `fetch` call.
11
- * It returns a Promise that:
12
- * - waits for the next call to `fetch`
13
- * - calls the callback with the `fetch` arguments
14
- * - resolves `fetch` with the callback output
15
- */
16
- function mockFetch() {
17
- const fetchQueue = [];
18
- const handlerQueue = [];
19
- const enqueueHandler = () => {
20
- handlerQueue.push(new Promise((resolve) => {
21
- fetchQueue.push((handle) => {
22
- enqueueHandler();
23
- resolve(handle);
24
- });
25
- }));
26
- };
27
- enqueueHandler();
28
- async function fetch(req, init) {
29
- const handler = await handlerQueue.shift();
30
- if (!handler)
31
- throw new Error('expected handler to be defined');
32
- const signal = init?.signal;
33
- if (!signal)
34
- return await handler(req, init);
35
- return await Promise.race([
36
- handler(req, init),
37
- new Promise((resolve, reject) => {
38
- if (signal.aborted) {
39
- // @ts-ignore does exist in Node
40
- reject(new DOMException('The user aborted a request.', 'AbortError'));
41
- return;
42
- }
43
- signal.addEventListener('abort', (e) => {
44
- // @ts-ignore does exist in Node
45
- reject(new DOMException('The user aborted a request.', 'AbortError'));
46
- });
47
- }),
48
- ]);
49
- }
50
- function handleRequest(handle) {
51
- return new Promise((resolve, reject) => {
52
- fetchQueue.shift()?.(async (req, init) => {
53
- try {
54
- return await handle(req, init);
55
- }
56
- catch (err) {
57
- reject(err);
58
- return err;
59
- }
60
- finally {
61
- resolve();
62
- }
63
- });
64
- });
65
- }
66
- return { fetch, handleRequest };
67
- }
68
- // mockChatCompletionFetch is like mockFetch, but with better a more convenient handleRequest to mock
69
- // chat completion request/responses.
70
- function mockChatCompletionFetch() {
71
- const { fetch, handleRequest: handleRawRequest } = mockFetch();
72
- function handleRequest(handler) {
73
- return handleRawRequest(async (req, init) => {
74
- const rawBody = init?.body;
75
- if (typeof rawBody !== 'string')
76
- throw new Error(`expected init.body to be a string`);
77
- const body = JSON.parse(rawBody);
78
- return new Response(JSON.stringify(await handler(body)), {
79
- headers: { 'Content-Type': 'application/json' },
80
- });
81
- });
82
- }
83
- return { fetch, handleRequest };
84
- }
85
- // mockStreamingChatCompletionFetch is like mockFetch, but with better a more convenient handleRequest to mock
86
- // streaming chat completion request/responses.
87
- function mockStreamingChatCompletionFetch() {
88
- const { fetch, handleRequest: handleRawRequest } = mockFetch();
89
- function handleRequest(handler) {
90
- return handleRawRequest(async (req, init) => {
91
- const rawBody = init?.body;
92
- if (typeof rawBody !== 'string')
93
- throw new Error(`expected init.body to be a string`);
94
- const body = JSON.parse(rawBody);
95
- const stream = new PassThrough();
96
- (async () => {
97
- for await (const chunk of handler(body)) {
98
- stream.write(`data: ${JSON.stringify(chunk)}\n\n`);
99
- }
100
- stream.end(`data: [DONE]\n\n`);
101
- })();
102
- return new Response(stream, {
103
- headers: {
104
- 'Content-Type': 'text/event-stream',
105
- 'Transfer-Encoding': 'chunked',
106
- },
107
- });
108
- });
109
- }
110
- return { fetch, handleRequest };
111
- }
112
- // contentChoiceDeltas returns an async iterator which mocks a delta stream of a by splitting the
113
- // argument into chunks separated by whitespace.
114
- function* contentChoiceDeltas(content, { index = 0, role = 'assistant', } = {}) {
115
- const deltas = content.split(/\s+/g);
116
- for (let i = 0; i < deltas.length; i++) {
117
- yield {
118
- index,
119
- finish_reason: i === deltas.length - 1 ? 'stop' : null,
120
- logprobs: null,
121
- delta: {
122
- role,
123
- content: deltas[i] ? `${deltas[i]}${i === deltas.length - 1 ? '' : ' '}` : null,
124
- },
125
- };
126
- }
127
- }
128
- // functionCallDeltas returns an async iterator which mocks a delta stream of a functionCall by splitting
129
- // the argument into chunks separated by whitespace.
130
- function* functionCallDeltas(args, { index = 0, id = '123', name, role = 'assistant', }) {
131
- const deltas = args.split(/\s+/g);
132
- for (let i = 0; i < deltas.length; i++) {
133
- yield {
134
- index,
135
- finish_reason: i === deltas.length - 1 ? 'function_call' : null,
136
- delta: {
137
- role,
138
- tool_calls: [
139
- {
140
- type: 'function',
141
- index: 0,
142
- id,
143
- function: {
144
- arguments: `${deltas[i] || ''}${i === deltas.length - 1 ? '' : ' '}`,
145
- ...(i === deltas.length - 1 ? { name } : null),
146
- },
147
- },
148
- ],
149
- },
150
- };
151
- }
152
- }
153
- class RunnerListener {
154
- constructor(runner) {
155
- this.runner = runner;
156
- this.contents = [];
157
- this.messages = [];
158
- this.chatCompletions = [];
159
- this.functionCalls = [];
160
- this.functionCallResults = [];
161
- this.finalContent = null;
162
- this.gotConnect = false;
163
- this.gotAbort = false;
164
- this.gotEnd = false;
165
- this.onceMessageCallCount = 0;
166
- runner
167
- .on('connect', () => (this.gotConnect = true))
168
- .on('content', (content) => this.contents.push(content))
169
- .on('message', (message) => this.messages.push(message))
170
- .on('chatCompletion', (completion) => this.chatCompletions.push(completion))
171
- .on('functionCall', (functionCall) => this.functionCalls.push(functionCall))
172
- .on('functionCallResult', (result) => this.functionCallResults.push(result))
173
- .on('finalContent', (content) => (this.finalContent = content))
174
- .on('finalMessage', (message) => (this.finalMessage = message))
175
- .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion))
176
- .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall))
177
- .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result))
178
- .on('totalUsage', (usage) => (this.totalUsage = usage))
179
- .on('error', (error) => (this.error = error))
180
- .on('abort', (error) => ((this.error = error), (this.gotAbort = true)))
181
- .on('end', () => (this.gotEnd = true))
182
- .once('message', () => this.onceMessageCallCount++);
183
- }
184
- async sanityCheck({ error } = {}) {
185
- expect(this.onceMessageCallCount).toBeLessThanOrEqual(1);
186
- expect(this.gotAbort).toEqual(this.runner.aborted);
187
- if (this.runner.aborted)
188
- expect(this.runner.errored).toBe(true);
189
- if (error) {
190
- expect(this.error?.message).toEqual(error);
191
- expect(this.runner.errored).toBe(true);
192
- await expect(this.runner.finalChatCompletion()).rejects.toThrow(error);
193
- await expect(this.runner.finalMessage()).rejects.toThrow(error);
194
- await expect(this.runner.finalContent()).rejects.toThrow(error);
195
- await expect(this.runner.finalFunctionCall()).rejects.toThrow(error);
196
- await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error);
197
- await expect(this.runner.totalUsage()).rejects.toThrow(error);
198
- await expect(this.runner.done()).rejects.toThrow(error);
199
- }
200
- else {
201
- expect(this.error).toBeUndefined();
202
- expect(this.runner.errored).toBe(false);
203
- }
204
- if (!this.gotConnect) {
205
- expect(this.contents).toEqual([]);
206
- expect(this.messages).toEqual([]);
207
- expect(this.chatCompletions).toEqual([]);
208
- expect(this.functionCalls).toEqual([]);
209
- expect(this.functionCallResults).toEqual([]);
210
- expect(this.finalContent).toBeUndefined();
211
- expect(this.finalMessage).toBeUndefined();
212
- expect(this.finalChatCompletion).toBeUndefined();
213
- expect(this.finalFunctionCall).toBeUndefined();
214
- expect(this.finalFunctionCallResult).toBeUndefined();
215
- expect(this.totalUsage).toBeUndefined();
216
- expect(this.gotEnd).toBe(true);
217
- return;
218
- }
219
- if (error)
220
- return;
221
- const expectedContents = this.messages
222
- .filter(isAssistantMessage)
223
- .map((m) => m.content)
224
- .filter(Boolean);
225
- expect(this.contents).toEqual(expectedContents);
226
- expect(this.finalMessage).toEqual([...this.messages].reverse().find((x) => x.role === 'assistant'));
227
- expect(await this.runner.finalMessage()).toEqual(this.finalMessage);
228
- expect(this.finalContent).toEqual(expectedContents[expectedContents.length - 1] ?? null);
229
- expect(await this.runner.finalContent()).toEqual(this.finalContent);
230
- expect(this.finalChatCompletion).toEqual(this.chatCompletions[this.chatCompletions.length - 1]);
231
- expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion);
232
- expect(this.finalFunctionCall).toEqual(this.functionCalls[this.functionCalls.length - 1]);
233
- expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall);
234
- expect(this.finalFunctionCallResult).toEqual(this.functionCallResults[this.functionCallResults.length - 1]);
235
- expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult);
236
- expect(this.chatCompletions).toEqual(this.runner.allChatCompletions());
237
- expect(this.messages).toEqual(this.runner.messages.slice(-this.messages.length));
238
- if (this.chatCompletions.some((c) => c.usage)) {
239
- const totalUsage = {
240
- completion_tokens: 0,
241
- prompt_tokens: 0,
242
- total_tokens: 0,
243
- };
244
- for (const { usage } of this.chatCompletions) {
245
- if (usage) {
246
- totalUsage.completion_tokens += usage.completion_tokens;
247
- totalUsage.prompt_tokens += usage.prompt_tokens;
248
- totalUsage.total_tokens += usage.total_tokens;
249
- }
250
- }
251
- expect(this.totalUsage).toEqual(totalUsage);
252
- expect(await this.runner.totalUsage()).toEqual(totalUsage);
253
- }
254
- expect(this.gotEnd).toBe(true);
255
- }
256
- }
257
- class StreamingRunnerListener {
258
- constructor(runner) {
259
- this.runner = runner;
260
- this.eventChunks = [];
261
- this.eventContents = [];
262
- this.eventMessages = [];
263
- this.eventChatCompletions = [];
264
- this.eventFunctionCalls = [];
265
- this.eventFunctionCallResults = [];
266
- this.finalContent = null;
267
- this.gotConnect = false;
268
- this.gotEnd = false;
269
- runner
270
- .on('connect', () => (this.gotConnect = true))
271
- .on('chunk', (chunk) => this.eventChunks.push(chunk))
272
- .on('content', (delta, snapshot) => this.eventContents.push([delta, snapshot]))
273
- .on('message', (message) => this.eventMessages.push(message))
274
- .on('chatCompletion', (completion) => this.eventChatCompletions.push(completion))
275
- .on('functionCall', (functionCall) => this.eventFunctionCalls.push(functionCall))
276
- .on('functionCallResult', (result) => this.eventFunctionCallResults.push(result))
277
- .on('finalContent', (content) => (this.finalContent = content))
278
- .on('finalMessage', (message) => (this.finalMessage = message))
279
- .on('finalChatCompletion', (completion) => (this.finalChatCompletion = completion))
280
- .on('finalFunctionCall', (functionCall) => (this.finalFunctionCall = functionCall))
281
- .on('finalFunctionCallResult', (result) => (this.finalFunctionCallResult = result))
282
- .on('error', (error) => (this.error = error))
283
- .on('abort', (abort) => (this.error = abort))
284
- .on('end', () => (this.gotEnd = true));
285
- }
286
- async sanityCheck({ error } = {}) {
287
- if (error) {
288
- expect(this.error?.message).toEqual(error);
289
- expect(this.runner.errored).toBe(true);
290
- await expect(this.runner.finalChatCompletion()).rejects.toThrow(error);
291
- await expect(this.runner.finalMessage()).rejects.toThrow(error);
292
- await expect(this.runner.finalContent()).rejects.toThrow(error);
293
- await expect(this.runner.finalFunctionCall()).rejects.toThrow(error);
294
- await expect(this.runner.finalFunctionCallResult()).rejects.toThrow(error);
295
- await expect(this.runner.done()).rejects.toThrow(error);
296
- }
297
- else {
298
- expect(this.error).toBeUndefined();
299
- expect(this.runner.errored).toBe(false);
300
- }
301
- if (!this.gotConnect) {
302
- expect(this.eventContents).toEqual([]);
303
- expect(this.eventMessages).toEqual([]);
304
- expect(this.eventChatCompletions).toEqual([]);
305
- expect(this.eventFunctionCalls).toEqual([]);
306
- expect(this.eventFunctionCallResults).toEqual([]);
307
- expect(this.finalContent).toBeUndefined();
308
- expect(this.finalMessage).toBeUndefined();
309
- expect(this.finalChatCompletion).toBeUndefined();
310
- expect(this.finalFunctionCall).toBeUndefined();
311
- expect(this.finalFunctionCallResult).toBeUndefined();
312
- expect(this.gotEnd).toBe(true);
313
- return;
314
- }
315
- if (error)
316
- return;
317
- if (this.eventContents.length)
318
- expect(this.eventChunks.length).toBeGreaterThan(0);
319
- expect(this.finalMessage).toEqual([...this.eventMessages].reverse().find((x) => x.role === 'assistant'));
320
- expect(await this.runner.finalMessage()).toEqual(this.finalMessage);
321
- expect(this.finalContent).toEqual(this.eventContents[this.eventContents.length - 1]?.[1] ?? null);
322
- expect(await this.runner.finalContent()).toEqual(this.finalContent);
323
- expect(this.finalChatCompletion).toEqual(this.eventChatCompletions[this.eventChatCompletions.length - 1]);
324
- expect(await this.runner.finalChatCompletion()).toEqual(this.finalChatCompletion);
325
- expect(this.finalFunctionCall).toEqual(this.eventFunctionCalls[this.eventFunctionCalls.length - 1]);
326
- expect(await this.runner.finalFunctionCall()).toEqual(this.finalFunctionCall);
327
- expect(this.finalFunctionCallResult).toEqual(this.eventFunctionCallResults[this.eventFunctionCallResults.length - 1]);
328
- expect(await this.runner.finalFunctionCallResult()).toEqual(this.finalFunctionCallResult);
329
- expect(this.eventChatCompletions).toEqual(this.runner.allChatCompletions());
330
- expect(this.eventMessages).toEqual(this.runner.messages.slice(-this.eventMessages.length));
331
- if (error) {
332
- expect(this.error?.message).toEqual(error);
333
- expect(this.runner.errored).toBe(true);
334
- }
335
- else {
336
- expect(this.error).toBeUndefined();
337
- expect(this.runner.errored).toBe(false);
338
- }
339
- expect(this.gotEnd).toBe(true);
340
- }
341
- }
342
- function _typeTests() {
343
- const openai = new OpenAI();
344
- openai.beta.chat.completions.runTools({
345
- messages: [
346
- { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' },
347
- ],
348
- model: 'gpt-3.5-turbo',
349
- tools: [
350
- {
351
- type: 'function',
352
- function: {
353
- name: 'numProperties',
354
- function: (obj) => String(Object.keys(obj).length),
355
- parameters: { type: 'object' },
356
- parse: (str) => {
357
- const result = JSON.parse(str);
358
- if (!(result instanceof Object) || Array.isArray(result)) {
359
- throw new Error('must be an object');
360
- }
361
- return result;
362
- },
363
- description: 'gets the number of properties on an object',
364
- },
365
- },
366
- {
367
- type: 'function',
368
- function: {
369
- function: (str) => String(str.length),
370
- parameters: { type: 'string' },
371
- description: 'gets the length of a string',
372
- },
373
- },
374
- {
375
- type: 'function',
376
- // @ts-expect-error function must accept string if parse is omitted
377
- function: {
378
- function: (obj) => String(Object.keys(obj).length),
379
- parameters: { type: 'object' },
380
- description: 'gets the number of properties on an object',
381
- },
382
- },
383
- ],
384
- });
385
- openai.beta.chat.completions.runTools({
386
- messages: [
387
- { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' },
388
- ],
389
- model: 'gpt-3.5-turbo',
390
- tools: [
391
- new ParsingToolFunction({
392
- name: 'numProperties',
393
- // @ts-expect-error parse and function don't match
394
- parse: (str) => str,
395
- function: (obj) => String(Object.keys(obj).length),
396
- parameters: { type: 'object' },
397
- description: 'gets the number of properties on an object',
398
- }),
399
- ],
400
- });
401
- openai.beta.chat.completions.runTools({
402
- messages: [
403
- { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' },
404
- ],
405
- model: 'gpt-3.5-turbo',
406
- tools: [
407
- new ParsingToolFunction({
408
- name: 'numProperties',
409
- parse: (str) => {
410
- const result = JSON.parse(str);
411
- if (!(result instanceof Object) || Array.isArray(result)) {
412
- throw new Error('must be an object');
413
- }
414
- return result;
415
- },
416
- function: (obj) => String(Object.keys(obj).length),
417
- parameters: { type: 'object' },
418
- description: 'gets the number of properties on an object',
419
- }),
420
- new ParsingToolFunction({
421
- name: 'keys',
422
- parse: (str) => {
423
- const result = JSON.parse(str);
424
- if (!(result instanceof Object)) {
425
- throw new Error('must be an Object');
426
- }
427
- return result;
428
- },
429
- function: (obj) => Object.keys(obj).join(', '),
430
- parameters: { type: 'object' },
431
- description: 'gets the number of properties on an object',
432
- }),
433
- new ParsingToolFunction({
434
- name: 'len2',
435
- // @ts-expect-error parse and function don't match
436
- parse: (str) => str,
437
- function: (obj) => String(Object.keys(obj).length),
438
- parameters: { type: 'object' },
439
- description: 'gets the number of properties on an object',
440
- }),
441
- ],
442
- });
443
- openai.beta.chat.completions.runTools({
444
- messages: [
445
- { role: 'user', content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}' },
446
- ],
447
- model: 'gpt-3.5-turbo',
448
- // @ts-ignore error occurs here in TS 4
449
- tools: [
450
- {
451
- type: 'function',
452
- function: {
453
- name: 'numProperties',
454
- parse: (str) => {
455
- const result = JSON.parse(str);
456
- if (!(result instanceof Object) || Array.isArray(result)) {
457
- throw new Error('must be an object');
458
- }
459
- return result;
460
- },
461
- function: (obj) => String(Object.keys(obj).length),
462
- parameters: { type: 'object' },
463
- description: 'gets the number of properties on an object',
464
- },
465
- },
466
- {
467
- type: 'function',
468
- function: {
469
- name: 'keys',
470
- parse: (str) => {
471
- const result = JSON.parse(str);
472
- if (!(result instanceof Object)) {
473
- throw new Error('must be an Object');
474
- }
475
- return result;
476
- },
477
- function: (obj) => Object.keys(obj).join(', '),
478
- parameters: { type: 'object' },
479
- description: 'gets the number of properties on an object',
480
- },
481
- },
482
- {
483
- type: 'function',
484
- function: {
485
- name: 'len2',
486
- parse: (str) => str,
487
- // @ts-ignore error occurs here in TS 5
488
- // function input doesn't match parse output
489
- function: (obj) => String(Object.keys(obj).length),
490
- parameters: { type: 'object' },
491
- description: 'gets the number of properties on an object',
492
- },
493
- },
494
- ],
495
- });
496
- }
497
- describe('resource completions', () => {
498
- describe('runTools with stream: false', () => {
499
- test('successful flow', async () => {
500
- const { fetch, handleRequest } = mockChatCompletionFetch();
501
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
502
- const runner = openai.beta.chat.completions.runTools({
503
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
504
- model: 'gpt-3.5-turbo',
505
- tools: [
506
- {
507
- type: 'function',
508
- function: {
509
- function: function getWeather() {
510
- return `it's raining`;
511
- },
512
- parameters: {},
513
- description: 'gets the weather',
514
- },
515
- },
516
- ],
517
- });
518
- const listener = new RunnerListener(runner);
519
- await handleRequest(async (request) => {
520
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
521
- return {
522
- id: '1',
523
- choices: [
524
- {
525
- index: 0,
526
- finish_reason: 'function_call',
527
- logprobs: null,
528
- message: {
529
- role: 'assistant',
530
- content: null,
531
- tool_calls: [
532
- {
533
- type: 'function',
534
- id: '123',
535
- function: {
536
- arguments: '',
537
- name: 'getWeather',
538
- },
539
- },
540
- ],
541
- },
542
- },
543
- ],
544
- created: Math.floor(Date.now() / 1000),
545
- model: 'gpt-3.5-turbo',
546
- object: 'chat.completion',
547
- };
548
- });
549
- await handleRequest(async (request) => {
550
- expect(request.messages).toEqual([
551
- { role: 'user', content: 'tell me what the weather is like' },
552
- {
553
- role: 'assistant',
554
- content: null,
555
- tool_calls: [
556
- {
557
- type: 'function',
558
- id: '123',
559
- function: {
560
- arguments: '',
561
- name: 'getWeather',
562
- },
563
- },
564
- ],
565
- },
566
- {
567
- role: 'tool',
568
- content: `it's raining`,
569
- tool_call_id: '123',
570
- },
571
- ]);
572
- return {
573
- id: '2',
574
- choices: [
575
- {
576
- index: 0,
577
- finish_reason: 'stop',
578
- logprobs: null,
579
- message: {
580
- role: 'assistant',
581
- content: `it's raining`,
582
- },
583
- },
584
- ],
585
- created: Math.floor(Date.now() / 1000),
586
- model: 'gpt-3.5-turbo',
587
- object: 'chat.completion',
588
- };
589
- });
590
- await runner.done();
591
- expect(listener.messages).toEqual([
592
- { role: 'user', content: 'tell me what the weather is like' },
593
- {
594
- role: 'assistant',
595
- content: null,
596
- tool_calls: [
597
- {
598
- type: 'function',
599
- id: '123',
600
- function: {
601
- arguments: '',
602
- name: 'getWeather',
603
- },
604
- },
605
- ],
606
- },
607
- { role: 'tool', content: `it's raining`, tool_call_id: '123' },
608
- { role: 'assistant', content: "it's raining" },
609
- ]);
610
- expect(listener.functionCallResults).toEqual([`it's raining`]);
611
- await listener.sanityCheck();
612
- });
613
- test('flow with abort', async () => {
614
- const { fetch, handleRequest } = mockChatCompletionFetch();
615
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
616
- const controller = new AbortController();
617
- const runner = openai.beta.chat.completions.runTools({
618
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
619
- model: 'gpt-3.5-turbo',
620
- tools: [
621
- {
622
- type: 'function',
623
- function: {
624
- function: function getWeather() {
625
- return `it's raining`;
626
- },
627
- parameters: {},
628
- description: 'gets the weather',
629
- },
630
- },
631
- ],
632
- }, { signal: controller.signal });
633
- const listener = new RunnerListener(runner);
634
- await handleRequest(async (request) => {
635
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
636
- return {
637
- id: '1',
638
- choices: [
639
- {
640
- index: 0,
641
- finish_reason: 'function_call',
642
- logprobs: null,
643
- message: {
644
- role: 'assistant',
645
- content: null,
646
- tool_calls: [
647
- {
648
- type: 'function',
649
- id: '123',
650
- function: {
651
- arguments: '',
652
- name: 'getWeather',
653
- },
654
- },
655
- ],
656
- },
657
- },
658
- ],
659
- created: Math.floor(Date.now() / 1000),
660
- model: 'gpt-3.5-turbo',
661
- object: 'chat.completion',
662
- };
663
- });
664
- controller.abort();
665
- await runner.done().catch(() => { });
666
- expect(listener.messages).toEqual([
667
- { role: 'user', content: 'tell me what the weather is like' },
668
- {
669
- role: 'assistant',
670
- content: null,
671
- tool_calls: [
672
- {
673
- type: 'function',
674
- id: '123',
675
- function: {
676
- arguments: '',
677
- name: 'getWeather',
678
- },
679
- },
680
- ],
681
- },
682
- { role: 'tool', content: `it's raining`, tool_call_id: '123' },
683
- ]);
684
- expect(listener.functionCallResults).toEqual([`it's raining`]);
685
- await listener.sanityCheck({ error: 'Request was aborted.' });
686
- expect(runner.aborted).toBe(true);
687
- });
688
- test('successful flow with parse', async () => {
689
- const { fetch, handleRequest } = mockChatCompletionFetch();
690
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
691
- const runner = openai.beta.chat.completions.runTools({
692
- messages: [
693
- {
694
- role: 'user',
695
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
696
- },
697
- ],
698
- model: 'gpt-3.5-turbo',
699
- tools: [
700
- new ParsingToolFunction({
701
- name: 'numProperties',
702
- function: (obj) => String(Object.keys(obj).length),
703
- parameters: { type: 'object' },
704
- parse: (str) => {
705
- const result = JSON.parse(str);
706
- if (!(result instanceof Object) || Array.isArray(result)) {
707
- throw new Error('must be an object');
708
- }
709
- return result;
710
- },
711
- description: 'gets the number of properties on an object',
712
- }),
713
- ],
714
- });
715
- const listener = new RunnerListener(runner);
716
- await handleRequest(async (request) => {
717
- expect(request.messages).toEqual([
718
- {
719
- role: 'user',
720
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
721
- },
722
- ]);
723
- return {
724
- id: '1',
725
- choices: [
726
- {
727
- index: 0,
728
- finish_reason: 'function_call',
729
- logprobs: null,
730
- message: {
731
- role: 'assistant',
732
- content: null,
733
- tool_calls: [
734
- {
735
- type: 'function',
736
- id: '123',
737
- function: {
738
- arguments: '{"a": 1, "b": 2, "c": 3}',
739
- name: 'numProperties',
740
- },
741
- },
742
- ],
743
- },
744
- },
745
- ],
746
- created: Math.floor(Date.now() / 1000),
747
- model: 'gpt-3.5-turbo',
748
- object: 'chat.completion',
749
- usage: {
750
- completion_tokens: 5,
751
- prompt_tokens: 20,
752
- total_tokens: 25,
753
- },
754
- };
755
- });
756
- await handleRequest(async (request) => {
757
- expect(request.messages).toEqual([
758
- {
759
- role: 'user',
760
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
761
- },
762
- {
763
- role: 'assistant',
764
- content: null,
765
- tool_calls: [
766
- {
767
- type: 'function',
768
- id: '123',
769
- function: {
770
- arguments: '{"a": 1, "b": 2, "c": 3}',
771
- name: 'numProperties',
772
- },
773
- },
774
- ],
775
- },
776
- {
777
- role: 'tool',
778
- content: '3',
779
- tool_call_id: '123',
780
- },
781
- ]);
782
- return {
783
- id: '2',
784
- choices: [
785
- {
786
- index: 0,
787
- finish_reason: 'stop',
788
- logprobs: null,
789
- message: {
790
- role: 'assistant',
791
- content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`,
792
- },
793
- },
794
- ],
795
- created: Math.floor(Date.now() / 1000),
796
- model: 'gpt-3.5-turbo',
797
- object: 'chat.completion',
798
- usage: {
799
- completion_tokens: 10,
800
- prompt_tokens: 25,
801
- total_tokens: 35,
802
- },
803
- };
804
- });
805
- await runner.done();
806
- expect(listener.messages).toEqual([
807
- {
808
- role: 'user',
809
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
810
- },
811
- {
812
- role: 'assistant',
813
- content: null,
814
- tool_calls: [
815
- {
816
- type: 'function',
817
- id: '123',
818
- function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' },
819
- },
820
- ],
821
- },
822
- { role: 'tool', content: '3', tool_call_id: '123' },
823
- { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' },
824
- ]);
825
- expect(listener.functionCallResults).toEqual(['3']);
826
- await listener.sanityCheck();
827
- });
828
- test('flow with parse error', async () => {
829
- const { fetch, handleRequest } = mockChatCompletionFetch();
830
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
831
- const runner = openai.beta.chat.completions.runTools({
832
- messages: [
833
- {
834
- role: 'user',
835
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
836
- },
837
- ],
838
- model: 'gpt-3.5-turbo',
839
- tools: [
840
- new ParsingToolFunction({
841
- name: 'numProperties',
842
- function: (obj) => String(Object.keys(obj).length),
843
- parameters: { type: 'object' },
844
- parse: (str) => {
845
- const result = JSON.parse(str);
846
- if (!(result instanceof Object) || Array.isArray(result)) {
847
- throw new Error('must be an object');
848
- }
849
- return result;
850
- },
851
- description: 'gets the number of properties on an object',
852
- }),
853
- ],
854
- });
855
- const listener = new RunnerListener(runner);
856
- await Promise.all([
857
- handleRequest(async (request) => {
858
- expect(request.messages).toEqual([
859
- {
860
- role: 'user',
861
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
862
- },
863
- ]);
864
- return {
865
- id: '1',
866
- choices: [
867
- {
868
- index: 0,
869
- finish_reason: 'function_call',
870
- logprobs: null,
871
- message: {
872
- role: 'assistant',
873
- content: null,
874
- tool_calls: [
875
- {
876
- type: 'function',
877
- id: '123',
878
- function: {
879
- arguments: '[{"a": 1, "b": 2, "c": 3}]',
880
- name: 'numProperties',
881
- },
882
- },
883
- ],
884
- },
885
- },
886
- ],
887
- created: Math.floor(Date.now() / 1000),
888
- model: 'gpt-3.5-turbo',
889
- object: 'chat.completion',
890
- };
891
- }),
892
- handleRequest(async (request) => {
893
- expect(request.messages).toEqual([
894
- {
895
- role: 'user',
896
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
897
- },
898
- {
899
- role: 'assistant',
900
- content: null,
901
- tool_calls: [
902
- {
903
- type: 'function',
904
- id: '123',
905
- function: {
906
- arguments: '[{"a": 1, "b": 2, "c": 3}]',
907
- name: 'numProperties',
908
- },
909
- },
910
- ],
911
- },
912
- {
913
- role: 'tool',
914
- content: `must be an object`,
915
- tool_call_id: '123',
916
- },
917
- ]);
918
- return {
919
- id: '2',
920
- choices: [
921
- {
922
- index: 0,
923
- finish_reason: 'function_call',
924
- logprobs: null,
925
- message: {
926
- role: 'assistant',
927
- content: null,
928
- tool_calls: [
929
- {
930
- type: 'function',
931
- id: '1234',
932
- function: {
933
- arguments: '{"a": 1, "b": 2, "c": 3}',
934
- name: 'numProperties',
935
- },
936
- },
937
- ],
938
- },
939
- },
940
- ],
941
- created: Math.floor(Date.now() / 1000),
942
- model: 'gpt-3.5-turbo',
943
- object: 'chat.completion',
944
- };
945
- }),
946
- handleRequest(async (request) => {
947
- expect(request.messages).toEqual([
948
- {
949
- role: 'user',
950
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
951
- },
952
- {
953
- role: 'assistant',
954
- content: null,
955
- tool_calls: [
956
- {
957
- type: 'function',
958
- id: '123',
959
- function: {
960
- arguments: '[{"a": 1, "b": 2, "c": 3}]',
961
- name: 'numProperties',
962
- },
963
- },
964
- ],
965
- },
966
- {
967
- role: 'tool',
968
- content: `must be an object`,
969
- tool_call_id: '123',
970
- },
971
- {
972
- role: 'assistant',
973
- content: null,
974
- tool_calls: [
975
- {
976
- type: 'function',
977
- id: '1234',
978
- function: {
979
- arguments: '{"a": 1, "b": 2, "c": 3}',
980
- name: 'numProperties',
981
- },
982
- },
983
- ],
984
- },
985
- {
986
- role: 'tool',
987
- content: '3',
988
- tool_call_id: '1234',
989
- },
990
- ]);
991
- return {
992
- id: '3',
993
- choices: [
994
- {
995
- index: 0,
996
- finish_reason: 'stop',
997
- logprobs: null,
998
- message: {
999
- role: 'assistant',
1000
- content: `there are 3 properties in {"a": 1, "b": 2, "c": 3}`,
1001
- },
1002
- },
1003
- ],
1004
- created: Math.floor(Date.now() / 1000),
1005
- model: 'gpt-3.5-turbo',
1006
- object: 'chat.completion',
1007
- };
1008
- }),
1009
- runner.done(),
1010
- ]);
1011
- expect(listener.messages).toEqual([
1012
- {
1013
- role: 'user',
1014
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1015
- },
1016
- {
1017
- role: 'assistant',
1018
- content: null,
1019
- tool_calls: [
1020
- {
1021
- type: 'function',
1022
- id: '123',
1023
- function: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' },
1024
- },
1025
- ],
1026
- },
1027
- { role: 'tool', content: `must be an object`, tool_call_id: '123' },
1028
- {
1029
- role: 'assistant',
1030
- content: null,
1031
- tool_calls: [
1032
- {
1033
- type: 'function',
1034
- id: '1234',
1035
- function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' },
1036
- },
1037
- ],
1038
- },
1039
- { role: 'tool', content: '3', tool_call_id: '1234' },
1040
- { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' },
1041
- ]);
1042
- expect(listener.functionCallResults).toEqual([`must be an object`, '3']);
1043
- await listener.sanityCheck();
1044
- });
1045
- test('single function call', async () => {
1046
- const { fetch, handleRequest } = mockChatCompletionFetch();
1047
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1048
- const runner = openai.beta.chat.completions.runTools({
1049
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1050
- model: 'gpt-3.5-turbo',
1051
- tool_choice: {
1052
- type: 'function',
1053
- function: {
1054
- name: 'getWeather',
1055
- },
1056
- },
1057
- tools: [
1058
- {
1059
- type: 'function',
1060
- function: {
1061
- function: function getWeather() {
1062
- return `it's raining`;
1063
- },
1064
- parameters: {},
1065
- description: 'gets the weather',
1066
- },
1067
- },
1068
- ],
1069
- });
1070
- const listener = new RunnerListener(runner);
1071
- await Promise.all([
1072
- handleRequest(async (request) => {
1073
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1074
- return {
1075
- id: '1',
1076
- choices: [
1077
- {
1078
- index: 0,
1079
- finish_reason: 'function_call',
1080
- logprobs: null,
1081
- message: {
1082
- role: 'assistant',
1083
- content: null,
1084
- tool_calls: [
1085
- {
1086
- type: 'function',
1087
- id: '123',
1088
- function: {
1089
- arguments: '',
1090
- name: 'getWeather',
1091
- },
1092
- },
1093
- ],
1094
- },
1095
- },
1096
- ],
1097
- created: Math.floor(Date.now() / 1000),
1098
- model: 'gpt-3.5-turbo',
1099
- object: 'chat.completion',
1100
- };
1101
- }),
1102
- runner.done(),
1103
- ]);
1104
- expect(listener.messages).toEqual([
1105
- { role: 'user', content: 'tell me what the weather is like' },
1106
- {
1107
- role: 'assistant',
1108
- content: null,
1109
- tool_calls: [
1110
- {
1111
- type: 'function',
1112
- id: '123',
1113
- function: {
1114
- arguments: '',
1115
- name: 'getWeather',
1116
- },
1117
- },
1118
- ],
1119
- },
1120
- { role: 'tool', content: `it's raining`, tool_call_id: '123' },
1121
- ]);
1122
- expect(listener.functionCallResults).toEqual([`it's raining`]);
1123
- await listener.sanityCheck();
1124
- });
1125
- test('wrong function name', async () => {
1126
- const { fetch, handleRequest } = mockChatCompletionFetch();
1127
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1128
- const runner = openai.beta.chat.completions.runTools({
1129
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1130
- model: 'gpt-3.5-turbo',
1131
- tools: [
1132
- {
1133
- type: 'function',
1134
- function: {
1135
- function: function getWeather() {
1136
- return `it's raining`;
1137
- },
1138
- parameters: {},
1139
- description: 'gets the weather',
1140
- },
1141
- },
1142
- ],
1143
- });
1144
- const listener = new RunnerListener(runner);
1145
- await Promise.all([
1146
- handleRequest(async (request) => {
1147
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1148
- return {
1149
- id: '1',
1150
- choices: [
1151
- {
1152
- index: 0,
1153
- finish_reason: 'function_call',
1154
- logprobs: null,
1155
- message: {
1156
- role: 'assistant',
1157
- content: null,
1158
- tool_calls: [
1159
- {
1160
- type: 'function',
1161
- id: '123',
1162
- function: {
1163
- arguments: '',
1164
- name: 'get_weather',
1165
- },
1166
- },
1167
- ],
1168
- },
1169
- },
1170
- ],
1171
- created: Math.floor(Date.now() / 1000),
1172
- model: 'gpt-3.5-turbo',
1173
- object: 'chat.completion',
1174
- };
1175
- }),
1176
- handleRequest(async (request) => {
1177
- expect(request.messages).toEqual([
1178
- { role: 'user', content: 'tell me what the weather is like' },
1179
- {
1180
- role: 'assistant',
1181
- content: null,
1182
- tool_calls: [
1183
- {
1184
- type: 'function',
1185
- id: '123',
1186
- function: {
1187
- arguments: '',
1188
- name: 'get_weather',
1189
- },
1190
- },
1191
- ],
1192
- },
1193
- {
1194
- role: 'tool',
1195
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
1196
- tool_call_id: '123',
1197
- },
1198
- ]);
1199
- return {
1200
- id: '2',
1201
- choices: [
1202
- {
1203
- index: 0,
1204
- finish_reason: 'function_call',
1205
- logprobs: null,
1206
- message: {
1207
- role: 'assistant',
1208
- content: null,
1209
- tool_calls: [
1210
- {
1211
- type: 'function',
1212
- id: '1234',
1213
- function: {
1214
- arguments: '',
1215
- name: 'getWeather',
1216
- },
1217
- },
1218
- ],
1219
- },
1220
- },
1221
- ],
1222
- created: Math.floor(Date.now() / 1000),
1223
- model: 'gpt-3.5-turbo',
1224
- object: 'chat.completion',
1225
- };
1226
- }),
1227
- handleRequest(async (request) => {
1228
- expect(request.messages).toEqual([
1229
- { role: 'user', content: 'tell me what the weather is like' },
1230
- {
1231
- role: 'assistant',
1232
- content: null,
1233
- tool_calls: [
1234
- {
1235
- type: 'function',
1236
- id: '123',
1237
- function: {
1238
- arguments: '',
1239
- name: 'get_weather',
1240
- },
1241
- },
1242
- ],
1243
- },
1244
- {
1245
- role: 'tool',
1246
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
1247
- tool_call_id: '123',
1248
- },
1249
- {
1250
- role: 'assistant',
1251
- content: null,
1252
- tool_calls: [
1253
- {
1254
- type: 'function',
1255
- id: '1234',
1256
- function: {
1257
- arguments: '',
1258
- name: 'getWeather',
1259
- },
1260
- },
1261
- ],
1262
- },
1263
- {
1264
- role: 'tool',
1265
- content: `it's raining`,
1266
- tool_call_id: '1234',
1267
- },
1268
- ]);
1269
- return {
1270
- id: '3',
1271
- choices: [
1272
- {
1273
- index: 0,
1274
- finish_reason: 'stop',
1275
- logprobs: null,
1276
- message: {
1277
- role: 'assistant',
1278
- content: `it's raining`,
1279
- },
1280
- },
1281
- ],
1282
- created: Math.floor(Date.now() / 1000),
1283
- model: 'gpt-3.5-turbo',
1284
- object: 'chat.completion',
1285
- };
1286
- }),
1287
- runner.done(),
1288
- ]);
1289
- expect(listener.messages).toEqual([
1290
- { role: 'user', content: 'tell me what the weather is like' },
1291
- {
1292
- role: 'assistant',
1293
- content: null,
1294
- tool_calls: [{ type: 'function', id: '123', function: { name: 'get_weather', arguments: '' } }],
1295
- },
1296
- {
1297
- role: 'tool',
1298
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
1299
- tool_call_id: '123',
1300
- },
1301
- {
1302
- role: 'assistant',
1303
- content: null,
1304
- tool_calls: [{ type: 'function', id: '1234', function: { name: 'getWeather', arguments: '' } }],
1305
- },
1306
- { role: 'tool', content: `it's raining`, tool_call_id: '1234' },
1307
- { role: 'assistant', content: "it's raining" },
1308
- ]);
1309
- expect(listener.functionCallResults).toEqual([
1310
- `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
1311
- `it's raining`,
1312
- ]);
1313
- await listener.sanityCheck();
1314
- });
1315
- });
1316
- describe('runTools with stream: true', () => {
1317
- test('successful flow', async () => {
1318
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1319
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1320
- const runner = openai.beta.chat.completions.runTools({
1321
- stream: true,
1322
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1323
- model: 'gpt-3.5-turbo',
1324
- tools: [
1325
- {
1326
- type: 'function',
1327
- function: {
1328
- function: function getWeather() {
1329
- return `it's raining`;
1330
- },
1331
- parameters: {},
1332
- description: 'gets the weather',
1333
- },
1334
- },
1335
- ],
1336
- });
1337
- const listener = new StreamingRunnerListener(runner);
1338
- await Promise.all([
1339
- handleRequest(async function* (request) {
1340
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1341
- yield {
1342
- id: '1',
1343
- choices: [
1344
- {
1345
- index: 0,
1346
- finish_reason: 'function_call',
1347
- logprobs: null,
1348
- delta: {
1349
- role: 'assistant',
1350
- content: null,
1351
- tool_calls: [
1352
- {
1353
- type: 'function',
1354
- index: 0,
1355
- id: '123',
1356
- function: {
1357
- arguments: '',
1358
- name: 'getWeather',
1359
- },
1360
- },
1361
- ],
1362
- },
1363
- },
1364
- ],
1365
- created: Math.floor(Date.now() / 1000),
1366
- model: 'gpt-3.5-turbo',
1367
- object: 'chat.completion.chunk',
1368
- };
1369
- }),
1370
- handleRequest(async function* (request) {
1371
- expect(request.messages).toEqual([
1372
- { role: 'user', content: 'tell me what the weather is like' },
1373
- {
1374
- role: 'assistant',
1375
- content: null,
1376
- tool_calls: [
1377
- {
1378
- type: 'function',
1379
- id: '123',
1380
- function: {
1381
- arguments: '',
1382
- name: 'getWeather',
1383
- },
1384
- },
1385
- ],
1386
- },
1387
- {
1388
- role: 'tool',
1389
- content: `it's raining`,
1390
- tool_call_id: '123',
1391
- },
1392
- ]);
1393
- for (const choice of contentChoiceDeltas(`it's raining`)) {
1394
- yield {
1395
- id: '2',
1396
- choices: [choice],
1397
- created: Math.floor(Date.now() / 1000),
1398
- model: 'gpt-3.5-turbo',
1399
- object: 'chat.completion.chunk',
1400
- };
1401
- }
1402
- }),
1403
- runner.done(),
1404
- ]);
1405
- expect(listener.eventMessages).toEqual([
1406
- {
1407
- role: 'assistant',
1408
- content: null,
1409
- tool_calls: [
1410
- {
1411
- type: 'function',
1412
- id: '123',
1413
- function: {
1414
- arguments: '',
1415
- name: 'getWeather',
1416
- },
1417
- },
1418
- ],
1419
- },
1420
- { role: 'tool', content: `it's raining`, tool_call_id: '123' },
1421
- { role: 'assistant', content: "it's raining" },
1422
- ]);
1423
- expect(listener.eventFunctionCallResults).toEqual([`it's raining`]);
1424
- await listener.sanityCheck();
1425
- });
1426
- test('flow with abort', async () => {
1427
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1428
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1429
- const controller = new AbortController();
1430
- const runner = openai.beta.chat.completions.runTools({
1431
- stream: true,
1432
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1433
- model: 'gpt-3.5-turbo',
1434
- tools: [
1435
- {
1436
- type: 'function',
1437
- function: {
1438
- function: function getWeather() {
1439
- return `it's raining`;
1440
- },
1441
- parameters: {},
1442
- description: 'gets the weather',
1443
- },
1444
- },
1445
- ],
1446
- }, { signal: controller.signal });
1447
- runner.on('functionCallResult', () => controller.abort());
1448
- const listener = new StreamingRunnerListener(runner);
1449
- await handleRequest(async function* (request) {
1450
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1451
- yield {
1452
- id: '1',
1453
- choices: [
1454
- {
1455
- index: 0,
1456
- finish_reason: 'function_call',
1457
- delta: {
1458
- role: 'assistant',
1459
- content: null,
1460
- tool_calls: [
1461
- {
1462
- type: 'function',
1463
- index: 0,
1464
- id: '123',
1465
- function: {
1466
- arguments: '',
1467
- name: 'getWeather',
1468
- },
1469
- },
1470
- ],
1471
- },
1472
- },
1473
- ],
1474
- created: Math.floor(Date.now() / 1000),
1475
- model: 'gpt-3.5-turbo',
1476
- object: 'chat.completion.chunk',
1477
- };
1478
- });
1479
- await runner.done().catch(() => { });
1480
- expect(listener.eventMessages).toEqual([
1481
- {
1482
- role: 'assistant',
1483
- content: null,
1484
- tool_calls: [
1485
- {
1486
- type: 'function',
1487
- id: '123',
1488
- function: {
1489
- arguments: '',
1490
- name: 'getWeather',
1491
- },
1492
- },
1493
- ],
1494
- },
1495
- { role: 'tool', content: `it's raining`, tool_call_id: '123' },
1496
- ]);
1497
- expect(listener.eventFunctionCallResults).toEqual([`it's raining`]);
1498
- await listener.sanityCheck({ error: 'Request was aborted.' });
1499
- expect(runner.aborted).toBe(true);
1500
- });
1501
- test('successful flow with parse', async () => {
1502
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1503
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1504
- const runner = openai.beta.chat.completions.runTools({
1505
- stream: true,
1506
- messages: [
1507
- {
1508
- role: 'user',
1509
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1510
- },
1511
- ],
1512
- model: 'gpt-3.5-turbo',
1513
- tools: [
1514
- new ParsingToolFunction({
1515
- name: 'numProperties',
1516
- function: (obj) => String(Object.keys(obj).length),
1517
- parameters: { type: 'object' },
1518
- parse: (str) => {
1519
- const result = JSON.parse(str);
1520
- if (!(result instanceof Object) || Array.isArray(result)) {
1521
- throw new Error('must be an object');
1522
- }
1523
- return result;
1524
- },
1525
- description: 'gets the number of properties on an object',
1526
- }),
1527
- ],
1528
- });
1529
- const listener = new StreamingRunnerListener(runner);
1530
- await Promise.all([
1531
- handleRequest(async function* (request) {
1532
- expect(request.messages).toEqual([
1533
- {
1534
- role: 'user',
1535
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1536
- },
1537
- ]);
1538
- yield {
1539
- id: '1',
1540
- choices: [
1541
- {
1542
- index: 0,
1543
- finish_reason: 'function_call',
1544
- delta: {
1545
- role: 'assistant',
1546
- content: null,
1547
- tool_calls: [
1548
- {
1549
- type: 'function',
1550
- id: '123',
1551
- index: 0,
1552
- function: {
1553
- arguments: '{"a": 1, "b": 2, "c": 3}',
1554
- name: 'numProperties',
1555
- },
1556
- },
1557
- ],
1558
- },
1559
- },
1560
- ],
1561
- created: Math.floor(Date.now() / 1000),
1562
- model: 'gpt-3.5-turbo',
1563
- object: 'chat.completion.chunk',
1564
- };
1565
- }),
1566
- handleRequest(async function* (request) {
1567
- expect(request.messages).toEqual([
1568
- {
1569
- role: 'user',
1570
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1571
- },
1572
- {
1573
- role: 'assistant',
1574
- content: null,
1575
- tool_calls: [
1576
- {
1577
- type: 'function',
1578
- id: '123',
1579
- function: {
1580
- arguments: '{"a": 1, "b": 2, "c": 3}',
1581
- name: 'numProperties',
1582
- },
1583
- },
1584
- ],
1585
- },
1586
- {
1587
- role: 'tool',
1588
- content: '3',
1589
- tool_call_id: '123',
1590
- },
1591
- ]);
1592
- for (const choice of contentChoiceDeltas(`there are 3 properties in {"a": 1, "b": 2, "c": 3}`)) {
1593
- yield {
1594
- id: '2',
1595
- choices: [choice],
1596
- created: Math.floor(Date.now() / 1000),
1597
- model: 'gpt-3.5-turbo',
1598
- object: 'chat.completion.chunk',
1599
- };
1600
- }
1601
- }),
1602
- runner.done(),
1603
- ]);
1604
- expect(listener.eventMessages).toEqual([
1605
- {
1606
- role: 'assistant',
1607
- content: null,
1608
- tool_calls: [
1609
- {
1610
- type: 'function',
1611
- id: '123',
1612
- function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' },
1613
- },
1614
- ],
1615
- },
1616
- { role: 'tool', content: '3', tool_call_id: '123' },
1617
- { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' },
1618
- ]);
1619
- expect(listener.eventFunctionCallResults).toEqual(['3']);
1620
- await listener.sanityCheck();
1621
- });
1622
- test('flow with parse error', async () => {
1623
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1624
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1625
- const runner = openai.beta.chat.completions.runTools({
1626
- stream: true,
1627
- messages: [
1628
- {
1629
- role: 'user',
1630
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1631
- },
1632
- ],
1633
- model: 'gpt-3.5-turbo',
1634
- tools: [
1635
- new ParsingToolFunction({
1636
- name: 'numProperties',
1637
- function: (obj) => String(Object.keys(obj).length),
1638
- parameters: { type: 'object' },
1639
- parse: (str) => {
1640
- const result = JSON.parse(str);
1641
- if (!(result instanceof Object) || Array.isArray(result)) {
1642
- throw new Error('must be an object');
1643
- }
1644
- return result;
1645
- },
1646
- description: 'gets the number of properties on an object',
1647
- }),
1648
- ],
1649
- });
1650
- const listener = new StreamingRunnerListener(runner);
1651
- await Promise.all([
1652
- handleRequest(async function* (request) {
1653
- expect(request.messages).toEqual([
1654
- {
1655
- role: 'user',
1656
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1657
- },
1658
- ]);
1659
- for (const choice of functionCallDeltas('[{"a": 1, "b": 2, "c": 3}]', {
1660
- name: 'numProperties',
1661
- id: '123',
1662
- })) {
1663
- yield {
1664
- id: '1',
1665
- choices: [choice],
1666
- created: Math.floor(Date.now() / 1000),
1667
- model: 'gpt-3.5-turbo',
1668
- object: 'chat.completion.chunk',
1669
- };
1670
- }
1671
- }),
1672
- handleRequest(async function* (request) {
1673
- expect(request.messages).toEqual([
1674
- {
1675
- role: 'user',
1676
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1677
- },
1678
- {
1679
- role: 'assistant',
1680
- content: null,
1681
- tool_calls: [
1682
- {
1683
- type: 'function',
1684
- id: '123',
1685
- function: {
1686
- arguments: '[{"a": 1, "b": 2, "c": 3}]',
1687
- name: 'numProperties',
1688
- },
1689
- },
1690
- ],
1691
- },
1692
- {
1693
- role: 'tool',
1694
- content: `must be an object`,
1695
- tool_call_id: '123',
1696
- },
1697
- ]);
1698
- for (const choice of functionCallDeltas('{"a": 1, "b": 2, "c": 3}', {
1699
- name: 'numProperties',
1700
- id: '1234',
1701
- })) {
1702
- yield {
1703
- id: '2',
1704
- choices: [choice],
1705
- created: Math.floor(Date.now() / 1000),
1706
- model: 'gpt-3.5-turbo',
1707
- object: 'chat.completion.chunk',
1708
- };
1709
- }
1710
- }),
1711
- handleRequest(async function* (request) {
1712
- expect(request.messages).toEqual([
1713
- {
1714
- role: 'user',
1715
- content: 'can you tell me how many properties are in {"a": 1, "b": 2, "c": 3}',
1716
- },
1717
- {
1718
- role: 'assistant',
1719
- content: null,
1720
- tool_calls: [
1721
- {
1722
- type: 'function',
1723
- id: '123',
1724
- function: {
1725
- arguments: '[{"a": 1, "b": 2, "c": 3}]',
1726
- name: 'numProperties',
1727
- },
1728
- },
1729
- ],
1730
- },
1731
- {
1732
- role: 'tool',
1733
- content: `must be an object`,
1734
- tool_call_id: '123',
1735
- },
1736
- {
1737
- role: 'assistant',
1738
- content: null,
1739
- tool_calls: [
1740
- {
1741
- type: 'function',
1742
- id: '1234',
1743
- function: {
1744
- arguments: '{"a": 1, "b": 2, "c": 3}',
1745
- name: 'numProperties',
1746
- },
1747
- },
1748
- ],
1749
- },
1750
- {
1751
- role: 'tool',
1752
- content: '3',
1753
- tool_call_id: '1234',
1754
- },
1755
- ]);
1756
- for (const choice of contentChoiceDeltas(`there are 3 properties in {"a": 1, "b": 2, "c": 3}`)) {
1757
- yield {
1758
- id: '3',
1759
- choices: [choice],
1760
- created: Math.floor(Date.now() / 1000),
1761
- model: 'gpt-3.5-turbo',
1762
- object: 'chat.completion.chunk',
1763
- };
1764
- }
1765
- }),
1766
- runner.done(),
1767
- ]);
1768
- expect(listener.eventMessages).toEqual([
1769
- {
1770
- role: 'assistant',
1771
- content: null,
1772
- tool_calls: [
1773
- {
1774
- type: 'function',
1775
- id: '123',
1776
- function: { name: 'numProperties', arguments: '[{"a": 1, "b": 2, "c": 3}]' },
1777
- },
1778
- ],
1779
- },
1780
- { role: 'tool', content: `must be an object`, tool_call_id: '123' },
1781
- {
1782
- role: 'assistant',
1783
- content: null,
1784
- tool_calls: [
1785
- {
1786
- type: 'function',
1787
- id: '1234',
1788
- function: { name: 'numProperties', arguments: '{"a": 1, "b": 2, "c": 3}' },
1789
- },
1790
- ],
1791
- },
1792
- { role: 'tool', content: '3', tool_call_id: '1234' },
1793
- { role: 'assistant', content: 'there are 3 properties in {"a": 1, "b": 2, "c": 3}' },
1794
- ]);
1795
- expect(listener.eventFunctionCallResults).toEqual([`must be an object`, '3']);
1796
- await listener.sanityCheck();
1797
- });
1798
- test('single function call', async () => {
1799
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1800
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1801
- const runner = openai.beta.chat.completions.runTools({
1802
- stream: true,
1803
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1804
- model: 'gpt-3.5-turbo',
1805
- tool_choice: {
1806
- type: 'function',
1807
- function: {
1808
- name: 'getWeather',
1809
- },
1810
- },
1811
- tools: [
1812
- {
1813
- type: 'function',
1814
- function: {
1815
- function: function getWeather() {
1816
- return `it's raining`;
1817
- },
1818
- parameters: {},
1819
- description: 'gets the weather',
1820
- },
1821
- },
1822
- ],
1823
- });
1824
- const listener = new StreamingRunnerListener(runner);
1825
- await Promise.all([
1826
- handleRequest(async function* (request) {
1827
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1828
- yield {
1829
- id: '1',
1830
- choices: [
1831
- {
1832
- index: 0,
1833
- finish_reason: 'function_call',
1834
- delta: {
1835
- role: 'assistant',
1836
- content: null,
1837
- tool_calls: [
1838
- {
1839
- type: 'function',
1840
- index: 0,
1841
- id: '123',
1842
- function: {
1843
- arguments: '',
1844
- name: 'getWeather',
1845
- },
1846
- },
1847
- ],
1848
- },
1849
- },
1850
- ],
1851
- created: Math.floor(Date.now() / 1000),
1852
- model: 'gpt-3.5-turbo',
1853
- object: 'chat.completion.chunk',
1854
- };
1855
- }),
1856
- runner.done(),
1857
- ]);
1858
- expect(listener.eventMessages).toEqual([
1859
- {
1860
- role: 'assistant',
1861
- content: null,
1862
- tool_calls: [
1863
- {
1864
- type: 'function',
1865
- id: '123',
1866
- function: {
1867
- arguments: '',
1868
- name: 'getWeather',
1869
- },
1870
- },
1871
- ],
1872
- },
1873
- { role: 'tool', tool_call_id: '123', content: `it's raining` },
1874
- ]);
1875
- expect(listener.eventFunctionCallResults).toEqual([`it's raining`]);
1876
- await listener.sanityCheck();
1877
- });
1878
- test('wrong function name', async () => {
1879
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
1880
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
1881
- const runner = openai.beta.chat.completions.runTools({
1882
- stream: true,
1883
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
1884
- model: 'gpt-3.5-turbo',
1885
- tools: [
1886
- {
1887
- type: 'function',
1888
- function: {
1889
- function: function getWeather() {
1890
- return `it's raining`;
1891
- },
1892
- parameters: {},
1893
- description: 'gets the weather',
1894
- },
1895
- },
1896
- ],
1897
- });
1898
- const listener = new StreamingRunnerListener(runner);
1899
- await Promise.all([
1900
- handleRequest(async function* (request) {
1901
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
1902
- yield {
1903
- id: '1',
1904
- choices: [
1905
- {
1906
- index: 0,
1907
- finish_reason: 'function_call',
1908
- delta: {
1909
- role: 'assistant',
1910
- content: null,
1911
- tool_calls: [
1912
- {
1913
- type: 'function',
1914
- index: 0,
1915
- id: '123',
1916
- function: {
1917
- arguments: '',
1918
- name: 'get_weather',
1919
- },
1920
- },
1921
- ],
1922
- },
1923
- },
1924
- ],
1925
- created: Math.floor(Date.now() / 1000),
1926
- model: 'gpt-3.5-turbo',
1927
- object: 'chat.completion.chunk',
1928
- };
1929
- }),
1930
- handleRequest(async function* (request) {
1931
- expect(request.messages).toEqual([
1932
- { role: 'user', content: 'tell me what the weather is like' },
1933
- {
1934
- role: 'assistant',
1935
- content: null,
1936
- tool_calls: [
1937
- {
1938
- type: 'function',
1939
- id: '123',
1940
- function: {
1941
- arguments: '',
1942
- name: 'get_weather',
1943
- },
1944
- },
1945
- ],
1946
- },
1947
- {
1948
- role: 'tool',
1949
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
1950
- tool_call_id: '123',
1951
- },
1952
- ]);
1953
- yield {
1954
- id: '2',
1955
- choices: [
1956
- {
1957
- index: 0,
1958
- finish_reason: 'function_call',
1959
- logprobs: null,
1960
- delta: {
1961
- role: 'assistant',
1962
- content: null,
1963
- tool_calls: [
1964
- {
1965
- type: 'function',
1966
- index: 0,
1967
- id: '1234',
1968
- function: {
1969
- arguments: '',
1970
- name: 'getWeather',
1971
- },
1972
- },
1973
- ],
1974
- },
1975
- },
1976
- ],
1977
- created: Math.floor(Date.now() / 1000),
1978
- model: 'gpt-3.5-turbo',
1979
- object: 'chat.completion.chunk',
1980
- };
1981
- }),
1982
- handleRequest(async function* (request) {
1983
- expect(request.messages).toEqual([
1984
- { role: 'user', content: 'tell me what the weather is like' },
1985
- {
1986
- role: 'assistant',
1987
- content: null,
1988
- tool_calls: [
1989
- {
1990
- type: 'function',
1991
- id: '123',
1992
- function: {
1993
- arguments: '',
1994
- name: 'get_weather',
1995
- },
1996
- },
1997
- ],
1998
- },
1999
- {
2000
- role: 'tool',
2001
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
2002
- tool_call_id: '123',
2003
- },
2004
- {
2005
- role: 'assistant',
2006
- content: null,
2007
- tool_calls: [
2008
- {
2009
- type: 'function',
2010
- id: '1234',
2011
- function: {
2012
- arguments: '',
2013
- name: 'getWeather',
2014
- },
2015
- },
2016
- ],
2017
- },
2018
- {
2019
- role: 'tool',
2020
- content: `it's raining`,
2021
- tool_call_id: '1234',
2022
- },
2023
- ]);
2024
- for (const choice of contentChoiceDeltas(`it's raining`)) {
2025
- yield {
2026
- id: '3',
2027
- choices: [choice],
2028
- created: Math.floor(Date.now() / 1000),
2029
- model: 'gpt-3.5-turbo',
2030
- object: 'chat.completion.chunk',
2031
- };
2032
- }
2033
- }),
2034
- runner.done(),
2035
- ]);
2036
- expect(listener.eventMessages).toEqual([
2037
- {
2038
- role: 'assistant',
2039
- content: null,
2040
- tool_calls: [
2041
- {
2042
- type: 'function',
2043
- id: '123',
2044
- function: {
2045
- arguments: '',
2046
- name: 'get_weather',
2047
- },
2048
- },
2049
- ],
2050
- },
2051
- {
2052
- role: 'tool',
2053
- content: `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
2054
- tool_call_id: '123',
2055
- },
2056
- {
2057
- role: 'assistant',
2058
- content: null,
2059
- tool_calls: [
2060
- {
2061
- type: 'function',
2062
- id: '1234',
2063
- function: {
2064
- arguments: '',
2065
- name: 'getWeather',
2066
- },
2067
- },
2068
- ],
2069
- },
2070
- { role: 'tool', content: `it's raining`, tool_call_id: '1234' },
2071
- { role: 'assistant', content: "it's raining" },
2072
- ]);
2073
- expect(listener.eventFunctionCallResults).toEqual([
2074
- `Invalid tool_call: "get_weather". Available options are: "getWeather". Please try again`,
2075
- `it's raining`,
2076
- ]);
2077
- await listener.sanityCheck();
2078
- });
2079
- });
2080
- describe('stream', () => {
2081
- test('successful flow', async () => {
2082
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
2083
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
2084
- const runner = openai.beta.chat.completions.stream({
2085
- stream: true,
2086
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
2087
- model: 'gpt-3.5-turbo',
2088
- });
2089
- const listener = new StreamingRunnerListener(runner);
2090
- await Promise.all([
2091
- handleRequest(async function* (request) {
2092
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
2093
- for (const choice of contentChoiceDeltas(`The weather is great today!`)) {
2094
- yield {
2095
- id: '1',
2096
- choices: [choice],
2097
- created: Math.floor(Date.now() / 1000),
2098
- model: 'gpt-3.5-turbo',
2099
- object: 'chat.completion.chunk',
2100
- };
2101
- }
2102
- }),
2103
- runner.done(),
2104
- ]);
2105
- expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
2106
- await listener.sanityCheck();
2107
- });
2108
- test('toReadableStream and fromReadableStream', async () => {
2109
- const { fetch, handleRequest } = mockStreamingChatCompletionFetch();
2110
- const openai = new OpenAI({ apiKey: 'something1234', baseURL: 'http://127.0.0.1:4010', fetch });
2111
- const runner = openai.beta.chat.completions.stream({
2112
- stream: true,
2113
- messages: [{ role: 'user', content: 'tell me what the weather is like' }],
2114
- model: 'gpt-3.5-turbo',
2115
- });
2116
- const proxied = ChatCompletionStreamingRunner.fromReadableStream(runner.toReadableStream());
2117
- const listener = new StreamingRunnerListener(proxied);
2118
- await Promise.all([
2119
- handleRequest(async function* (request) {
2120
- expect(request.messages).toEqual([{ role: 'user', content: 'tell me what the weather is like' }]);
2121
- for (const choice of contentChoiceDeltas(`The weather is great today!`)) {
2122
- yield {
2123
- id: '1',
2124
- choices: [choice],
2125
- created: Math.floor(Date.now() / 1000),
2126
- model: 'gpt-3.5-turbo',
2127
- object: 'chat.completion.chunk',
2128
- };
2129
- }
2130
- }),
2131
- proxied.done(),
2132
- ]);
2133
- expect(listener.finalMessage).toEqual({ role: 'assistant', content: 'The weather is great today!' });
2134
- await listener.sanityCheck();
2135
- });
2136
- test('handles network errors', async () => {
2137
- const { fetch, handleRequest } = mockFetch();
2138
- const openai = new OpenAI({ apiKey: '...', fetch });
2139
- const stream = openai.beta.chat.completions.stream({
2140
- max_tokens: 1024,
2141
- model: 'gpt-3.5-turbo',
2142
- messages: [{ role: 'user', content: 'Say hello there!' }],
2143
- }, { maxRetries: 0 });
2144
- handleRequest(async () => {
2145
- throw new Error('mock request error');
2146
- }).catch(() => { });
2147
- async function runStream() {
2148
- await stream.done();
2149
- }
2150
- await expect(runStream).rejects.toThrow(APIConnectionError);
2151
- });
2152
- test('handles network errors on async iterator', async () => {
2153
- const { fetch, handleRequest } = mockFetch();
2154
- const openai = new OpenAI({ apiKey: '...', fetch });
2155
- const stream = openai.beta.chat.completions.stream({
2156
- max_tokens: 1024,
2157
- model: 'gpt-3.5-turbo',
2158
- messages: [{ role: 'user', content: 'Say hello there!' }],
2159
- }, { maxRetries: 0 });
2160
- handleRequest(async () => {
2161
- throw new Error('mock request error');
2162
- }).catch(() => { });
2163
- async function runStream() {
2164
- for await (const _event of stream) {
2165
- continue;
2166
- }
2167
- }
2168
- await expect(runStream).rejects.toThrow(APIConnectionError);
2169
- });
2170
- });
2171
- });
2172
- //# sourceMappingURL=ChatCompletionRunFunctions.test.mjs.map