modelmix 2.2.8 → 2.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (542)
  1. package/README.md +19 -6
  2. package/demo/demo.mjs +24 -16
  3. package/demo/lmstudio.mjs +1 -1
  4. package/demo/node_modules/.package-lock.json +0 -47
  5. package/demo/package-lock.json +1 -50
  6. package/demo/package.json +1 -3
  7. package/demo/prompt.md +25 -0
  8. package/demo/stream.mjs +3 -3
  9. package/index.js +88 -2
  10. package/package.json +4 -2
  11. package/demo/node_modules/debug/LICENSE +0 -20
  12. package/demo/node_modules/debug/README.md +0 -481
  13. package/demo/node_modules/debug/node_modules/ms/index.js +0 -162
  14. package/demo/node_modules/debug/node_modules/ms/license.md +0 -21
  15. package/demo/node_modules/debug/node_modules/ms/package.json +0 -37
  16. package/demo/node_modules/debug/node_modules/ms/readme.md +0 -60
  17. package/demo/node_modules/debug/package.json +0 -59
  18. package/demo/node_modules/debug/src/browser.js +0 -269
  19. package/demo/node_modules/debug/src/common.js +0 -274
  20. package/demo/node_modules/debug/src/index.js +0 -10
  21. package/demo/node_modules/debug/src/node.js +0 -263
  22. package/demo/node_modules/lemonlog/README.md +0 -133
  23. package/demo/node_modules/lemonlog/demo/demo.js +0 -31
  24. package/demo/node_modules/lemonlog/index.js +0 -94
  25. package/demo/node_modules/lemonlog/package.json +0 -31
  26. package/demo/node_modules/openai/CHANGELOG.md +0 -1176
  27. package/demo/node_modules/openai/LICENSE +0 -201
  28. package/demo/node_modules/openai/README.md +0 -616
  29. package/demo/node_modules/openai/_shims/MultipartBody.d.ts +0 -9
  30. package/demo/node_modules/openai/_shims/MultipartBody.d.ts.map +0 -1
  31. package/demo/node_modules/openai/_shims/MultipartBody.js +0 -16
  32. package/demo/node_modules/openai/_shims/MultipartBody.js.map +0 -1
  33. package/demo/node_modules/openai/_shims/MultipartBody.mjs +0 -12
  34. package/demo/node_modules/openai/_shims/MultipartBody.mjs.map +0 -1
  35. package/demo/node_modules/openai/_shims/README.md +0 -46
  36. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts +0 -5
  37. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts.map +0 -1
  38. package/demo/node_modules/openai/_shims/auto/runtime-bun.js +0 -21
  39. package/demo/node_modules/openai/_shims/auto/runtime-bun.js.map +0 -1
  40. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs +0 -2
  41. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs.map +0 -1
  42. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts +0 -5
  43. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts.map +0 -1
  44. package/demo/node_modules/openai/_shims/auto/runtime-node.js +0 -21
  45. package/demo/node_modules/openai/_shims/auto/runtime-node.js.map +0 -1
  46. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs +0 -2
  47. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs.map +0 -1
  48. package/demo/node_modules/openai/_shims/auto/runtime.d.ts +0 -5
  49. package/demo/node_modules/openai/_shims/auto/runtime.d.ts.map +0 -1
  50. package/demo/node_modules/openai/_shims/auto/runtime.js +0 -21
  51. package/demo/node_modules/openai/_shims/auto/runtime.js.map +0 -1
  52. package/demo/node_modules/openai/_shims/auto/runtime.mjs +0 -2
  53. package/demo/node_modules/openai/_shims/auto/runtime.mjs.map +0 -1
  54. package/demo/node_modules/openai/_shims/auto/types-node.d.ts +0 -5
  55. package/demo/node_modules/openai/_shims/auto/types-node.d.ts.map +0 -1
  56. package/demo/node_modules/openai/_shims/auto/types-node.js +0 -21
  57. package/demo/node_modules/openai/_shims/auto/types-node.js.map +0 -1
  58. package/demo/node_modules/openai/_shims/auto/types-node.mjs +0 -2
  59. package/demo/node_modules/openai/_shims/auto/types-node.mjs.map +0 -1
  60. package/demo/node_modules/openai/_shims/auto/types.d.ts +0 -101
  61. package/demo/node_modules/openai/_shims/auto/types.js +0 -3
  62. package/demo/node_modules/openai/_shims/auto/types.mjs +0 -3
  63. package/demo/node_modules/openai/_shims/bun-runtime.d.ts +0 -6
  64. package/demo/node_modules/openai/_shims/bun-runtime.d.ts.map +0 -1
  65. package/demo/node_modules/openai/_shims/bun-runtime.js +0 -14
  66. package/demo/node_modules/openai/_shims/bun-runtime.js.map +0 -1
  67. package/demo/node_modules/openai/_shims/bun-runtime.mjs +0 -10
  68. package/demo/node_modules/openai/_shims/bun-runtime.mjs.map +0 -1
  69. package/demo/node_modules/openai/_shims/index.d.ts +0 -81
  70. package/demo/node_modules/openai/_shims/index.js +0 -13
  71. package/demo/node_modules/openai/_shims/index.mjs +0 -7
  72. package/demo/node_modules/openai/_shims/manual-types.d.ts +0 -12
  73. package/demo/node_modules/openai/_shims/manual-types.js +0 -3
  74. package/demo/node_modules/openai/_shims/manual-types.mjs +0 -3
  75. package/demo/node_modules/openai/_shims/node-runtime.d.ts +0 -3
  76. package/demo/node_modules/openai/_shims/node-runtime.d.ts.map +0 -1
  77. package/demo/node_modules/openai/_shims/node-runtime.js +0 -90
  78. package/demo/node_modules/openai/_shims/node-runtime.js.map +0 -1
  79. package/demo/node_modules/openai/_shims/node-runtime.mjs +0 -56
  80. package/demo/node_modules/openai/_shims/node-runtime.mjs.map +0 -1
  81. package/demo/node_modules/openai/_shims/node-types.d.ts +0 -42
  82. package/demo/node_modules/openai/_shims/node-types.js +0 -3
  83. package/demo/node_modules/openai/_shims/node-types.mjs +0 -3
  84. package/demo/node_modules/openai/_shims/registry.d.ts +0 -37
  85. package/demo/node_modules/openai/_shims/registry.d.ts.map +0 -1
  86. package/demo/node_modules/openai/_shims/registry.js +0 -41
  87. package/demo/node_modules/openai/_shims/registry.js.map +0 -1
  88. package/demo/node_modules/openai/_shims/registry.mjs +0 -37
  89. package/demo/node_modules/openai/_shims/registry.mjs.map +0 -1
  90. package/demo/node_modules/openai/_shims/web-runtime.d.ts +0 -5
  91. package/demo/node_modules/openai/_shims/web-runtime.d.ts.map +0 -1
  92. package/demo/node_modules/openai/_shims/web-runtime.js +0 -78
  93. package/demo/node_modules/openai/_shims/web-runtime.js.map +0 -1
  94. package/demo/node_modules/openai/_shims/web-runtime.mjs +0 -71
  95. package/demo/node_modules/openai/_shims/web-runtime.mjs.map +0 -1
  96. package/demo/node_modules/openai/_shims/web-types.d.ts +0 -83
  97. package/demo/node_modules/openai/_shims/web-types.js +0 -3
  98. package/demo/node_modules/openai/_shims/web-types.mjs +0 -3
  99. package/demo/node_modules/openai/bin/cli +0 -49
  100. package/demo/node_modules/openai/core.d.ts +0 -239
  101. package/demo/node_modules/openai/core.d.ts.map +0 -1
  102. package/demo/node_modules/openai/core.js +0 -879
  103. package/demo/node_modules/openai/core.js.map +0 -1
  104. package/demo/node_modules/openai/core.mjs +0 -848
  105. package/demo/node_modules/openai/core.mjs.map +0 -1
  106. package/demo/node_modules/openai/error.d.ts +0 -57
  107. package/demo/node_modules/openai/error.d.ts.map +0 -1
  108. package/demo/node_modules/openai/error.js +0 -148
  109. package/demo/node_modules/openai/error.js.map +0 -1
  110. package/demo/node_modules/openai/error.mjs +0 -132
  111. package/demo/node_modules/openai/error.mjs.map +0 -1
  112. package/demo/node_modules/openai/index.d.mts +0 -267
  113. package/demo/node_modules/openai/index.d.ts +0 -267
  114. package/demo/node_modules/openai/index.d.ts.map +0 -1
  115. package/demo/node_modules/openai/index.js +0 -262
  116. package/demo/node_modules/openai/index.js.map +0 -1
  117. package/demo/node_modules/openai/index.mjs +0 -232
  118. package/demo/node_modules/openai/index.mjs.map +0 -1
  119. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts +0 -74
  120. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts.map +0 -1
  121. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js +0 -246
  122. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js.map +0 -1
  123. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs +0 -242
  124. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs.map +0 -1
  125. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +0 -114
  126. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts.map +0 -1
  127. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js +0 -519
  128. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js.map +0 -1
  129. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs +0 -515
  130. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs.map +0 -1
  131. package/demo/node_modules/openai/lib/AssistantStream.d.ts +0 -58
  132. package/demo/node_modules/openai/lib/AssistantStream.d.ts.map +0 -1
  133. package/demo/node_modules/openai/lib/AssistantStream.js +0 -548
  134. package/demo/node_modules/openai/lib/AssistantStream.js.map +0 -1
  135. package/demo/node_modules/openai/lib/AssistantStream.mjs +0 -521
  136. package/demo/node_modules/openai/lib/AssistantStream.mjs.map +0 -1
  137. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts +0 -2
  138. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts.map +0 -1
  139. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js +0 -2177
  140. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js.map +0 -1
  141. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs +0 -2172
  142. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs.map +0 -1
  143. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts +0 -19
  144. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts.map +0 -1
  145. package/demo/node_modules/openai/lib/ChatCompletionRunner.js +0 -34
  146. package/demo/node_modules/openai/lib/ChatCompletionRunner.js.map +0 -1
  147. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs +0 -30
  148. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs.map +0 -1
  149. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts +0 -149
  150. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts.map +0 -1
  151. package/demo/node_modules/openai/lib/ChatCompletionStream.js +0 -312
  152. package/demo/node_modules/openai/lib/ChatCompletionStream.js.map +0 -1
  153. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs +0 -308
  154. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs.map +0 -1
  155. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +0 -22
  156. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts.map +0 -1
  157. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js +0 -32
  158. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js.map +0 -1
  159. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs +0 -28
  160. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs.map +0 -1
  161. package/demo/node_modules/openai/lib/RunnableFunction.d.ts +0 -95
  162. package/demo/node_modules/openai/lib/RunnableFunction.d.ts.map +0 -1
  163. package/demo/node_modules/openai/lib/RunnableFunction.js +0 -35
  164. package/demo/node_modules/openai/lib/RunnableFunction.js.map +0 -1
  165. package/demo/node_modules/openai/lib/RunnableFunction.mjs +0 -29
  166. package/demo/node_modules/openai/lib/RunnableFunction.mjs.map +0 -1
  167. package/demo/node_modules/openai/lib/Util.d.ts +0 -5
  168. package/demo/node_modules/openai/lib/Util.d.ts.map +0 -1
  169. package/demo/node_modules/openai/lib/Util.js +0 -26
  170. package/demo/node_modules/openai/lib/Util.js.map +0 -1
  171. package/demo/node_modules/openai/lib/Util.mjs +0 -22
  172. package/demo/node_modules/openai/lib/Util.mjs.map +0 -1
  173. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts +0 -6
  174. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts.map +0 -1
  175. package/demo/node_modules/openai/lib/chatCompletionUtils.js +0 -20
  176. package/demo/node_modules/openai/lib/chatCompletionUtils.js.map +0 -1
  177. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs +0 -13
  178. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs.map +0 -1
  179. package/demo/node_modules/openai/lib/jsonschema.d.ts +0 -106
  180. package/demo/node_modules/openai/lib/jsonschema.d.ts.map +0 -1
  181. package/demo/node_modules/openai/lib/jsonschema.js +0 -11
  182. package/demo/node_modules/openai/lib/jsonschema.js.map +0 -1
  183. package/demo/node_modules/openai/lib/jsonschema.mjs +0 -10
  184. package/demo/node_modules/openai/lib/jsonschema.mjs.map +0 -1
  185. package/demo/node_modules/openai/package.json +0 -105
  186. package/demo/node_modules/openai/pagination.d.ts +0 -37
  187. package/demo/node_modules/openai/pagination.d.ts.map +0 -1
  188. package/demo/node_modules/openai/pagination.js +0 -64
  189. package/demo/node_modules/openai/pagination.js.map +0 -1
  190. package/demo/node_modules/openai/pagination.mjs +0 -59
  191. package/demo/node_modules/openai/pagination.mjs.map +0 -1
  192. package/demo/node_modules/openai/resource.d.ts +0 -6
  193. package/demo/node_modules/openai/resource.d.ts.map +0 -1
  194. package/demo/node_modules/openai/resource.js +0 -11
  195. package/demo/node_modules/openai/resource.js.map +0 -1
  196. package/demo/node_modules/openai/resource.mjs +0 -7
  197. package/demo/node_modules/openai/resource.mjs.map +0 -1
  198. package/demo/node_modules/openai/resources/audio/audio.d.ts +0 -20
  199. package/demo/node_modules/openai/resources/audio/audio.d.ts.map +0 -1
  200. package/demo/node_modules/openai/resources/audio/audio.js +0 -46
  201. package/demo/node_modules/openai/resources/audio/audio.js.map +0 -1
  202. package/demo/node_modules/openai/resources/audio/audio.mjs +0 -19
  203. package/demo/node_modules/openai/resources/audio/audio.mjs.map +0 -1
  204. package/demo/node_modules/openai/resources/audio/index.d.ts +0 -5
  205. package/demo/node_modules/openai/resources/audio/index.d.ts.map +0 -1
  206. package/demo/node_modules/openai/resources/audio/index.js +0 -13
  207. package/demo/node_modules/openai/resources/audio/index.js.map +0 -1
  208. package/demo/node_modules/openai/resources/audio/index.mjs +0 -6
  209. package/demo/node_modules/openai/resources/audio/index.mjs.map +0 -1
  210. package/demo/node_modules/openai/resources/audio/speech.d.ts +0 -42
  211. package/demo/node_modules/openai/resources/audio/speech.d.ts.map +0 -1
  212. package/demo/node_modules/openai/resources/audio/speech.js +0 -17
  213. package/demo/node_modules/openai/resources/audio/speech.js.map +0 -1
  214. package/demo/node_modules/openai/resources/audio/speech.mjs +0 -13
  215. package/demo/node_modules/openai/resources/audio/speech.mjs.map +0 -1
  216. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts +0 -71
  217. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts.map +0 -1
  218. package/demo/node_modules/openai/resources/audio/transcriptions.js +0 -18
  219. package/demo/node_modules/openai/resources/audio/transcriptions.js.map +0 -1
  220. package/demo/node_modules/openai/resources/audio/transcriptions.mjs +0 -14
  221. package/demo/node_modules/openai/resources/audio/transcriptions.mjs.map +0 -1
  222. package/demo/node_modules/openai/resources/audio/translations.d.ts +0 -50
  223. package/demo/node_modules/openai/resources/audio/translations.d.ts.map +0 -1
  224. package/demo/node_modules/openai/resources/audio/translations.js +0 -18
  225. package/demo/node_modules/openai/resources/audio/translations.js.map +0 -1
  226. package/demo/node_modules/openai/resources/audio/translations.mjs +0 -14
  227. package/demo/node_modules/openai/resources/audio/translations.mjs.map +0 -1
  228. package/demo/node_modules/openai/resources/batches.d.ts +0 -189
  229. package/demo/node_modules/openai/resources/batches.d.ts.map +0 -1
  230. package/demo/node_modules/openai/resources/batches.js +0 -65
  231. package/demo/node_modules/openai/resources/batches.js.map +0 -1
  232. package/demo/node_modules/openai/resources/batches.mjs +0 -37
  233. package/demo/node_modules/openai/resources/batches.mjs.map +0 -1
  234. package/demo/node_modules/openai/resources/beta/assistants.d.ts +0 -1048
  235. package/demo/node_modules/openai/resources/beta/assistants.d.ts.map +0 -1
  236. package/demo/node_modules/openai/resources/beta/assistants.js +0 -89
  237. package/demo/node_modules/openai/resources/beta/assistants.js.map +0 -1
  238. package/demo/node_modules/openai/resources/beta/assistants.mjs +0 -61
  239. package/demo/node_modules/openai/resources/beta/assistants.mjs.map +0 -1
  240. package/demo/node_modules/openai/resources/beta/beta.d.ts +0 -53
  241. package/demo/node_modules/openai/resources/beta/beta.d.ts.map +0 -1
  242. package/demo/node_modules/openai/resources/beta/beta.js +0 -51
  243. package/demo/node_modules/openai/resources/beta/beta.js.map +0 -1
  244. package/demo/node_modules/openai/resources/beta/beta.mjs +0 -24
  245. package/demo/node_modules/openai/resources/beta/beta.mjs.map +0 -1
  246. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts +0 -9
  247. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts.map +0 -1
  248. package/demo/node_modules/openai/resources/beta/chat/chat.js +0 -40
  249. package/demo/node_modules/openai/resources/beta/chat/chat.js.map +0 -1
  250. package/demo/node_modules/openai/resources/beta/chat/chat.mjs +0 -13
  251. package/demo/node_modules/openai/resources/beta/chat/chat.mjs.map +0 -1
  252. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts +0 -37
  253. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts.map +0 -1
  254. package/demo/node_modules/openai/resources/beta/chat/completions.js +0 -39
  255. package/demo/node_modules/openai/resources/beta/chat/completions.js.map +0 -1
  256. package/demo/node_modules/openai/resources/beta/chat/completions.mjs +0 -30
  257. package/demo/node_modules/openai/resources/beta/chat/completions.mjs.map +0 -1
  258. package/demo/node_modules/openai/resources/beta/chat/index.d.ts +0 -3
  259. package/demo/node_modules/openai/resources/beta/chat/index.d.ts.map +0 -1
  260. package/demo/node_modules/openai/resources/beta/chat/index.js +0 -9
  261. package/demo/node_modules/openai/resources/beta/chat/index.js.map +0 -1
  262. package/demo/node_modules/openai/resources/beta/chat/index.mjs +0 -4
  263. package/demo/node_modules/openai/resources/beta/chat/index.mjs.map +0 -1
  264. package/demo/node_modules/openai/resources/beta/index.d.ts +0 -6
  265. package/demo/node_modules/openai/resources/beta/index.d.ts.map +0 -1
  266. package/demo/node_modules/openai/resources/beta/index.js +0 -17
  267. package/demo/node_modules/openai/resources/beta/index.js.map +0 -1
  268. package/demo/node_modules/openai/resources/beta/index.mjs +0 -7
  269. package/demo/node_modules/openai/resources/beta/index.mjs.map +0 -1
  270. package/demo/node_modules/openai/resources/beta/threads/index.d.ts +0 -4
  271. package/demo/node_modules/openai/resources/beta/threads/index.d.ts.map +0 -1
  272. package/demo/node_modules/openai/resources/beta/threads/index.js +0 -13
  273. package/demo/node_modules/openai/resources/beta/threads/index.js.map +0 -1
  274. package/demo/node_modules/openai/resources/beta/threads/index.mjs +0 -5
  275. package/demo/node_modules/openai/resources/beta/threads/index.mjs.map +0 -1
  276. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts +0 -552
  277. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts.map +0 -1
  278. package/demo/node_modules/openai/resources/beta/threads/messages.js +0 -89
  279. package/demo/node_modules/openai/resources/beta/threads/messages.js.map +0 -1
  280. package/demo/node_modules/openai/resources/beta/threads/messages.mjs +0 -61
  281. package/demo/node_modules/openai/resources/beta/threads/messages.mjs.map +0 -1
  282. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts +0 -3
  283. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts.map +0 -1
  284. package/demo/node_modules/openai/resources/beta/threads/runs/index.js +0 -11
  285. package/demo/node_modules/openai/resources/beta/threads/runs/index.js.map +0 -1
  286. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs +0 -4
  287. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs.map +0 -1
  288. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts +0 -1194
  289. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts.map +0 -1
  290. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js +0 -190
  291. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js.map +0 -1
  292. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs +0 -162
  293. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs.map +0 -1
  294. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts +0 -520
  295. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts.map +0 -1
  296. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js +0 -60
  297. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js.map +0 -1
  298. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs +0 -32
  299. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs.map +0 -1
  300. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts +0 -1240
  301. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts.map +0 -1
  302. package/demo/node_modules/openai/resources/beta/threads/threads.js +0 -108
  303. package/demo/node_modules/openai/resources/beta/threads/threads.js.map +0 -1
  304. package/demo/node_modules/openai/resources/beta/threads/threads.mjs +0 -81
  305. package/demo/node_modules/openai/resources/beta/threads/threads.mjs.map +0 -1
  306. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts +0 -142
  307. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts.map +0 -1
  308. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js +0 -129
  309. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js.map +0 -1
  310. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs +0 -125
  311. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs.map +0 -1
  312. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts +0 -154
  313. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts.map +0 -1
  314. package/demo/node_modules/openai/resources/beta/vector-stores/files.js +0 -149
  315. package/demo/node_modules/openai/resources/beta/vector-stores/files.js.map +0 -1
  316. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs +0 -121
  317. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs.map +0 -1
  318. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts +0 -4
  319. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts.map +0 -1
  320. package/demo/node_modules/openai/resources/beta/vector-stores/index.js +0 -13
  321. package/demo/node_modules/openai/resources/beta/vector-stores/index.js.map +0 -1
  322. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs +0 -5
  323. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs.map +0 -1
  324. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts +0 -233
  325. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts.map +0 -1
  326. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js +0 -99
  327. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js.map +0 -1
  328. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs +0 -71
  329. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs.map +0 -1
  330. package/demo/node_modules/openai/resources/chat/chat.d.ts +0 -42
  331. package/demo/node_modules/openai/resources/chat/chat.d.ts.map +0 -1
  332. package/demo/node_modules/openai/resources/chat/chat.js +0 -40
  333. package/demo/node_modules/openai/resources/chat/chat.js.map +0 -1
  334. package/demo/node_modules/openai/resources/chat/chat.mjs +0 -13
  335. package/demo/node_modules/openai/resources/chat/chat.mjs.map +0 -1
  336. package/demo/node_modules/openai/resources/chat/completions.d.ts +0 -845
  337. package/demo/node_modules/openai/resources/chat/completions.d.ts.map +0 -1
  338. package/demo/node_modules/openai/resources/chat/completions.js +0 -14
  339. package/demo/node_modules/openai/resources/chat/completions.js.map +0 -1
  340. package/demo/node_modules/openai/resources/chat/completions.mjs +0 -10
  341. package/demo/node_modules/openai/resources/chat/completions.mjs.map +0 -1
  342. package/demo/node_modules/openai/resources/chat/index.d.ts +0 -3
  343. package/demo/node_modules/openai/resources/chat/index.d.ts.map +0 -1
  344. package/demo/node_modules/openai/resources/chat/index.js +0 -9
  345. package/demo/node_modules/openai/resources/chat/index.js.map +0 -1
  346. package/demo/node_modules/openai/resources/chat/index.mjs +0 -4
  347. package/demo/node_modules/openai/resources/chat/index.mjs.map +0 -1
  348. package/demo/node_modules/openai/resources/completions.d.ts +0 -272
  349. package/demo/node_modules/openai/resources/completions.d.ts.map +0 -1
  350. package/demo/node_modules/openai/resources/completions.js +0 -14
  351. package/demo/node_modules/openai/resources/completions.js.map +0 -1
  352. package/demo/node_modules/openai/resources/completions.mjs +0 -10
  353. package/demo/node_modules/openai/resources/completions.mjs.map +0 -1
  354. package/demo/node_modules/openai/resources/embeddings.d.ts +0 -103
  355. package/demo/node_modules/openai/resources/embeddings.d.ts.map +0 -1
  356. package/demo/node_modules/openai/resources/embeddings.js +0 -17
  357. package/demo/node_modules/openai/resources/embeddings.js.map +0 -1
  358. package/demo/node_modules/openai/resources/embeddings.mjs +0 -13
  359. package/demo/node_modules/openai/resources/embeddings.mjs.map +0 -1
  360. package/demo/node_modules/openai/resources/files.d.ts +0 -140
  361. package/demo/node_modules/openai/resources/files.d.ts.map +0 -1
  362. package/demo/node_modules/openai/resources/files.js +0 -120
  363. package/demo/node_modules/openai/resources/files.js.map +0 -1
  364. package/demo/node_modules/openai/resources/files.mjs +0 -92
  365. package/demo/node_modules/openai/resources/files.mjs.map +0 -1
  366. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +0 -19
  367. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts.map +0 -1
  368. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js +0 -42
  369. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js.map +0 -1
  370. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs +0 -15
  371. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs.map +0 -1
  372. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts +0 -3
  373. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts.map +0 -1
  374. package/demo/node_modules/openai/resources/fine-tuning/index.js +0 -11
  375. package/demo/node_modules/openai/resources/fine-tuning/index.js.map +0 -1
  376. package/demo/node_modules/openai/resources/fine-tuning/index.mjs +0 -4
  377. package/demo/node_modules/openai/resources/fine-tuning/index.mjs.map +0 -1
  378. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +0 -69
  379. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts.map +0 -1
  380. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js +0 -47
  381. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js.map +0 -1
  382. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs +0 -19
  383. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs.map +0 -1
  384. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +0 -3
  385. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts.map +0 -1
  386. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js +0 -12
  387. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js.map +0 -1
  388. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs +0 -4
  389. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs.map +0 -1
  390. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +0 -362
  391. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts.map +0 -1
  392. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js +0 -93
  393. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js.map +0 -1
  394. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs +0 -64
  395. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs.map +0 -1
  396. package/demo/node_modules/openai/resources/images.d.ts +0 -174
  397. package/demo/node_modules/openai/resources/images.d.ts.map +0 -1
  398. package/demo/node_modules/openai/resources/images.js +0 -30
  399. package/demo/node_modules/openai/resources/images.js.map +0 -1
  400. package/demo/node_modules/openai/resources/images.mjs +0 -26
  401. package/demo/node_modules/openai/resources/images.mjs.map +0 -1
  402. package/demo/node_modules/openai/resources/index.d.ts +0 -13
  403. package/demo/node_modules/openai/resources/index.d.ts.map +0 -1
  404. package/demo/node_modules/openai/resources/index.js +0 -44
  405. package/demo/node_modules/openai/resources/index.js.map +0 -1
  406. package/demo/node_modules/openai/resources/index.mjs +0 -14
  407. package/demo/node_modules/openai/resources/index.mjs.map +0 -1
  408. package/demo/node_modules/openai/resources/models.d.ts +0 -58
  409. package/demo/node_modules/openai/resources/models.d.ts.map +0 -1
  410. package/demo/node_modules/openai/resources/models.js +0 -64
  411. package/demo/node_modules/openai/resources/models.js.map +0 -1
  412. package/demo/node_modules/openai/resources/models.mjs +0 -36
  413. package/demo/node_modules/openai/resources/models.mjs.map +0 -1
  414. package/demo/node_modules/openai/resources/moderations.d.ts +0 -176
  415. package/demo/node_modules/openai/resources/moderations.d.ts.map +0 -1
  416. package/demo/node_modules/openai/resources/moderations.js +0 -17
  417. package/demo/node_modules/openai/resources/moderations.js.map +0 -1
  418. package/demo/node_modules/openai/resources/moderations.mjs +0 -13
  419. package/demo/node_modules/openai/resources/moderations.mjs.map +0 -1
  420. package/demo/node_modules/openai/resources/shared.d.ts +0 -39
  421. package/demo/node_modules/openai/resources/shared.d.ts.map +0 -1
  422. package/demo/node_modules/openai/resources/shared.js +0 -4
  423. package/demo/node_modules/openai/resources/shared.js.map +0 -1
  424. package/demo/node_modules/openai/resources/shared.mjs +0 -3
  425. package/demo/node_modules/openai/resources/shared.mjs.map +0 -1
  426. package/demo/node_modules/openai/shims/node.d.ts +0 -29
  427. package/demo/node_modules/openai/shims/node.d.ts.map +0 -1
  428. package/demo/node_modules/openai/shims/node.js +0 -31
  429. package/demo/node_modules/openai/shims/node.js.map +0 -1
  430. package/demo/node_modules/openai/shims/node.mjs +0 -5
  431. package/demo/node_modules/openai/shims/node.mjs.map +0 -1
  432. package/demo/node_modules/openai/shims/web.d.ts +0 -26
  433. package/demo/node_modules/openai/shims/web.d.ts.map +0 -1
  434. package/demo/node_modules/openai/shims/web.js +0 -31
  435. package/demo/node_modules/openai/shims/web.js.map +0 -1
  436. package/demo/node_modules/openai/shims/web.mjs +0 -5
  437. package/demo/node_modules/openai/shims/web.mjs.map +0 -1
  438. package/demo/node_modules/openai/src/_shims/MultipartBody.ts +0 -9
  439. package/demo/node_modules/openai/src/_shims/README.md +0 -46
  440. package/demo/node_modules/openai/src/_shims/auto/runtime-bun.ts +0 -4
  441. package/demo/node_modules/openai/src/_shims/auto/runtime-node.ts +0 -4
  442. package/demo/node_modules/openai/src/_shims/auto/runtime.ts +0 -4
  443. package/demo/node_modules/openai/src/_shims/auto/types-node.ts +0 -4
  444. package/demo/node_modules/openai/src/_shims/auto/types.d.ts +0 -101
  445. package/demo/node_modules/openai/src/_shims/auto/types.js +0 -3
  446. package/demo/node_modules/openai/src/_shims/auto/types.mjs +0 -3
  447. package/demo/node_modules/openai/src/_shims/bun-runtime.ts +0 -14
  448. package/demo/node_modules/openai/src/_shims/index.d.ts +0 -81
  449. package/demo/node_modules/openai/src/_shims/index.js +0 -13
  450. package/demo/node_modules/openai/src/_shims/index.mjs +0 -7
  451. package/demo/node_modules/openai/src/_shims/manual-types.d.ts +0 -12
  452. package/demo/node_modules/openai/src/_shims/manual-types.js +0 -3
  453. package/demo/node_modules/openai/src/_shims/manual-types.mjs +0 -3
  454. package/demo/node_modules/openai/src/_shims/node-runtime.ts +0 -83
  455. package/demo/node_modules/openai/src/_shims/node-types.d.ts +0 -42
  456. package/demo/node_modules/openai/src/_shims/node-types.js +0 -3
  457. package/demo/node_modules/openai/src/_shims/node-types.mjs +0 -3
  458. package/demo/node_modules/openai/src/_shims/registry.ts +0 -65
  459. package/demo/node_modules/openai/src/_shims/web-runtime.ts +0 -103
  460. package/demo/node_modules/openai/src/_shims/web-types.d.ts +0 -83
  461. package/demo/node_modules/openai/src/_shims/web-types.js +0 -3
  462. package/demo/node_modules/openai/src/_shims/web-types.mjs +0 -3
  463. package/demo/node_modules/openai/src/core.ts +0 -1162
  464. package/demo/node_modules/openai/src/error.ts +0 -158
  465. package/demo/node_modules/openai/src/index.ts +0 -502
  466. package/demo/node_modules/openai/src/lib/.keep +0 -4
  467. package/demo/node_modules/openai/src/lib/AbstractAssistantStreamRunner.ts +0 -340
  468. package/demo/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts +0 -682
  469. package/demo/node_modules/openai/src/lib/AssistantStream.ts +0 -723
  470. package/demo/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts +0 -2328
  471. package/demo/node_modules/openai/src/lib/ChatCompletionRunner.ts +0 -68
  472. package/demo/node_modules/openai/src/lib/ChatCompletionStream.ts +0 -494
  473. package/demo/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts +0 -68
  474. package/demo/node_modules/openai/src/lib/RunnableFunction.ts +0 -134
  475. package/demo/node_modules/openai/src/lib/Util.ts +0 -23
  476. package/demo/node_modules/openai/src/lib/chatCompletionUtils.ts +0 -28
  477. package/demo/node_modules/openai/src/lib/jsonschema.ts +0 -148
  478. package/demo/node_modules/openai/src/pagination.ts +0 -98
  479. package/demo/node_modules/openai/src/resource.ts +0 -11
  480. package/demo/node_modules/openai/src/resources/audio/audio.ts +0 -23
  481. package/demo/node_modules/openai/src/resources/audio/index.ts +0 -6
  482. package/demo/node_modules/openai/src/resources/audio/speech.ts +0 -52
  483. package/demo/node_modules/openai/src/resources/audio/transcriptions.ts +0 -84
  484. package/demo/node_modules/openai/src/resources/audio/translations.ts +0 -61
  485. package/demo/node_modules/openai/src/resources/batches.ts +0 -252
  486. package/demo/node_modules/openai/src/resources/beta/assistants.ts +0 -1315
  487. package/demo/node_modules/openai/src/resources/beta/beta.ts +0 -56
  488. package/demo/node_modules/openai/src/resources/beta/chat/chat.ts +0 -12
  489. package/demo/node_modules/openai/src/resources/beta/chat/completions.ts +0 -106
  490. package/demo/node_modules/openai/src/resources/beta/chat/index.ts +0 -4
  491. package/demo/node_modules/openai/src/resources/beta/index.ts +0 -48
  492. package/demo/node_modules/openai/src/resources/beta/threads/index.ts +0 -72
  493. package/demo/node_modules/openai/src/resources/beta/threads/messages.ts +0 -706
  494. package/demo/node_modules/openai/src/resources/beta/threads/runs/index.ts +0 -44
  495. package/demo/node_modules/openai/src/resources/beta/threads/runs/runs.ts +0 -1627
  496. package/demo/node_modules/openai/src/resources/beta/threads/runs/steps.ts +0 -641
  497. package/demo/node_modules/openai/src/resources/beta/threads/threads.ts +0 -1536
  498. package/demo/node_modules/openai/src/resources/beta/vector-stores/file-batches.ts +0 -293
  499. package/demo/node_modules/openai/src/resources/beta/vector-stores/files.ts +0 -284
  500. package/demo/node_modules/openai/src/resources/beta/vector-stores/index.ts +0 -25
  501. package/demo/node_modules/openai/src/resources/beta/vector-stores/vector-stores.ts +0 -318
  502. package/demo/node_modules/openai/src/resources/chat/chat.ts +0 -67
  503. package/demo/node_modules/openai/src/resources/chat/completions.ts +0 -996
  504. package/demo/node_modules/openai/src/resources/chat/index.ts +0 -33
  505. package/demo/node_modules/openai/src/resources/completions.ts +0 -329
  506. package/demo/node_modules/openai/src/resources/embeddings.ts +0 -125
  507. package/demo/node_modules/openai/src/resources/files.ts +0 -214
  508. package/demo/node_modules/openai/src/resources/fine-tuning/fine-tuning.ts +0 -22
  509. package/demo/node_modules/openai/src/resources/fine-tuning/index.ts +0 -16
  510. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/checkpoints.ts +0 -108
  511. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/index.ts +0 -21
  512. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/jobs.ts +0 -458
  513. package/demo/node_modules/openai/src/resources/images.ts +0 -215
  514. package/demo/node_modules/openai/src/resources/index.ts +0 -45
  515. package/demo/node_modules/openai/src/resources/models.ts +0 -76
  516. package/demo/node_modules/openai/src/resources/moderations.ts +0 -214
  517. package/demo/node_modules/openai/src/resources/shared.ts +0 -47
  518. package/demo/node_modules/openai/src/shims/node.ts +0 -50
  519. package/demo/node_modules/openai/src/shims/web.ts +0 -50
  520. package/demo/node_modules/openai/src/streaming.ts +0 -508
  521. package/demo/node_modules/openai/src/tsconfig.json +0 -11
  522. package/demo/node_modules/openai/src/uploads.ts +0 -248
  523. package/demo/node_modules/openai/src/version.ts +0 -1
  524. package/demo/node_modules/openai/streaming.d.ts +0 -41
  525. package/demo/node_modules/openai/streaming.d.ts.map +0 -1
  526. package/demo/node_modules/openai/streaming.js +0 -433
  527. package/demo/node_modules/openai/streaming.js.map +0 -1
  528. package/demo/node_modules/openai/streaming.mjs +0 -426
  529. package/demo/node_modules/openai/streaming.mjs.map +0 -1
  530. package/demo/node_modules/openai/uploads.d.ts +0 -75
  531. package/demo/node_modules/openai/uploads.d.ts.map +0 -1
  532. package/demo/node_modules/openai/uploads.js +0 -165
  533. package/demo/node_modules/openai/uploads.js.map +0 -1
  534. package/demo/node_modules/openai/uploads.mjs +0 -152
  535. package/demo/node_modules/openai/uploads.mjs.map +0 -1
  536. package/demo/node_modules/openai/version.d.ts +0 -2
  537. package/demo/node_modules/openai/version.d.ts.map +0 -1
  538. package/demo/node_modules/openai/version.js +0 -5
  539. package/demo/node_modules/openai/version.js.map +0 -1
  540. package/demo/node_modules/openai/version.mjs +0 -2
  541. package/demo/node_modules/openai/version.mjs.map +0 -1
  542. package/demo/watson.png +0 -0
@@ -1,1194 +0,0 @@
1
- import * as Core from "../../../../core.js";
2
- import { APIPromise } from "../../../../core.js";
3
- import { APIResource } from "../../../../resource.js";
4
- import { AssistantStream, RunCreateParamsBaseStream } from "../../../../lib/AssistantStream.js";
5
- import { RunSubmitToolOutputsParamsStream } from "../../../../lib/AssistantStream.js";
6
- import * as RunsAPI from "./runs.js";
7
- import * as AssistantsAPI from "../../assistants.js";
8
- import * as MessagesAPI from "../messages.js";
9
- import * as ThreadsAPI from "../threads.js";
10
- import * as StepsAPI from "./steps.js";
11
- import { CursorPage, type CursorPageParams } from "../../../../pagination.js";
12
- import { Stream } from "../../../../streaming.js";
13
- export declare class Runs extends APIResource {
14
- steps: StepsAPI.Steps;
15
- /**
16
- * Create a run.
17
- */
18
- create(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Run>;
19
- create(threadId: string, body: RunCreateParamsStreaming, options?: Core.RequestOptions): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
20
- create(threadId: string, body: RunCreateParamsBase, options?: Core.RequestOptions): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;
21
- /**
22
- * Retrieves a run.
23
- */
24
- retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run>;
25
- /**
26
- * Modifies a run.
27
- */
28
- update(threadId: string, runId: string, body: RunUpdateParams, options?: Core.RequestOptions): Core.APIPromise<Run>;
29
- /**
30
- * Returns a list of runs belonging to a thread.
31
- */
32
- list(threadId: string, query?: RunListParams, options?: Core.RequestOptions): Core.PagePromise<RunsPage, Run>;
33
- list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<RunsPage, Run>;
34
- /**
35
- * Cancels a run that is `in_progress`.
36
- */
37
- cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run>;
38
- /**
39
- * A helper to create a run an poll for a terminal state. More information on Run
40
- * lifecycles can be found here:
41
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
42
- */
43
- createAndPoll(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions & {
44
- pollIntervalMs?: number;
45
- }): Promise<Run>;
46
- /**
47
- * Create a Run stream
48
- *
49
- * @deprecated use `stream` instead
50
- */
51
- createAndStream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream;
52
- /**
53
- * A helper to poll a run status until it reaches a terminal state. More
54
- * information on Run lifecycles can be found here:
55
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
56
- */
57
- poll(threadId: string, runId: string, options?: Core.RequestOptions & {
58
- pollIntervalMs?: number;
59
- }): Promise<Run>;
60
- /**
61
- * Create a Run stream
62
- */
63
- stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream;
64
- /**
65
- * When a run has the `status: "requires_action"` and `required_action.type` is
66
- * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
67
- * tool calls once they're all completed. All outputs must be submitted in a single
68
- * request.
69
- */
70
- submitToolOutputs(threadId: string, runId: string, body: RunSubmitToolOutputsParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Run>;
71
- submitToolOutputs(threadId: string, runId: string, body: RunSubmitToolOutputsParamsStreaming, options?: Core.RequestOptions): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
72
- submitToolOutputs(threadId: string, runId: string, body: RunSubmitToolOutputsParamsBase, options?: Core.RequestOptions): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;
73
- /**
74
- * A helper to submit a tool output to a run and poll for a terminal run state.
75
- * More information on Run lifecycles can be found here:
76
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
77
- */
78
- submitToolOutputsAndPoll(threadId: string, runId: string, body: RunSubmitToolOutputsParamsNonStreaming, options?: Core.RequestOptions & {
79
- pollIntervalMs?: number;
80
- }): Promise<Run>;
81
- /**
82
- * Submit the tool outputs from a previous run and stream the run to a terminal
83
- * state. More information on Run lifecycles can be found here:
84
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
85
- */
86
- submitToolOutputsStream(threadId: string, runId: string, body: RunSubmitToolOutputsParamsStream, options?: Core.RequestOptions): AssistantStream;
87
- }
88
- export declare class RunsPage extends CursorPage<Run> {
89
- }
90
- /**
91
- * Tool call objects
92
- */
93
- export interface RequiredActionFunctionToolCall {
94
- /**
95
- * The ID of the tool call. This ID must be referenced when you submit the tool
96
- * outputs in using the
97
- * [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
98
- * endpoint.
99
- */
100
- id: string;
101
- /**
102
- * The function definition.
103
- */
104
- function: RequiredActionFunctionToolCall.Function;
105
- /**
106
- * The type of tool call the output is required for. For now, this is always
107
- * `function`.
108
- */
109
- type: 'function';
110
- }
111
- export declare namespace RequiredActionFunctionToolCall {
112
- /**
113
- * The function definition.
114
- */
115
- interface Function {
116
- /**
117
- * The arguments that the model expects you to pass to the function.
118
- */
119
- arguments: string;
120
- /**
121
- * The name of the function.
122
- */
123
- name: string;
124
- }
125
- }
126
- /**
127
- * Represents an execution run on a
128
- * [thread](https://platform.openai.com/docs/api-reference/threads).
129
- */
130
- export interface Run {
131
- /**
132
- * The identifier, which can be referenced in API endpoints.
133
- */
134
- id: string;
135
- /**
136
- * The ID of the
137
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
138
- * execution of this run.
139
- */
140
- assistant_id: string;
141
- /**
142
- * The Unix timestamp (in seconds) for when the run was cancelled.
143
- */
144
- cancelled_at: number | null;
145
- /**
146
- * The Unix timestamp (in seconds) for when the run was completed.
147
- */
148
- completed_at: number | null;
149
- /**
150
- * The Unix timestamp (in seconds) for when the run was created.
151
- */
152
- created_at: number;
153
- /**
154
- * The Unix timestamp (in seconds) for when the run will expire.
155
- */
156
- expires_at: number | null;
157
- /**
158
- * The Unix timestamp (in seconds) for when the run failed.
159
- */
160
- failed_at: number | null;
161
- /**
162
- * Details on why the run is incomplete. Will be `null` if the run is not
163
- * incomplete.
164
- */
165
- incomplete_details: Run.IncompleteDetails | null;
166
- /**
167
- * The instructions that the
168
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
169
- * this run.
170
- */
171
- instructions: string;
172
- /**
173
- * The last error associated with this run. Will be `null` if there are no errors.
174
- */
175
- last_error: Run.LastError | null;
176
- /**
177
- * The maximum number of completion tokens specified to have been used over the
178
- * course of the run.
179
- */
180
- max_completion_tokens: number | null;
181
- /**
182
- * The maximum number of prompt tokens specified to have been used over the course
183
- * of the run.
184
- */
185
- max_prompt_tokens: number | null;
186
- /**
187
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
188
- * for storing additional information about the object in a structured format. Keys
189
- * can be a maximum of 64 characters long and values can be a maxium of 512
190
- * characters long.
191
- */
192
- metadata: unknown | null;
193
- /**
194
- * The model that the
195
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
196
- * this run.
197
- */
198
- model: string;
199
- /**
200
- * The object type, which is always `thread.run`.
201
- */
202
- object: 'thread.run';
203
- /**
204
- * Details on the action required to continue the run. Will be `null` if no action
205
- * is required.
206
- */
207
- required_action: Run.RequiredAction | null;
208
- /**
209
- * Specifies the format that the model must output. Compatible with
210
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
211
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
212
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
213
- *
214
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
215
- * message the model generates is valid JSON.
216
- *
217
- * **Important:** when using JSON mode, you **must** also instruct the model to
218
- * produce JSON yourself via a system or user message. Without this, the model may
219
- * generate an unending stream of whitespace until the generation reaches the token
220
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
221
- * the message content may be partially cut off if `finish_reason="length"`, which
222
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
223
- * max context length.
224
- */
225
- response_format: ThreadsAPI.AssistantResponseFormatOption | null;
226
- /**
227
- * The Unix timestamp (in seconds) for when the run was started.
228
- */
229
- started_at: number | null;
230
- /**
231
- * The status of the run, which can be either `queued`, `in_progress`,
232
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
233
- * `incomplete`, or `expired`.
234
- */
235
- status: RunStatus;
236
- /**
237
- * The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
238
- * that was executed on as a part of this run.
239
- */
240
- thread_id: string;
241
- /**
242
- * Controls which (if any) tool is called by the model. `none` means the model will
243
- * not call any tools and instead generates a message. `auto` is the default value
244
- * and means the model can pick between generating a message or calling one or more
245
- * tools. `required` means the model must call one or more tools before responding
246
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
247
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
248
- * call that tool.
249
- */
250
- tool_choice: ThreadsAPI.AssistantToolChoiceOption | null;
251
- /**
252
- * The list of tools that the
253
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
254
- * this run.
255
- */
256
- tools: Array<AssistantsAPI.AssistantTool>;
257
- /**
258
- * Controls for how a thread will be truncated prior to the run. Use this to
259
- * control the intial context window of the run.
260
- */
261
- truncation_strategy: Run.TruncationStrategy | null;
262
- /**
263
- * Usage statistics related to the run. This value will be `null` if the run is not
264
- * in a terminal state (i.e. `in_progress`, `queued`, etc.).
265
- */
266
- usage: Run.Usage | null;
267
- /**
268
- * The sampling temperature used for this run. If not set, defaults to 1.
269
- */
270
- temperature?: number | null;
271
- /**
272
- * The nucleus sampling value used for this run. If not set, defaults to 1.
273
- */
274
- top_p?: number | null;
275
- }
276
- export declare namespace Run {
277
- /**
278
- * Details on why the run is incomplete. Will be `null` if the run is not
279
- * incomplete.
280
- */
281
- interface IncompleteDetails {
282
- /**
283
- * The reason why the run is incomplete. This will point to which specific token
284
- * limit was reached over the course of the run.
285
- */
286
- reason?: 'max_completion_tokens' | 'max_prompt_tokens';
287
- }
288
- /**
289
- * The last error associated with this run. Will be `null` if there are no errors.
290
- */
291
- interface LastError {
292
- /**
293
- * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
294
- */
295
- code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt';
296
- /**
297
- * A human-readable description of the error.
298
- */
299
- message: string;
300
- }
301
- /**
302
- * Details on the action required to continue the run. Will be `null` if no action
303
- * is required.
304
- */
305
- interface RequiredAction {
306
- /**
307
- * Details on the tool outputs needed for this run to continue.
308
- */
309
- submit_tool_outputs: RequiredAction.SubmitToolOutputs;
310
- /**
311
- * For now, this is always `submit_tool_outputs`.
312
- */
313
- type: 'submit_tool_outputs';
314
- }
315
- namespace RequiredAction {
316
- /**
317
- * Details on the tool outputs needed for this run to continue.
318
- */
319
- interface SubmitToolOutputs {
320
- /**
321
- * A list of the relevant tool calls.
322
- */
323
- tool_calls: Array<RunsAPI.RequiredActionFunctionToolCall>;
324
- }
325
- }
326
- /**
327
- * Controls for how a thread will be truncated prior to the run. Use this to
328
- * control the intial context window of the run.
329
- */
330
- interface TruncationStrategy {
331
- /**
332
- * The truncation strategy to use for the thread. The default is `auto`. If set to
333
- * `last_messages`, the thread will be truncated to the n most recent messages in
334
- * the thread. When set to `auto`, messages in the middle of the thread will be
335
- * dropped to fit the context length of the model, `max_prompt_tokens`.
336
- */
337
- type: 'auto' | 'last_messages';
338
- /**
339
- * The number of most recent messages from the thread when constructing the context
340
- * for the run.
341
- */
342
- last_messages?: number | null;
343
- }
344
- /**
345
- * Usage statistics related to the run. This value will be `null` if the run is not
346
- * in a terminal state (i.e. `in_progress`, `queued`, etc.).
347
- */
348
- interface Usage {
349
- /**
350
- * Number of completion tokens used over the course of the run.
351
- */
352
- completion_tokens: number;
353
- /**
354
- * Number of prompt tokens used over the course of the run.
355
- */
356
- prompt_tokens: number;
357
- /**
358
- * Total number of tokens used (prompt + completion).
359
- */
360
- total_tokens: number;
361
- }
362
- }
363
- /**
364
- * The status of the run, which can be either `queued`, `in_progress`,
365
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
366
- * `incomplete`, or `expired`.
367
- */
368
- export type RunStatus = 'queued' | 'in_progress' | 'requires_action' | 'cancelling' | 'cancelled' | 'failed' | 'completed' | 'incomplete' | 'expired';
369
- export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;
370
- export interface RunCreateParamsBase {
371
- /**
372
- * The ID of the
373
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
374
- * execute this run.
375
- */
376
- assistant_id: string;
377
- /**
378
- * Appends additional instructions at the end of the instructions for the run. This
379
- * is useful for modifying the behavior on a per-run basis without overriding other
380
- * instructions.
381
- */
382
- additional_instructions?: string | null;
383
- /**
384
- * Adds additional messages to the thread before creating the run.
385
- */
386
- additional_messages?: Array<RunCreateParams.AdditionalMessage> | null;
387
- /**
388
- * Overrides the
389
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
390
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
391
- */
392
- instructions?: string | null;
393
- /**
394
- * The maximum number of completion tokens that may be used over the course of the
395
- * run. The run will make a best effort to use only the number of completion tokens
396
- * specified, across multiple turns of the run. If the run exceeds the number of
397
- * completion tokens specified, the run will end with status `incomplete`. See
398
- * `incomplete_details` for more info.
399
- */
400
- max_completion_tokens?: number | null;
401
- /**
402
- * The maximum number of prompt tokens that may be used over the course of the run.
403
- * The run will make a best effort to use only the number of prompt tokens
404
- * specified, across multiple turns of the run. If the run exceeds the number of
405
- * prompt tokens specified, the run will end with status `incomplete`. See
406
- * `incomplete_details` for more info.
407
- */
408
- max_prompt_tokens?: number | null;
409
- /**
410
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
411
- * for storing additional information about the object in a structured format. Keys
412
- * can be a maximum of 64 characters long and values can be a maxium of 512
413
- * characters long.
414
- */
415
- metadata?: unknown | null;
416
- /**
417
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
418
- * be used to execute this run. If a value is provided here, it will override the
419
- * model associated with the assistant. If not, the model associated with the
420
- * assistant will be used.
421
- */
422
- model?: (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613' | null;
423
- /**
424
- * Specifies the format that the model must output. Compatible with
425
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
426
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
427
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
428
- *
429
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
430
- * message the model generates is valid JSON.
431
- *
432
- * **Important:** when using JSON mode, you **must** also instruct the model to
433
- * produce JSON yourself via a system or user message. Without this, the model may
434
- * generate an unending stream of whitespace until the generation reaches the token
435
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
436
- * the message content may be partially cut off if `finish_reason="length"`, which
437
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
438
- * max context length.
439
- */
440
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
441
- /**
442
- * If `true`, returns a stream of events that happen during the Run as server-sent
443
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
444
- * message.
445
- */
446
- stream?: boolean | null;
447
- /**
448
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
449
- * make the output more random, while lower values like 0.2 will make it more
450
- * focused and deterministic.
451
- */
452
- temperature?: number | null;
453
- /**
454
- * Controls which (if any) tool is called by the model. `none` means the model will
455
- * not call any tools and instead generates a message. `auto` is the default value
456
- * and means the model can pick between generating a message or calling one or more
457
- * tools. `required` means the model must call one or more tools before responding
458
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
459
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
460
- * call that tool.
461
- */
462
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
463
- /**
464
- * Override the tools the assistant can use for this run. This is useful for
465
- * modifying the behavior on a per-run basis.
466
- */
467
- tools?: Array<AssistantsAPI.AssistantTool> | null;
468
- /**
469
- * An alternative to sampling with temperature, called nucleus sampling, where the
470
- * model considers the results of the tokens with top_p probability mass. So 0.1
471
- * means only the tokens comprising the top 10% probability mass are considered.
472
- *
473
- * We generally recommend altering this or temperature but not both.
474
- */
475
- top_p?: number | null;
476
- /**
477
- * Controls for how a thread will be truncated prior to the run. Use this to
478
- * control the intial context window of the run.
479
- */
480
- truncation_strategy?: RunCreateParams.TruncationStrategy | null;
481
- }
482
- export declare namespace RunCreateParams {
483
- interface AdditionalMessage {
484
- /**
485
- * The text contents of the message.
486
- */
487
- content: string | Array<MessagesAPI.MessageContentPartParam>;
488
- /**
489
- * The role of the entity that is creating the message. Allowed values include:
490
- *
491
- * - `user`: Indicates the message is sent by an actual user and should be used in
492
- * most cases to represent user-generated messages.
493
- * - `assistant`: Indicates the message is generated by the assistant. Use this
494
- * value to insert messages from the assistant into the conversation.
495
- */
496
- role: 'user' | 'assistant';
497
- /**
498
- * A list of files attached to the message, and the tools they should be added to.
499
- */
500
- attachments?: Array<AdditionalMessage.Attachment> | null;
501
- /**
502
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
503
- * for storing additional information about the object in a structured format. Keys
504
- * can be a maximum of 64 characters long and values can be a maxium of 512
505
- * characters long.
506
- */
507
- metadata?: unknown | null;
508
- }
509
- namespace AdditionalMessage {
510
- interface Attachment {
511
- /**
512
- * The ID of the file to attach to the message.
513
- */
514
- file_id?: string;
515
- /**
516
- * The tools to add this file to.
517
- */
518
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
519
- }
520
- }
521
- /**
522
- * Controls for how a thread will be truncated prior to the run. Use this to
523
- * control the intial context window of the run.
524
- */
525
- interface TruncationStrategy {
526
- /**
527
- * The truncation strategy to use for the thread. The default is `auto`. If set to
528
- * `last_messages`, the thread will be truncated to the n most recent messages in
529
- * the thread. When set to `auto`, messages in the middle of the thread will be
530
- * dropped to fit the context length of the model, `max_prompt_tokens`.
531
- */
532
- type: 'auto' | 'last_messages';
533
- /**
534
- * The number of most recent messages from the thread when constructing the context
535
- * for the run.
536
- */
537
- last_messages?: number | null;
538
- }
539
- type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
540
- type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
541
- }
542
- export interface RunCreateParamsNonStreaming extends RunCreateParamsBase {
543
- /**
544
- * If `true`, returns a stream of events that happen during the Run as server-sent
545
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
546
- * message.
547
- */
548
- stream?: false | null;
549
- }
550
- export interface RunCreateParamsStreaming extends RunCreateParamsBase {
551
- /**
552
- * If `true`, returns a stream of events that happen during the Run as server-sent
553
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
554
- * message.
555
- */
556
- stream: true;
557
- }
558
- export interface RunUpdateParams {
559
- /**
560
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
561
- * for storing additional information about the object in a structured format. Keys
562
- * can be a maximum of 64 characters long and values can be a maxium of 512
563
- * characters long.
564
- */
565
- metadata?: unknown | null;
566
- }
567
- export interface RunListParams extends CursorPageParams {
568
- /**
569
- * A cursor for use in pagination. `before` is an object ID that defines your place
570
- * in the list. For instance, if you make a list request and receive 100 objects,
571
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
572
- * fetch the previous page of the list.
573
- */
574
- before?: string;
575
- /**
576
- * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
577
- * order and `desc` for descending order.
578
- */
579
- order?: 'asc' | 'desc';
580
- }
581
- export interface RunCreateAndPollParams {
582
- /**
583
- * The ID of the
584
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
585
- * execute this run.
586
- */
587
- assistant_id: string;
588
- /**
589
- * Appends additional instructions at the end of the instructions for the run. This
590
- * is useful for modifying the behavior on a per-run basis without overriding other
591
- * instructions.
592
- */
593
- additional_instructions?: string | null;
594
- /**
595
- * Adds additional messages to the thread before creating the run.
596
- */
597
- additional_messages?: Array<RunCreateAndPollParams.AdditionalMessage> | null;
598
- /**
599
- * Overrides the
600
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
601
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
602
- */
603
- instructions?: string | null;
604
- /**
605
- * The maximum number of completion tokens that may be used over the course of the
606
- * run. The run will make a best effort to use only the number of completion tokens
607
- * specified, across multiple turns of the run. If the run exceeds the number of
608
- * completion tokens specified, the run will end with status `incomplete`. See
609
- * `incomplete_details` for more info.
610
- */
611
- max_completion_tokens?: number | null;
612
- /**
613
- * The maximum number of prompt tokens that may be used over the course of the run.
614
- * The run will make a best effort to use only the number of prompt tokens
615
- * specified, across multiple turns of the run. If the run exceeds the number of
616
- * prompt tokens specified, the run will end with status `incomplete`. See
617
- * `incomplete_details` for more info.
618
- */
619
- max_prompt_tokens?: number | null;
620
- /**
621
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
622
- * for storing additional information about the object in a structured format. Keys
623
- * can be a maximum of 64 characters long and values can be a maxium of 512
624
- * characters long.
625
- */
626
- metadata?: unknown | null;
627
- /**
628
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
629
- * be used to execute this run. If a value is provided here, it will override the
630
- * model associated with the assistant. If not, the model associated with the
631
- * assistant will be used.
632
- */
633
- model?: (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613' | null;
634
- /**
635
- * Specifies the format that the model must output. Compatible with
636
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
637
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
638
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
639
- *
640
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
641
- * message the model generates is valid JSON.
642
- *
643
- * **Important:** when using JSON mode, you **must** also instruct the model to
644
- * produce JSON yourself via a system or user message. Without this, the model may
645
- * generate an unending stream of whitespace until the generation reaches the token
646
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
647
- * the message content may be partially cut off if `finish_reason="length"`, which
648
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
649
- * max context length.
650
- */
651
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
652
- /**
653
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
654
- * make the output more random, while lower values like 0.2 will make it more
655
- * focused and deterministic.
656
- */
657
- temperature?: number | null;
658
- /**
659
- * Controls which (if any) tool is called by the model. `none` means the model will
660
- * not call any tools and instead generates a message. `auto` is the default value
661
- * and means the model can pick between generating a message or calling one or more
662
- * tools. `required` means the model must call one or more tools before responding
663
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
664
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
665
- * call that tool.
666
- */
667
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
668
- /**
669
- * Override the tools the assistant can use for this run. This is useful for
670
- * modifying the behavior on a per-run basis.
671
- */
672
- tools?: Array<AssistantsAPI.AssistantTool> | null;
673
- /**
674
- * An alternative to sampling with temperature, called nucleus sampling, where the
675
- * model considers the results of the tokens with top_p probability mass. So 0.1
676
- * means only the tokens comprising the top 10% probability mass are considered.
677
- *
678
- * We generally recommend altering this or temperature but not both.
679
- */
680
- top_p?: number | null;
681
- /**
682
- * Controls for how a thread will be truncated prior to the run. Use this to
683
- * control the intial context window of the run.
684
- */
685
- truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null;
686
- }
687
- export declare namespace RunCreateAndPollParams {
688
- interface AdditionalMessage {
689
- /**
690
- * The text contents of the message.
691
- */
692
- content: string | Array<MessagesAPI.MessageContentPartParam>;
693
- /**
694
- * The role of the entity that is creating the message. Allowed values include:
695
- *
696
- * - `user`: Indicates the message is sent by an actual user and should be used in
697
- * most cases to represent user-generated messages.
698
- * - `assistant`: Indicates the message is generated by the assistant. Use this
699
- * value to insert messages from the assistant into the conversation.
700
- */
701
- role: 'user' | 'assistant';
702
- /**
703
- * A list of files attached to the message, and the tools they should be added to.
704
- */
705
- attachments?: Array<AdditionalMessage.Attachment> | null;
706
- /**
707
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
708
- * for storing additional information about the object in a structured format. Keys
709
- * can be a maximum of 64 characters long and values can be a maxium of 512
710
- * characters long.
711
- */
712
- metadata?: unknown | null;
713
- }
714
- namespace AdditionalMessage {
715
- interface Attachment {
716
- /**
717
- * The ID of the file to attach to the message.
718
- */
719
- file_id?: string;
720
- /**
721
- * The tools to add this file to.
722
- */
723
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
724
- }
725
- }
726
- /**
727
- * Controls for how a thread will be truncated prior to the run. Use this to
728
- * control the intial context window of the run.
729
- */
730
- interface TruncationStrategy {
731
- /**
732
- * The truncation strategy to use for the thread. The default is `auto`. If set to
733
- * `last_messages`, the thread will be truncated to the n most recent messages in
734
- * the thread. When set to `auto`, messages in the middle of the thread will be
735
- * dropped to fit the context length of the model, `max_prompt_tokens`.
736
- */
737
- type: 'auto' | 'last_messages';
738
- /**
739
- * The number of most recent messages from the thread when constructing the context
740
- * for the run.
741
- */
742
- last_messages?: number | null;
743
- }
744
- }
745
- export interface RunCreateAndStreamParams {
746
- /**
747
- * The ID of the
748
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
749
- * execute this run.
750
- */
751
- assistant_id: string;
752
- /**
753
- * Appends additional instructions at the end of the instructions for the run. This
754
- * is useful for modifying the behavior on a per-run basis without overriding other
755
- * instructions.
756
- */
757
- additional_instructions?: string | null;
758
- /**
759
- * Adds additional messages to the thread before creating the run.
760
- */
761
- additional_messages?: Array<RunCreateAndStreamParams.AdditionalMessage> | null;
762
- /**
763
- * Overrides the
764
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
765
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
766
- */
767
- instructions?: string | null;
768
- /**
769
- * The maximum number of completion tokens that may be used over the course of the
770
- * run. The run will make a best effort to use only the number of completion tokens
771
- * specified, across multiple turns of the run. If the run exceeds the number of
772
- * completion tokens specified, the run will end with status `incomplete`. See
773
- * `incomplete_details` for more info.
774
- */
775
- max_completion_tokens?: number | null;
776
- /**
777
- * The maximum number of prompt tokens that may be used over the course of the run.
778
- * The run will make a best effort to use only the number of prompt tokens
779
- * specified, across multiple turns of the run. If the run exceeds the number of
780
- * prompt tokens specified, the run will end with status `incomplete`. See
781
- * `incomplete_details` for more info.
782
- */
783
- max_prompt_tokens?: number | null;
784
- /**
785
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
786
- * for storing additional information about the object in a structured format. Keys
787
- * can be a maximum of 64 characters long and values can be a maxium of 512
788
- * characters long.
789
- */
790
- metadata?: unknown | null;
791
- /**
792
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
793
- * be used to execute this run. If a value is provided here, it will override the
794
- * model associated with the assistant. If not, the model associated with the
795
- * assistant will be used.
796
- */
797
- model?: (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613' | null;
798
- /**
799
- * Specifies the format that the model must output. Compatible with
800
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
801
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
802
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
803
- *
804
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
805
- * message the model generates is valid JSON.
806
- *
807
- * **Important:** when using JSON mode, you **must** also instruct the model to
808
- * produce JSON yourself via a system or user message. Without this, the model may
809
- * generate an unending stream of whitespace until the generation reaches the token
810
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
811
- * the message content may be partially cut off if `finish_reason="length"`, which
812
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
813
- * max context length.
814
- */
815
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
816
- /**
817
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
818
- * make the output more random, while lower values like 0.2 will make it more
819
- * focused and deterministic.
820
- */
821
- temperature?: number | null;
822
- /**
823
- * Controls which (if any) tool is called by the model. `none` means the model will
824
- * not call any tools and instead generates a message. `auto` is the default value
825
- * and means the model can pick between generating a message or calling one or more
826
- * tools. `required` means the model must call one or more tools before responding
827
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
828
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
829
- * call that tool.
830
- */
831
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
832
- /**
833
- * Override the tools the assistant can use for this run. This is useful for
834
- * modifying the behavior on a per-run basis.
835
- */
836
- tools?: Array<AssistantsAPI.AssistantTool> | null;
837
- /**
838
- * An alternative to sampling with temperature, called nucleus sampling, where the
839
- * model considers the results of the tokens with top_p probability mass. So 0.1
840
- * means only the tokens comprising the top 10% probability mass are considered.
841
- *
842
- * We generally recommend altering this or temperature but not both.
843
- */
844
- top_p?: number | null;
845
- /**
846
- * Controls for how a thread will be truncated prior to the run. Use this to
847
- * control the intial context window of the run.
848
- */
849
- truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null;
850
- }
851
- export declare namespace RunCreateAndStreamParams {
852
- interface AdditionalMessage {
853
- /**
854
- * The text contents of the message.
855
- */
856
- content: string | Array<MessagesAPI.MessageContentPartParam>;
857
- /**
858
- * The role of the entity that is creating the message. Allowed values include:
859
- *
860
- * - `user`: Indicates the message is sent by an actual user and should be used in
861
- * most cases to represent user-generated messages.
862
- * - `assistant`: Indicates the message is generated by the assistant. Use this
863
- * value to insert messages from the assistant into the conversation.
864
- */
865
- role: 'user' | 'assistant';
866
- /**
867
- * A list of files attached to the message, and the tools they should be added to.
868
- */
869
- attachments?: Array<AdditionalMessage.Attachment> | null;
870
- /**
871
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
872
- * for storing additional information about the object in a structured format. Keys
873
- * can be a maximum of 64 characters long and values can be a maxium of 512
874
- * characters long.
875
- */
876
- metadata?: unknown | null;
877
- }
878
- namespace AdditionalMessage {
879
- interface Attachment {
880
- /**
881
- * The ID of the file to attach to the message.
882
- */
883
- file_id?: string;
884
- /**
885
- * The tools to add this file to.
886
- */
887
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
888
- }
889
- }
890
- /**
891
- * Controls for how a thread will be truncated prior to the run. Use this to
892
- * control the intial context window of the run.
893
- */
894
- interface TruncationStrategy {
895
- /**
896
- * The truncation strategy to use for the thread. The default is `auto`. If set to
897
- * `last_messages`, the thread will be truncated to the n most recent messages in
898
- * the thread. When set to `auto`, messages in the middle of the thread will be
899
- * dropped to fit the context length of the model, `max_prompt_tokens`.
900
- */
901
- type: 'auto' | 'last_messages';
902
- /**
903
- * The number of most recent messages from the thread when constructing the context
904
- * for the run.
905
- */
906
- last_messages?: number | null;
907
- }
908
- }
909
- export interface RunStreamParams {
910
- /**
911
- * The ID of the
912
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
913
- * execute this run.
914
- */
915
- assistant_id: string;
916
- /**
917
- * Appends additional instructions at the end of the instructions for the run. This
918
- * is useful for modifying the behavior on a per-run basis without overriding other
919
- * instructions.
920
- */
921
- additional_instructions?: string | null;
922
- /**
923
- * Adds additional messages to the thread before creating the run.
924
- */
925
- additional_messages?: Array<RunStreamParams.AdditionalMessage> | null;
926
- /**
927
- * Overrides the
928
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
929
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
930
- */
931
- instructions?: string | null;
932
- /**
933
- * The maximum number of completion tokens that may be used over the course of the
934
- * run. The run will make a best effort to use only the number of completion tokens
935
- * specified, across multiple turns of the run. If the run exceeds the number of
936
- * completion tokens specified, the run will end with status `incomplete`. See
937
- * `incomplete_details` for more info.
938
- */
939
- max_completion_tokens?: number | null;
940
- /**
941
- * The maximum number of prompt tokens that may be used over the course of the run.
942
- * The run will make a best effort to use only the number of prompt tokens
943
- * specified, across multiple turns of the run. If the run exceeds the number of
944
- * prompt tokens specified, the run will end with status `incomplete`. See
945
- * `incomplete_details` for more info.
946
- */
947
- max_prompt_tokens?: number | null;
948
- /**
949
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
950
- * for storing additional information about the object in a structured format. Keys
951
- * can be a maximum of 64 characters long and values can be a maxium of 512
952
- * characters long.
953
- */
954
- metadata?: unknown | null;
955
- /**
956
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
957
- * be used to execute this run. If a value is provided here, it will override the
958
- * model associated with the assistant. If not, the model associated with the
959
- * assistant will be used.
960
- */
961
- model?: (string & {}) | 'gpt-4o' | 'gpt-4o-2024-05-13' | 'gpt-4-turbo' | 'gpt-4-turbo-2024-04-09' | 'gpt-4-0125-preview' | 'gpt-4-turbo-preview' | 'gpt-4-1106-preview' | 'gpt-4-vision-preview' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-3.5-turbo' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-0125' | 'gpt-3.5-turbo-16k-0613' | null;
962
- /**
963
- * Specifies the format that the model must output. Compatible with
964
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
965
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
966
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
967
- *
968
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
969
- * message the model generates is valid JSON.
970
- *
971
- * **Important:** when using JSON mode, you **must** also instruct the model to
972
- * produce JSON yourself via a system or user message. Without this, the model may
973
- * generate an unending stream of whitespace until the generation reaches the token
974
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
975
- * the message content may be partially cut off if `finish_reason="length"`, which
976
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
977
- * max context length.
978
- */
979
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
980
- /**
981
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
982
- * make the output more random, while lower values like 0.2 will make it more
983
- * focused and deterministic.
984
- */
985
- temperature?: number | null;
986
- /**
987
- * Controls which (if any) tool is called by the model. `none` means the model will
988
- * not call any tools and instead generates a message. `auto` is the default value
989
- * and means the model can pick between generating a message or calling one or more
990
- * tools. `required` means the model must call one or more tools before responding
991
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
992
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
993
- * call that tool.
994
- */
995
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
996
- /**
997
- * Override the tools the assistant can use for this run. This is useful for
998
- * modifying the behavior on a per-run basis.
999
- */
1000
- tools?: Array<AssistantsAPI.AssistantTool> | null;
1001
- /**
1002
- * An alternative to sampling with temperature, called nucleus sampling, where the
1003
- * model considers the results of the tokens with top_p probability mass. So 0.1
1004
- * means only the tokens comprising the top 10% probability mass are considered.
1005
- *
1006
- * We generally recommend altering this or temperature but not both.
1007
- */
1008
- top_p?: number | null;
1009
- /**
1010
- * Controls for how a thread will be truncated prior to the run. Use this to
1011
- * control the intial context window of the run.
1012
- */
1013
- truncation_strategy?: RunStreamParams.TruncationStrategy | null;
1014
- }
1015
- export declare namespace RunStreamParams {
1016
- interface AdditionalMessage {
1017
- /**
1018
- * The text contents of the message.
1019
- */
1020
- content: string | Array<MessagesAPI.MessageContentPartParam>;
1021
- /**
1022
- * The role of the entity that is creating the message. Allowed values include:
1023
- *
1024
- * - `user`: Indicates the message is sent by an actual user and should be used in
1025
- * most cases to represent user-generated messages.
1026
- * - `assistant`: Indicates the message is generated by the assistant. Use this
1027
- * value to insert messages from the assistant into the conversation.
1028
- */
1029
- role: 'user' | 'assistant';
1030
- /**
1031
- * A list of files attached to the message, and the tools they should be added to.
1032
- */
1033
- attachments?: Array<AdditionalMessage.Attachment> | null;
1034
- /**
1035
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1036
- * for storing additional information about the object in a structured format. Keys
1037
- * can be a maximum of 64 characters long and values can be a maxium of 512
1038
- * characters long.
1039
- */
1040
- metadata?: unknown | null;
1041
- }
1042
- namespace AdditionalMessage {
1043
- interface Attachment {
1044
- /**
1045
- * The ID of the file to attach to the message.
1046
- */
1047
- file_id?: string;
1048
- /**
1049
- * The tools to add this file to.
1050
- */
1051
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
1052
- }
1053
- }
1054
- /**
1055
- * Controls for how a thread will be truncated prior to the run. Use this to
1056
- * control the intial context window of the run.
1057
- */
1058
- interface TruncationStrategy {
1059
- /**
1060
- * The truncation strategy to use for the thread. The default is `auto`. If set to
1061
- * `last_messages`, the thread will be truncated to the n most recent messages in
1062
- * the thread. When set to `auto`, messages in the middle of the thread will be
1063
- * dropped to fit the context length of the model, `max_prompt_tokens`.
1064
- */
1065
- type: 'auto' | 'last_messages';
1066
- /**
1067
- * The number of most recent messages from the thread when constructing the context
1068
- * for the run.
1069
- */
1070
- last_messages?: number | null;
1071
- }
1072
- }
1073
- export type RunSubmitToolOutputsParams = RunSubmitToolOutputsParamsNonStreaming | RunSubmitToolOutputsParamsStreaming;
1074
- export interface RunSubmitToolOutputsParamsBase {
1075
- /**
1076
- * A list of tools for which the outputs are being submitted.
1077
- */
1078
- tool_outputs: Array<RunSubmitToolOutputsParams.ToolOutput>;
1079
- /**
1080
- * If `true`, returns a stream of events that happen during the Run as server-sent
1081
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1082
- * message.
1083
- */
1084
- stream?: boolean | null;
1085
- }
1086
- export declare namespace RunSubmitToolOutputsParams {
1087
- interface ToolOutput {
1088
- /**
1089
- * The output of the tool call to be submitted to continue the run.
1090
- */
1091
- output?: string;
1092
- /**
1093
- * The ID of the tool call in the `required_action` object within the run object
1094
- * the output is being submitted for.
1095
- */
1096
- tool_call_id?: string;
1097
- }
1098
- type RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
1099
- type RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
1100
- }
1101
- export interface RunSubmitToolOutputsParamsNonStreaming extends RunSubmitToolOutputsParamsBase {
1102
- /**
1103
- * If `true`, returns a stream of events that happen during the Run as server-sent
1104
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1105
- * message.
1106
- */
1107
- stream?: false | null;
1108
- }
1109
- export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutputsParamsBase {
1110
- /**
1111
- * If `true`, returns a stream of events that happen during the Run as server-sent
1112
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1113
- * message.
1114
- */
1115
- stream: true;
1116
- }
1117
- export interface RunSubmitToolOutputsAndPollParams {
1118
- /**
1119
- * A list of tools for which the outputs are being submitted.
1120
- */
1121
- tool_outputs: Array<RunSubmitToolOutputsAndPollParams.ToolOutput>;
1122
- }
1123
- export declare namespace RunSubmitToolOutputsAndPollParams {
1124
- interface ToolOutput {
1125
- /**
1126
- * The output of the tool call to be submitted to continue the run.
1127
- */
1128
- output?: string;
1129
- /**
1130
- * The ID of the tool call in the `required_action` object within the run object
1131
- * the output is being submitted for.
1132
- */
1133
- tool_call_id?: string;
1134
- }
1135
- }
1136
- export interface RunSubmitToolOutputsStreamParams {
1137
- /**
1138
- * A list of tools for which the outputs are being submitted.
1139
- */
1140
- tool_outputs: Array<RunSubmitToolOutputsStreamParams.ToolOutput>;
1141
- }
1142
- export declare namespace RunSubmitToolOutputsStreamParams {
1143
- interface ToolOutput {
1144
- /**
1145
- * The output of the tool call to be submitted to continue the run.
1146
- */
1147
- output?: string;
1148
- /**
1149
- * The ID of the tool call in the `required_action` object within the run object
1150
- * the output is being submitted for.
1151
- */
1152
- tool_call_id?: string;
1153
- }
1154
- }
1155
- export declare namespace Runs {
1156
- export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
1157
- export import Run = RunsAPI.Run;
1158
- export import RunStatus = RunsAPI.RunStatus;
1159
- export import RunsPage = RunsAPI.RunsPage;
1160
- export import RunCreateParams = RunsAPI.RunCreateParams;
1161
- export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
1162
- export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
1163
- export import RunUpdateParams = RunsAPI.RunUpdateParams;
1164
- export import RunListParams = RunsAPI.RunListParams;
1165
- export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
1166
- export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
1167
- export import RunStreamParams = RunsAPI.RunStreamParams;
1168
- export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
1169
- export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
1170
- export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
1171
- export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
1172
- export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
1173
- export import Steps = StepsAPI.Steps;
1174
- export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
1175
- export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
1176
- export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
1177
- export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
1178
- export import FileSearchToolCall = StepsAPI.FileSearchToolCall;
1179
- export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta;
1180
- export import FunctionToolCall = StepsAPI.FunctionToolCall;
1181
- export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
1182
- export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
1183
- export import RunStep = StepsAPI.RunStep;
1184
- export import RunStepDelta = StepsAPI.RunStepDelta;
1185
- export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
1186
- export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta;
1187
- export import ToolCall = StepsAPI.ToolCall;
1188
- export import ToolCallDelta = StepsAPI.ToolCallDelta;
1189
- export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject;
1190
- export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
1191
- export import RunStepsPage = StepsAPI.RunStepsPage;
1192
- export import StepListParams = StepsAPI.StepListParams;
1193
- }
1194
- //# sourceMappingURL=runs.d.ts.map