modelmix 2.2.8 → 2.3.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (542)
  1. package/README.md +19 -6
  2. package/demo/demo.mjs +24 -16
  3. package/demo/lmstudio.mjs +1 -1
  4. package/demo/node_modules/.package-lock.json +0 -47
  5. package/demo/package-lock.json +1 -50
  6. package/demo/package.json +1 -3
  7. package/demo/prompt.md +25 -0
  8. package/demo/stream.mjs +3 -3
  9. package/index.js +88 -2
  10. package/package.json +4 -2
  11. package/demo/node_modules/debug/LICENSE +0 -20
  12. package/demo/node_modules/debug/README.md +0 -481
  13. package/demo/node_modules/debug/node_modules/ms/index.js +0 -162
  14. package/demo/node_modules/debug/node_modules/ms/license.md +0 -21
  15. package/demo/node_modules/debug/node_modules/ms/package.json +0 -37
  16. package/demo/node_modules/debug/node_modules/ms/readme.md +0 -60
  17. package/demo/node_modules/debug/package.json +0 -59
  18. package/demo/node_modules/debug/src/browser.js +0 -269
  19. package/demo/node_modules/debug/src/common.js +0 -274
  20. package/demo/node_modules/debug/src/index.js +0 -10
  21. package/demo/node_modules/debug/src/node.js +0 -263
  22. package/demo/node_modules/lemonlog/README.md +0 -133
  23. package/demo/node_modules/lemonlog/demo/demo.js +0 -31
  24. package/demo/node_modules/lemonlog/index.js +0 -94
  25. package/demo/node_modules/lemonlog/package.json +0 -31
  26. package/demo/node_modules/openai/CHANGELOG.md +0 -1176
  27. package/demo/node_modules/openai/LICENSE +0 -201
  28. package/demo/node_modules/openai/README.md +0 -616
  29. package/demo/node_modules/openai/_shims/MultipartBody.d.ts +0 -9
  30. package/demo/node_modules/openai/_shims/MultipartBody.d.ts.map +0 -1
  31. package/demo/node_modules/openai/_shims/MultipartBody.js +0 -16
  32. package/demo/node_modules/openai/_shims/MultipartBody.js.map +0 -1
  33. package/demo/node_modules/openai/_shims/MultipartBody.mjs +0 -12
  34. package/demo/node_modules/openai/_shims/MultipartBody.mjs.map +0 -1
  35. package/demo/node_modules/openai/_shims/README.md +0 -46
  36. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts +0 -5
  37. package/demo/node_modules/openai/_shims/auto/runtime-bun.d.ts.map +0 -1
  38. package/demo/node_modules/openai/_shims/auto/runtime-bun.js +0 -21
  39. package/demo/node_modules/openai/_shims/auto/runtime-bun.js.map +0 -1
  40. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs +0 -2
  41. package/demo/node_modules/openai/_shims/auto/runtime-bun.mjs.map +0 -1
  42. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts +0 -5
  43. package/demo/node_modules/openai/_shims/auto/runtime-node.d.ts.map +0 -1
  44. package/demo/node_modules/openai/_shims/auto/runtime-node.js +0 -21
  45. package/demo/node_modules/openai/_shims/auto/runtime-node.js.map +0 -1
  46. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs +0 -2
  47. package/demo/node_modules/openai/_shims/auto/runtime-node.mjs.map +0 -1
  48. package/demo/node_modules/openai/_shims/auto/runtime.d.ts +0 -5
  49. package/demo/node_modules/openai/_shims/auto/runtime.d.ts.map +0 -1
  50. package/demo/node_modules/openai/_shims/auto/runtime.js +0 -21
  51. package/demo/node_modules/openai/_shims/auto/runtime.js.map +0 -1
  52. package/demo/node_modules/openai/_shims/auto/runtime.mjs +0 -2
  53. package/demo/node_modules/openai/_shims/auto/runtime.mjs.map +0 -1
  54. package/demo/node_modules/openai/_shims/auto/types-node.d.ts +0 -5
  55. package/demo/node_modules/openai/_shims/auto/types-node.d.ts.map +0 -1
  56. package/demo/node_modules/openai/_shims/auto/types-node.js +0 -21
  57. package/demo/node_modules/openai/_shims/auto/types-node.js.map +0 -1
  58. package/demo/node_modules/openai/_shims/auto/types-node.mjs +0 -2
  59. package/demo/node_modules/openai/_shims/auto/types-node.mjs.map +0 -1
  60. package/demo/node_modules/openai/_shims/auto/types.d.ts +0 -101
  61. package/demo/node_modules/openai/_shims/auto/types.js +0 -3
  62. package/demo/node_modules/openai/_shims/auto/types.mjs +0 -3
  63. package/demo/node_modules/openai/_shims/bun-runtime.d.ts +0 -6
  64. package/demo/node_modules/openai/_shims/bun-runtime.d.ts.map +0 -1
  65. package/demo/node_modules/openai/_shims/bun-runtime.js +0 -14
  66. package/demo/node_modules/openai/_shims/bun-runtime.js.map +0 -1
  67. package/demo/node_modules/openai/_shims/bun-runtime.mjs +0 -10
  68. package/demo/node_modules/openai/_shims/bun-runtime.mjs.map +0 -1
  69. package/demo/node_modules/openai/_shims/index.d.ts +0 -81
  70. package/demo/node_modules/openai/_shims/index.js +0 -13
  71. package/demo/node_modules/openai/_shims/index.mjs +0 -7
  72. package/demo/node_modules/openai/_shims/manual-types.d.ts +0 -12
  73. package/demo/node_modules/openai/_shims/manual-types.js +0 -3
  74. package/demo/node_modules/openai/_shims/manual-types.mjs +0 -3
  75. package/demo/node_modules/openai/_shims/node-runtime.d.ts +0 -3
  76. package/demo/node_modules/openai/_shims/node-runtime.d.ts.map +0 -1
  77. package/demo/node_modules/openai/_shims/node-runtime.js +0 -90
  78. package/demo/node_modules/openai/_shims/node-runtime.js.map +0 -1
  79. package/demo/node_modules/openai/_shims/node-runtime.mjs +0 -56
  80. package/demo/node_modules/openai/_shims/node-runtime.mjs.map +0 -1
  81. package/demo/node_modules/openai/_shims/node-types.d.ts +0 -42
  82. package/demo/node_modules/openai/_shims/node-types.js +0 -3
  83. package/demo/node_modules/openai/_shims/node-types.mjs +0 -3
  84. package/demo/node_modules/openai/_shims/registry.d.ts +0 -37
  85. package/demo/node_modules/openai/_shims/registry.d.ts.map +0 -1
  86. package/demo/node_modules/openai/_shims/registry.js +0 -41
  87. package/demo/node_modules/openai/_shims/registry.js.map +0 -1
  88. package/demo/node_modules/openai/_shims/registry.mjs +0 -37
  89. package/demo/node_modules/openai/_shims/registry.mjs.map +0 -1
  90. package/demo/node_modules/openai/_shims/web-runtime.d.ts +0 -5
  91. package/demo/node_modules/openai/_shims/web-runtime.d.ts.map +0 -1
  92. package/demo/node_modules/openai/_shims/web-runtime.js +0 -78
  93. package/demo/node_modules/openai/_shims/web-runtime.js.map +0 -1
  94. package/demo/node_modules/openai/_shims/web-runtime.mjs +0 -71
  95. package/demo/node_modules/openai/_shims/web-runtime.mjs.map +0 -1
  96. package/demo/node_modules/openai/_shims/web-types.d.ts +0 -83
  97. package/demo/node_modules/openai/_shims/web-types.js +0 -3
  98. package/demo/node_modules/openai/_shims/web-types.mjs +0 -3
  99. package/demo/node_modules/openai/bin/cli +0 -49
  100. package/demo/node_modules/openai/core.d.ts +0 -239
  101. package/demo/node_modules/openai/core.d.ts.map +0 -1
  102. package/demo/node_modules/openai/core.js +0 -879
  103. package/demo/node_modules/openai/core.js.map +0 -1
  104. package/demo/node_modules/openai/core.mjs +0 -848
  105. package/demo/node_modules/openai/core.mjs.map +0 -1
  106. package/demo/node_modules/openai/error.d.ts +0 -57
  107. package/demo/node_modules/openai/error.d.ts.map +0 -1
  108. package/demo/node_modules/openai/error.js +0 -148
  109. package/demo/node_modules/openai/error.js.map +0 -1
  110. package/demo/node_modules/openai/error.mjs +0 -132
  111. package/demo/node_modules/openai/error.mjs.map +0 -1
  112. package/demo/node_modules/openai/index.d.mts +0 -267
  113. package/demo/node_modules/openai/index.d.ts +0 -267
  114. package/demo/node_modules/openai/index.d.ts.map +0 -1
  115. package/demo/node_modules/openai/index.js +0 -262
  116. package/demo/node_modules/openai/index.js.map +0 -1
  117. package/demo/node_modules/openai/index.mjs +0 -232
  118. package/demo/node_modules/openai/index.mjs.map +0 -1
  119. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts +0 -74
  120. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.d.ts.map +0 -1
  121. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js +0 -246
  122. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.js.map +0 -1
  123. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs +0 -242
  124. package/demo/node_modules/openai/lib/AbstractAssistantStreamRunner.mjs.map +0 -1
  125. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +0 -114
  126. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts.map +0 -1
  127. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js +0 -519
  128. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.js.map +0 -1
  129. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs +0 -515
  130. package/demo/node_modules/openai/lib/AbstractChatCompletionRunner.mjs.map +0 -1
  131. package/demo/node_modules/openai/lib/AssistantStream.d.ts +0 -58
  132. package/demo/node_modules/openai/lib/AssistantStream.d.ts.map +0 -1
  133. package/demo/node_modules/openai/lib/AssistantStream.js +0 -548
  134. package/demo/node_modules/openai/lib/AssistantStream.js.map +0 -1
  135. package/demo/node_modules/openai/lib/AssistantStream.mjs +0 -521
  136. package/demo/node_modules/openai/lib/AssistantStream.mjs.map +0 -1
  137. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts +0 -2
  138. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.d.ts.map +0 -1
  139. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js +0 -2177
  140. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.js.map +0 -1
  141. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs +0 -2172
  142. package/demo/node_modules/openai/lib/ChatCompletionRunFunctions.test.mjs.map +0 -1
  143. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts +0 -19
  144. package/demo/node_modules/openai/lib/ChatCompletionRunner.d.ts.map +0 -1
  145. package/demo/node_modules/openai/lib/ChatCompletionRunner.js +0 -34
  146. package/demo/node_modules/openai/lib/ChatCompletionRunner.js.map +0 -1
  147. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs +0 -30
  148. package/demo/node_modules/openai/lib/ChatCompletionRunner.mjs.map +0 -1
  149. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts +0 -149
  150. package/demo/node_modules/openai/lib/ChatCompletionStream.d.ts.map +0 -1
  151. package/demo/node_modules/openai/lib/ChatCompletionStream.js +0 -312
  152. package/demo/node_modules/openai/lib/ChatCompletionStream.js.map +0 -1
  153. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs +0 -308
  154. package/demo/node_modules/openai/lib/ChatCompletionStream.mjs.map +0 -1
  155. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +0 -22
  156. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts.map +0 -1
  157. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js +0 -32
  158. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.js.map +0 -1
  159. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs +0 -28
  160. package/demo/node_modules/openai/lib/ChatCompletionStreamingRunner.mjs.map +0 -1
  161. package/demo/node_modules/openai/lib/RunnableFunction.d.ts +0 -95
  162. package/demo/node_modules/openai/lib/RunnableFunction.d.ts.map +0 -1
  163. package/demo/node_modules/openai/lib/RunnableFunction.js +0 -35
  164. package/demo/node_modules/openai/lib/RunnableFunction.js.map +0 -1
  165. package/demo/node_modules/openai/lib/RunnableFunction.mjs +0 -29
  166. package/demo/node_modules/openai/lib/RunnableFunction.mjs.map +0 -1
  167. package/demo/node_modules/openai/lib/Util.d.ts +0 -5
  168. package/demo/node_modules/openai/lib/Util.d.ts.map +0 -1
  169. package/demo/node_modules/openai/lib/Util.js +0 -26
  170. package/demo/node_modules/openai/lib/Util.js.map +0 -1
  171. package/demo/node_modules/openai/lib/Util.mjs +0 -22
  172. package/demo/node_modules/openai/lib/Util.mjs.map +0 -1
  173. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts +0 -6
  174. package/demo/node_modules/openai/lib/chatCompletionUtils.d.ts.map +0 -1
  175. package/demo/node_modules/openai/lib/chatCompletionUtils.js +0 -20
  176. package/demo/node_modules/openai/lib/chatCompletionUtils.js.map +0 -1
  177. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs +0 -13
  178. package/demo/node_modules/openai/lib/chatCompletionUtils.mjs.map +0 -1
  179. package/demo/node_modules/openai/lib/jsonschema.d.ts +0 -106
  180. package/demo/node_modules/openai/lib/jsonschema.d.ts.map +0 -1
  181. package/demo/node_modules/openai/lib/jsonschema.js +0 -11
  182. package/demo/node_modules/openai/lib/jsonschema.js.map +0 -1
  183. package/demo/node_modules/openai/lib/jsonschema.mjs +0 -10
  184. package/demo/node_modules/openai/lib/jsonschema.mjs.map +0 -1
  185. package/demo/node_modules/openai/package.json +0 -105
  186. package/demo/node_modules/openai/pagination.d.ts +0 -37
  187. package/demo/node_modules/openai/pagination.d.ts.map +0 -1
  188. package/demo/node_modules/openai/pagination.js +0 -64
  189. package/demo/node_modules/openai/pagination.js.map +0 -1
  190. package/demo/node_modules/openai/pagination.mjs +0 -59
  191. package/demo/node_modules/openai/pagination.mjs.map +0 -1
  192. package/demo/node_modules/openai/resource.d.ts +0 -6
  193. package/demo/node_modules/openai/resource.d.ts.map +0 -1
  194. package/demo/node_modules/openai/resource.js +0 -11
  195. package/demo/node_modules/openai/resource.js.map +0 -1
  196. package/demo/node_modules/openai/resource.mjs +0 -7
  197. package/demo/node_modules/openai/resource.mjs.map +0 -1
  198. package/demo/node_modules/openai/resources/audio/audio.d.ts +0 -20
  199. package/demo/node_modules/openai/resources/audio/audio.d.ts.map +0 -1
  200. package/demo/node_modules/openai/resources/audio/audio.js +0 -46
  201. package/demo/node_modules/openai/resources/audio/audio.js.map +0 -1
  202. package/demo/node_modules/openai/resources/audio/audio.mjs +0 -19
  203. package/demo/node_modules/openai/resources/audio/audio.mjs.map +0 -1
  204. package/demo/node_modules/openai/resources/audio/index.d.ts +0 -5
  205. package/demo/node_modules/openai/resources/audio/index.d.ts.map +0 -1
  206. package/demo/node_modules/openai/resources/audio/index.js +0 -13
  207. package/demo/node_modules/openai/resources/audio/index.js.map +0 -1
  208. package/demo/node_modules/openai/resources/audio/index.mjs +0 -6
  209. package/demo/node_modules/openai/resources/audio/index.mjs.map +0 -1
  210. package/demo/node_modules/openai/resources/audio/speech.d.ts +0 -42
  211. package/demo/node_modules/openai/resources/audio/speech.d.ts.map +0 -1
  212. package/demo/node_modules/openai/resources/audio/speech.js +0 -17
  213. package/demo/node_modules/openai/resources/audio/speech.js.map +0 -1
  214. package/demo/node_modules/openai/resources/audio/speech.mjs +0 -13
  215. package/demo/node_modules/openai/resources/audio/speech.mjs.map +0 -1
  216. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts +0 -71
  217. package/demo/node_modules/openai/resources/audio/transcriptions.d.ts.map +0 -1
  218. package/demo/node_modules/openai/resources/audio/transcriptions.js +0 -18
  219. package/demo/node_modules/openai/resources/audio/transcriptions.js.map +0 -1
  220. package/demo/node_modules/openai/resources/audio/transcriptions.mjs +0 -14
  221. package/demo/node_modules/openai/resources/audio/transcriptions.mjs.map +0 -1
  222. package/demo/node_modules/openai/resources/audio/translations.d.ts +0 -50
  223. package/demo/node_modules/openai/resources/audio/translations.d.ts.map +0 -1
  224. package/demo/node_modules/openai/resources/audio/translations.js +0 -18
  225. package/demo/node_modules/openai/resources/audio/translations.js.map +0 -1
  226. package/demo/node_modules/openai/resources/audio/translations.mjs +0 -14
  227. package/demo/node_modules/openai/resources/audio/translations.mjs.map +0 -1
  228. package/demo/node_modules/openai/resources/batches.d.ts +0 -189
  229. package/demo/node_modules/openai/resources/batches.d.ts.map +0 -1
  230. package/demo/node_modules/openai/resources/batches.js +0 -65
  231. package/demo/node_modules/openai/resources/batches.js.map +0 -1
  232. package/demo/node_modules/openai/resources/batches.mjs +0 -37
  233. package/demo/node_modules/openai/resources/batches.mjs.map +0 -1
  234. package/demo/node_modules/openai/resources/beta/assistants.d.ts +0 -1048
  235. package/demo/node_modules/openai/resources/beta/assistants.d.ts.map +0 -1
  236. package/demo/node_modules/openai/resources/beta/assistants.js +0 -89
  237. package/demo/node_modules/openai/resources/beta/assistants.js.map +0 -1
  238. package/demo/node_modules/openai/resources/beta/assistants.mjs +0 -61
  239. package/demo/node_modules/openai/resources/beta/assistants.mjs.map +0 -1
  240. package/demo/node_modules/openai/resources/beta/beta.d.ts +0 -53
  241. package/demo/node_modules/openai/resources/beta/beta.d.ts.map +0 -1
  242. package/demo/node_modules/openai/resources/beta/beta.js +0 -51
  243. package/demo/node_modules/openai/resources/beta/beta.js.map +0 -1
  244. package/demo/node_modules/openai/resources/beta/beta.mjs +0 -24
  245. package/demo/node_modules/openai/resources/beta/beta.mjs.map +0 -1
  246. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts +0 -9
  247. package/demo/node_modules/openai/resources/beta/chat/chat.d.ts.map +0 -1
  248. package/demo/node_modules/openai/resources/beta/chat/chat.js +0 -40
  249. package/demo/node_modules/openai/resources/beta/chat/chat.js.map +0 -1
  250. package/demo/node_modules/openai/resources/beta/chat/chat.mjs +0 -13
  251. package/demo/node_modules/openai/resources/beta/chat/chat.mjs.map +0 -1
  252. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts +0 -37
  253. package/demo/node_modules/openai/resources/beta/chat/completions.d.ts.map +0 -1
  254. package/demo/node_modules/openai/resources/beta/chat/completions.js +0 -39
  255. package/demo/node_modules/openai/resources/beta/chat/completions.js.map +0 -1
  256. package/demo/node_modules/openai/resources/beta/chat/completions.mjs +0 -30
  257. package/demo/node_modules/openai/resources/beta/chat/completions.mjs.map +0 -1
  258. package/demo/node_modules/openai/resources/beta/chat/index.d.ts +0 -3
  259. package/demo/node_modules/openai/resources/beta/chat/index.d.ts.map +0 -1
  260. package/demo/node_modules/openai/resources/beta/chat/index.js +0 -9
  261. package/demo/node_modules/openai/resources/beta/chat/index.js.map +0 -1
  262. package/demo/node_modules/openai/resources/beta/chat/index.mjs +0 -4
  263. package/demo/node_modules/openai/resources/beta/chat/index.mjs.map +0 -1
  264. package/demo/node_modules/openai/resources/beta/index.d.ts +0 -6
  265. package/demo/node_modules/openai/resources/beta/index.d.ts.map +0 -1
  266. package/demo/node_modules/openai/resources/beta/index.js +0 -17
  267. package/demo/node_modules/openai/resources/beta/index.js.map +0 -1
  268. package/demo/node_modules/openai/resources/beta/index.mjs +0 -7
  269. package/demo/node_modules/openai/resources/beta/index.mjs.map +0 -1
  270. package/demo/node_modules/openai/resources/beta/threads/index.d.ts +0 -4
  271. package/demo/node_modules/openai/resources/beta/threads/index.d.ts.map +0 -1
  272. package/demo/node_modules/openai/resources/beta/threads/index.js +0 -13
  273. package/demo/node_modules/openai/resources/beta/threads/index.js.map +0 -1
  274. package/demo/node_modules/openai/resources/beta/threads/index.mjs +0 -5
  275. package/demo/node_modules/openai/resources/beta/threads/index.mjs.map +0 -1
  276. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts +0 -552
  277. package/demo/node_modules/openai/resources/beta/threads/messages.d.ts.map +0 -1
  278. package/demo/node_modules/openai/resources/beta/threads/messages.js +0 -89
  279. package/demo/node_modules/openai/resources/beta/threads/messages.js.map +0 -1
  280. package/demo/node_modules/openai/resources/beta/threads/messages.mjs +0 -61
  281. package/demo/node_modules/openai/resources/beta/threads/messages.mjs.map +0 -1
  282. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts +0 -3
  283. package/demo/node_modules/openai/resources/beta/threads/runs/index.d.ts.map +0 -1
  284. package/demo/node_modules/openai/resources/beta/threads/runs/index.js +0 -11
  285. package/demo/node_modules/openai/resources/beta/threads/runs/index.js.map +0 -1
  286. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs +0 -4
  287. package/demo/node_modules/openai/resources/beta/threads/runs/index.mjs.map +0 -1
  288. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts +0 -1194
  289. package/demo/node_modules/openai/resources/beta/threads/runs/runs.d.ts.map +0 -1
  290. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js +0 -190
  291. package/demo/node_modules/openai/resources/beta/threads/runs/runs.js.map +0 -1
  292. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs +0 -162
  293. package/demo/node_modules/openai/resources/beta/threads/runs/runs.mjs.map +0 -1
  294. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts +0 -520
  295. package/demo/node_modules/openai/resources/beta/threads/runs/steps.d.ts.map +0 -1
  296. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js +0 -60
  297. package/demo/node_modules/openai/resources/beta/threads/runs/steps.js.map +0 -1
  298. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs +0 -32
  299. package/demo/node_modules/openai/resources/beta/threads/runs/steps.mjs.map +0 -1
  300. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts +0 -1240
  301. package/demo/node_modules/openai/resources/beta/threads/threads.d.ts.map +0 -1
  302. package/demo/node_modules/openai/resources/beta/threads/threads.js +0 -108
  303. package/demo/node_modules/openai/resources/beta/threads/threads.js.map +0 -1
  304. package/demo/node_modules/openai/resources/beta/threads/threads.mjs +0 -81
  305. package/demo/node_modules/openai/resources/beta/threads/threads.mjs.map +0 -1
  306. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts +0 -142
  307. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.d.ts.map +0 -1
  308. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js +0 -129
  309. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.js.map +0 -1
  310. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs +0 -125
  311. package/demo/node_modules/openai/resources/beta/vector-stores/file-batches.mjs.map +0 -1
  312. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts +0 -154
  313. package/demo/node_modules/openai/resources/beta/vector-stores/files.d.ts.map +0 -1
  314. package/demo/node_modules/openai/resources/beta/vector-stores/files.js +0 -149
  315. package/demo/node_modules/openai/resources/beta/vector-stores/files.js.map +0 -1
  316. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs +0 -121
  317. package/demo/node_modules/openai/resources/beta/vector-stores/files.mjs.map +0 -1
  318. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts +0 -4
  319. package/demo/node_modules/openai/resources/beta/vector-stores/index.d.ts.map +0 -1
  320. package/demo/node_modules/openai/resources/beta/vector-stores/index.js +0 -13
  321. package/demo/node_modules/openai/resources/beta/vector-stores/index.js.map +0 -1
  322. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs +0 -5
  323. package/demo/node_modules/openai/resources/beta/vector-stores/index.mjs.map +0 -1
  324. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts +0 -233
  325. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.d.ts.map +0 -1
  326. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js +0 -99
  327. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.js.map +0 -1
  328. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs +0 -71
  329. package/demo/node_modules/openai/resources/beta/vector-stores/vector-stores.mjs.map +0 -1
  330. package/demo/node_modules/openai/resources/chat/chat.d.ts +0 -42
  331. package/demo/node_modules/openai/resources/chat/chat.d.ts.map +0 -1
  332. package/demo/node_modules/openai/resources/chat/chat.js +0 -40
  333. package/demo/node_modules/openai/resources/chat/chat.js.map +0 -1
  334. package/demo/node_modules/openai/resources/chat/chat.mjs +0 -13
  335. package/demo/node_modules/openai/resources/chat/chat.mjs.map +0 -1
  336. package/demo/node_modules/openai/resources/chat/completions.d.ts +0 -845
  337. package/demo/node_modules/openai/resources/chat/completions.d.ts.map +0 -1
  338. package/demo/node_modules/openai/resources/chat/completions.js +0 -14
  339. package/demo/node_modules/openai/resources/chat/completions.js.map +0 -1
  340. package/demo/node_modules/openai/resources/chat/completions.mjs +0 -10
  341. package/demo/node_modules/openai/resources/chat/completions.mjs.map +0 -1
  342. package/demo/node_modules/openai/resources/chat/index.d.ts +0 -3
  343. package/demo/node_modules/openai/resources/chat/index.d.ts.map +0 -1
  344. package/demo/node_modules/openai/resources/chat/index.js +0 -9
  345. package/demo/node_modules/openai/resources/chat/index.js.map +0 -1
  346. package/demo/node_modules/openai/resources/chat/index.mjs +0 -4
  347. package/demo/node_modules/openai/resources/chat/index.mjs.map +0 -1
  348. package/demo/node_modules/openai/resources/completions.d.ts +0 -272
  349. package/demo/node_modules/openai/resources/completions.d.ts.map +0 -1
  350. package/demo/node_modules/openai/resources/completions.js +0 -14
  351. package/demo/node_modules/openai/resources/completions.js.map +0 -1
  352. package/demo/node_modules/openai/resources/completions.mjs +0 -10
  353. package/demo/node_modules/openai/resources/completions.mjs.map +0 -1
  354. package/demo/node_modules/openai/resources/embeddings.d.ts +0 -103
  355. package/demo/node_modules/openai/resources/embeddings.d.ts.map +0 -1
  356. package/demo/node_modules/openai/resources/embeddings.js +0 -17
  357. package/demo/node_modules/openai/resources/embeddings.js.map +0 -1
  358. package/demo/node_modules/openai/resources/embeddings.mjs +0 -13
  359. package/demo/node_modules/openai/resources/embeddings.mjs.map +0 -1
  360. package/demo/node_modules/openai/resources/files.d.ts +0 -140
  361. package/demo/node_modules/openai/resources/files.d.ts.map +0 -1
  362. package/demo/node_modules/openai/resources/files.js +0 -120
  363. package/demo/node_modules/openai/resources/files.js.map +0 -1
  364. package/demo/node_modules/openai/resources/files.mjs +0 -92
  365. package/demo/node_modules/openai/resources/files.mjs.map +0 -1
  366. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +0 -19
  367. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts.map +0 -1
  368. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js +0 -42
  369. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.js.map +0 -1
  370. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs +0 -15
  371. package/demo/node_modules/openai/resources/fine-tuning/fine-tuning.mjs.map +0 -1
  372. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts +0 -3
  373. package/demo/node_modules/openai/resources/fine-tuning/index.d.ts.map +0 -1
  374. package/demo/node_modules/openai/resources/fine-tuning/index.js +0 -11
  375. package/demo/node_modules/openai/resources/fine-tuning/index.js.map +0 -1
  376. package/demo/node_modules/openai/resources/fine-tuning/index.mjs +0 -4
  377. package/demo/node_modules/openai/resources/fine-tuning/index.mjs.map +0 -1
  378. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +0 -69
  379. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts.map +0 -1
  380. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js +0 -47
  381. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.js.map +0 -1
  382. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs +0 -19
  383. package/demo/node_modules/openai/resources/fine-tuning/jobs/checkpoints.mjs.map +0 -1
  384. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +0 -3
  385. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.d.ts.map +0 -1
  386. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js +0 -12
  387. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.js.map +0 -1
  388. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs +0 -4
  389. package/demo/node_modules/openai/resources/fine-tuning/jobs/index.mjs.map +0 -1
  390. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +0 -362
  391. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts.map +0 -1
  392. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js +0 -93
  393. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.js.map +0 -1
  394. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs +0 -64
  395. package/demo/node_modules/openai/resources/fine-tuning/jobs/jobs.mjs.map +0 -1
  396. package/demo/node_modules/openai/resources/images.d.ts +0 -174
  397. package/demo/node_modules/openai/resources/images.d.ts.map +0 -1
  398. package/demo/node_modules/openai/resources/images.js +0 -30
  399. package/demo/node_modules/openai/resources/images.js.map +0 -1
  400. package/demo/node_modules/openai/resources/images.mjs +0 -26
  401. package/demo/node_modules/openai/resources/images.mjs.map +0 -1
  402. package/demo/node_modules/openai/resources/index.d.ts +0 -13
  403. package/demo/node_modules/openai/resources/index.d.ts.map +0 -1
  404. package/demo/node_modules/openai/resources/index.js +0 -44
  405. package/demo/node_modules/openai/resources/index.js.map +0 -1
  406. package/demo/node_modules/openai/resources/index.mjs +0 -14
  407. package/demo/node_modules/openai/resources/index.mjs.map +0 -1
  408. package/demo/node_modules/openai/resources/models.d.ts +0 -58
  409. package/demo/node_modules/openai/resources/models.d.ts.map +0 -1
  410. package/demo/node_modules/openai/resources/models.js +0 -64
  411. package/demo/node_modules/openai/resources/models.js.map +0 -1
  412. package/demo/node_modules/openai/resources/models.mjs +0 -36
  413. package/demo/node_modules/openai/resources/models.mjs.map +0 -1
  414. package/demo/node_modules/openai/resources/moderations.d.ts +0 -176
  415. package/demo/node_modules/openai/resources/moderations.d.ts.map +0 -1
  416. package/demo/node_modules/openai/resources/moderations.js +0 -17
  417. package/demo/node_modules/openai/resources/moderations.js.map +0 -1
  418. package/demo/node_modules/openai/resources/moderations.mjs +0 -13
  419. package/demo/node_modules/openai/resources/moderations.mjs.map +0 -1
  420. package/demo/node_modules/openai/resources/shared.d.ts +0 -39
  421. package/demo/node_modules/openai/resources/shared.d.ts.map +0 -1
  422. package/demo/node_modules/openai/resources/shared.js +0 -4
  423. package/demo/node_modules/openai/resources/shared.js.map +0 -1
  424. package/demo/node_modules/openai/resources/shared.mjs +0 -3
  425. package/demo/node_modules/openai/resources/shared.mjs.map +0 -1
  426. package/demo/node_modules/openai/shims/node.d.ts +0 -29
  427. package/demo/node_modules/openai/shims/node.d.ts.map +0 -1
  428. package/demo/node_modules/openai/shims/node.js +0 -31
  429. package/demo/node_modules/openai/shims/node.js.map +0 -1
  430. package/demo/node_modules/openai/shims/node.mjs +0 -5
  431. package/demo/node_modules/openai/shims/node.mjs.map +0 -1
  432. package/demo/node_modules/openai/shims/web.d.ts +0 -26
  433. package/demo/node_modules/openai/shims/web.d.ts.map +0 -1
  434. package/demo/node_modules/openai/shims/web.js +0 -31
  435. package/demo/node_modules/openai/shims/web.js.map +0 -1
  436. package/demo/node_modules/openai/shims/web.mjs +0 -5
  437. package/demo/node_modules/openai/shims/web.mjs.map +0 -1
  438. package/demo/node_modules/openai/src/_shims/MultipartBody.ts +0 -9
  439. package/demo/node_modules/openai/src/_shims/README.md +0 -46
  440. package/demo/node_modules/openai/src/_shims/auto/runtime-bun.ts +0 -4
  441. package/demo/node_modules/openai/src/_shims/auto/runtime-node.ts +0 -4
  442. package/demo/node_modules/openai/src/_shims/auto/runtime.ts +0 -4
  443. package/demo/node_modules/openai/src/_shims/auto/types-node.ts +0 -4
  444. package/demo/node_modules/openai/src/_shims/auto/types.d.ts +0 -101
  445. package/demo/node_modules/openai/src/_shims/auto/types.js +0 -3
  446. package/demo/node_modules/openai/src/_shims/auto/types.mjs +0 -3
  447. package/demo/node_modules/openai/src/_shims/bun-runtime.ts +0 -14
  448. package/demo/node_modules/openai/src/_shims/index.d.ts +0 -81
  449. package/demo/node_modules/openai/src/_shims/index.js +0 -13
  450. package/demo/node_modules/openai/src/_shims/index.mjs +0 -7
  451. package/demo/node_modules/openai/src/_shims/manual-types.d.ts +0 -12
  452. package/demo/node_modules/openai/src/_shims/manual-types.js +0 -3
  453. package/demo/node_modules/openai/src/_shims/manual-types.mjs +0 -3
  454. package/demo/node_modules/openai/src/_shims/node-runtime.ts +0 -83
  455. package/demo/node_modules/openai/src/_shims/node-types.d.ts +0 -42
  456. package/demo/node_modules/openai/src/_shims/node-types.js +0 -3
  457. package/demo/node_modules/openai/src/_shims/node-types.mjs +0 -3
  458. package/demo/node_modules/openai/src/_shims/registry.ts +0 -65
  459. package/demo/node_modules/openai/src/_shims/web-runtime.ts +0 -103
  460. package/demo/node_modules/openai/src/_shims/web-types.d.ts +0 -83
  461. package/demo/node_modules/openai/src/_shims/web-types.js +0 -3
  462. package/demo/node_modules/openai/src/_shims/web-types.mjs +0 -3
  463. package/demo/node_modules/openai/src/core.ts +0 -1162
  464. package/demo/node_modules/openai/src/error.ts +0 -158
  465. package/demo/node_modules/openai/src/index.ts +0 -502
  466. package/demo/node_modules/openai/src/lib/.keep +0 -4
  467. package/demo/node_modules/openai/src/lib/AbstractAssistantStreamRunner.ts +0 -340
  468. package/demo/node_modules/openai/src/lib/AbstractChatCompletionRunner.ts +0 -682
  469. package/demo/node_modules/openai/src/lib/AssistantStream.ts +0 -723
  470. package/demo/node_modules/openai/src/lib/ChatCompletionRunFunctions.test.ts +0 -2328
  471. package/demo/node_modules/openai/src/lib/ChatCompletionRunner.ts +0 -68
  472. package/demo/node_modules/openai/src/lib/ChatCompletionStream.ts +0 -494
  473. package/demo/node_modules/openai/src/lib/ChatCompletionStreamingRunner.ts +0 -68
  474. package/demo/node_modules/openai/src/lib/RunnableFunction.ts +0 -134
  475. package/demo/node_modules/openai/src/lib/Util.ts +0 -23
  476. package/demo/node_modules/openai/src/lib/chatCompletionUtils.ts +0 -28
  477. package/demo/node_modules/openai/src/lib/jsonschema.ts +0 -148
  478. package/demo/node_modules/openai/src/pagination.ts +0 -98
  479. package/demo/node_modules/openai/src/resource.ts +0 -11
  480. package/demo/node_modules/openai/src/resources/audio/audio.ts +0 -23
  481. package/demo/node_modules/openai/src/resources/audio/index.ts +0 -6
  482. package/demo/node_modules/openai/src/resources/audio/speech.ts +0 -52
  483. package/demo/node_modules/openai/src/resources/audio/transcriptions.ts +0 -84
  484. package/demo/node_modules/openai/src/resources/audio/translations.ts +0 -61
  485. package/demo/node_modules/openai/src/resources/batches.ts +0 -252
  486. package/demo/node_modules/openai/src/resources/beta/assistants.ts +0 -1315
  487. package/demo/node_modules/openai/src/resources/beta/beta.ts +0 -56
  488. package/demo/node_modules/openai/src/resources/beta/chat/chat.ts +0 -12
  489. package/demo/node_modules/openai/src/resources/beta/chat/completions.ts +0 -106
  490. package/demo/node_modules/openai/src/resources/beta/chat/index.ts +0 -4
  491. package/demo/node_modules/openai/src/resources/beta/index.ts +0 -48
  492. package/demo/node_modules/openai/src/resources/beta/threads/index.ts +0 -72
  493. package/demo/node_modules/openai/src/resources/beta/threads/messages.ts +0 -706
  494. package/demo/node_modules/openai/src/resources/beta/threads/runs/index.ts +0 -44
  495. package/demo/node_modules/openai/src/resources/beta/threads/runs/runs.ts +0 -1627
  496. package/demo/node_modules/openai/src/resources/beta/threads/runs/steps.ts +0 -641
  497. package/demo/node_modules/openai/src/resources/beta/threads/threads.ts +0 -1536
  498. package/demo/node_modules/openai/src/resources/beta/vector-stores/file-batches.ts +0 -293
  499. package/demo/node_modules/openai/src/resources/beta/vector-stores/files.ts +0 -284
  500. package/demo/node_modules/openai/src/resources/beta/vector-stores/index.ts +0 -25
  501. package/demo/node_modules/openai/src/resources/beta/vector-stores/vector-stores.ts +0 -318
  502. package/demo/node_modules/openai/src/resources/chat/chat.ts +0 -67
  503. package/demo/node_modules/openai/src/resources/chat/completions.ts +0 -996
  504. package/demo/node_modules/openai/src/resources/chat/index.ts +0 -33
  505. package/demo/node_modules/openai/src/resources/completions.ts +0 -329
  506. package/demo/node_modules/openai/src/resources/embeddings.ts +0 -125
  507. package/demo/node_modules/openai/src/resources/files.ts +0 -214
  508. package/demo/node_modules/openai/src/resources/fine-tuning/fine-tuning.ts +0 -22
  509. package/demo/node_modules/openai/src/resources/fine-tuning/index.ts +0 -16
  510. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/checkpoints.ts +0 -108
  511. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/index.ts +0 -21
  512. package/demo/node_modules/openai/src/resources/fine-tuning/jobs/jobs.ts +0 -458
  513. package/demo/node_modules/openai/src/resources/images.ts +0 -215
  514. package/demo/node_modules/openai/src/resources/index.ts +0 -45
  515. package/demo/node_modules/openai/src/resources/models.ts +0 -76
  516. package/demo/node_modules/openai/src/resources/moderations.ts +0 -214
  517. package/demo/node_modules/openai/src/resources/shared.ts +0 -47
  518. package/demo/node_modules/openai/src/shims/node.ts +0 -50
  519. package/demo/node_modules/openai/src/shims/web.ts +0 -50
  520. package/demo/node_modules/openai/src/streaming.ts +0 -508
  521. package/demo/node_modules/openai/src/tsconfig.json +0 -11
  522. package/demo/node_modules/openai/src/uploads.ts +0 -248
  523. package/demo/node_modules/openai/src/version.ts +0 -1
  524. package/demo/node_modules/openai/streaming.d.ts +0 -41
  525. package/demo/node_modules/openai/streaming.d.ts.map +0 -1
  526. package/demo/node_modules/openai/streaming.js +0 -433
  527. package/demo/node_modules/openai/streaming.js.map +0 -1
  528. package/demo/node_modules/openai/streaming.mjs +0 -426
  529. package/demo/node_modules/openai/streaming.mjs.map +0 -1
  530. package/demo/node_modules/openai/uploads.d.ts +0 -75
  531. package/demo/node_modules/openai/uploads.d.ts.map +0 -1
  532. package/demo/node_modules/openai/uploads.js +0 -165
  533. package/demo/node_modules/openai/uploads.js.map +0 -1
  534. package/demo/node_modules/openai/uploads.mjs +0 -152
  535. package/demo/node_modules/openai/uploads.mjs.map +0 -1
  536. package/demo/node_modules/openai/version.d.ts +0 -2
  537. package/demo/node_modules/openai/version.d.ts.map +0 -1
  538. package/demo/node_modules/openai/version.js +0 -5
  539. package/demo/node_modules/openai/version.js.map +0 -1
  540. package/demo/node_modules/openai/version.mjs +0 -2
  541. package/demo/node_modules/openai/version.mjs.map +0 -1
  542. package/demo/watson.png +0 -0
@@ -1,1627 +0,0 @@
1
- // File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
-
3
- import * as Core from '../../../../core';
4
- import { APIPromise } from '../../../../core';
5
- import { APIResource } from '../../../../resource';
6
- import { isRequestOptions } from '../../../../core';
7
- import { AssistantStream, RunCreateParamsBaseStream } from '../../../../lib/AssistantStream';
8
- import { sleep } from '../../../../core';
9
- import { RunSubmitToolOutputsParamsStream } from '../../../../lib/AssistantStream';
10
- import * as RunsAPI from './runs';
11
- import * as AssistantsAPI from '../../assistants';
12
- import * as MessagesAPI from '../messages';
13
- import * as ThreadsAPI from '../threads';
14
- import * as StepsAPI from './steps';
15
- import { CursorPage, type CursorPageParams } from '../../../../pagination';
16
- import { Stream } from '../../../../streaming';
17
-
18
- export class Runs extends APIResource {
19
- steps: StepsAPI.Steps = new StepsAPI.Steps(this._client);
20
-
21
- /**
22
- * Create a run.
23
- */
24
- create(threadId: string, body: RunCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Run>;
25
- create(
26
- threadId: string,
27
- body: RunCreateParamsStreaming,
28
- options?: Core.RequestOptions,
29
- ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
30
- create(
31
- threadId: string,
32
- body: RunCreateParamsBase,
33
- options?: Core.RequestOptions,
34
- ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;
35
- create(
36
- threadId: string,
37
- body: RunCreateParams,
38
- options?: Core.RequestOptions,
39
- ): APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>> {
40
- return this._client.post(`/threads/${threadId}/runs`, {
41
- body,
42
- ...options,
43
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
44
- stream: body.stream ?? false,
45
- }) as APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
46
- }
47
-
48
- /**
49
- * Retrieves a run.
50
- */
51
- retrieve(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
52
- return this._client.get(`/threads/${threadId}/runs/${runId}`, {
53
- ...options,
54
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
55
- });
56
- }
57
-
58
- /**
59
- * Modifies a run.
60
- */
61
- update(
62
- threadId: string,
63
- runId: string,
64
- body: RunUpdateParams,
65
- options?: Core.RequestOptions,
66
- ): Core.APIPromise<Run> {
67
- return this._client.post(`/threads/${threadId}/runs/${runId}`, {
68
- body,
69
- ...options,
70
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
71
- });
72
- }
73
-
74
- /**
75
- * Returns a list of runs belonging to a thread.
76
- */
77
- list(
78
- threadId: string,
79
- query?: RunListParams,
80
- options?: Core.RequestOptions,
81
- ): Core.PagePromise<RunsPage, Run>;
82
- list(threadId: string, options?: Core.RequestOptions): Core.PagePromise<RunsPage, Run>;
83
- list(
84
- threadId: string,
85
- query: RunListParams | Core.RequestOptions = {},
86
- options?: Core.RequestOptions,
87
- ): Core.PagePromise<RunsPage, Run> {
88
- if (isRequestOptions(query)) {
89
- return this.list(threadId, {}, query);
90
- }
91
- return this._client.getAPIList(`/threads/${threadId}/runs`, RunsPage, {
92
- query,
93
- ...options,
94
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
95
- });
96
- }
97
-
98
- /**
99
- * Cancels a run that is `in_progress`.
100
- */
101
- cancel(threadId: string, runId: string, options?: Core.RequestOptions): Core.APIPromise<Run> {
102
- return this._client.post(`/threads/${threadId}/runs/${runId}/cancel`, {
103
- ...options,
104
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
105
- });
106
- }
107
-
108
- /**
109
- * A helper to create a run an poll for a terminal state. More information on Run
110
- * lifecycles can be found here:
111
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
112
- */
113
- async createAndPoll(
114
- threadId: string,
115
- body: RunCreateParamsNonStreaming,
116
- options?: Core.RequestOptions & { pollIntervalMs?: number },
117
- ): Promise<Run> {
118
- const run = await this.create(threadId, body, options);
119
- return await this.poll(threadId, run.id, options);
120
- }
121
-
122
- /**
123
- * Create a Run stream
124
- *
125
- * @deprecated use `stream` instead
126
- */
127
- createAndStream(
128
- threadId: string,
129
- body: RunCreateParamsBaseStream,
130
- options?: Core.RequestOptions,
131
- ): AssistantStream {
132
- return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
133
- }
134
-
135
- /**
136
- * A helper to poll a run status until it reaches a terminal state. More
137
- * information on Run lifecycles can be found here:
138
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
139
- */
140
- async poll(
141
- threadId: string,
142
- runId: string,
143
- options?: Core.RequestOptions & { pollIntervalMs?: number },
144
- ): Promise<Run> {
145
- const headers: { [key: string]: string } = { ...options?.headers, 'X-Stainless-Poll-Helper': 'true' };
146
-
147
- if (options?.pollIntervalMs) {
148
- headers['X-Stainless-Custom-Poll-Interval'] = options.pollIntervalMs.toString();
149
- }
150
-
151
- while (true) {
152
- const { data: run, response } = await this.retrieve(threadId, runId, {
153
- ...options,
154
- headers: { ...options?.headers, ...headers },
155
- }).withResponse();
156
-
157
- switch (run.status) {
158
- //If we are in any sort of intermediate state we poll
159
- case 'queued':
160
- case 'in_progress':
161
- case 'cancelling':
162
- let sleepInterval = 5000;
163
-
164
- if (options?.pollIntervalMs) {
165
- sleepInterval = options.pollIntervalMs;
166
- } else {
167
- const headerInterval = response.headers.get('openai-poll-after-ms');
168
- if (headerInterval) {
169
- const headerIntervalMs = parseInt(headerInterval);
170
- if (!isNaN(headerIntervalMs)) {
171
- sleepInterval = headerIntervalMs;
172
- }
173
- }
174
- }
175
- await sleep(sleepInterval);
176
- break;
177
- //We return the run in any terminal state.
178
- case 'requires_action':
179
- case 'incomplete':
180
- case 'cancelled':
181
- case 'completed':
182
- case 'failed':
183
- case 'expired':
184
- return run;
185
- }
186
- }
187
- }
188
-
189
- /**
190
- * Create a Run stream
191
- */
192
- stream(threadId: string, body: RunCreateParamsBaseStream, options?: Core.RequestOptions): AssistantStream {
193
- return AssistantStream.createAssistantStream(threadId, this._client.beta.threads.runs, body, options);
194
- }
195
-
196
- /**
197
- * When a run has the `status: "requires_action"` and `required_action.type` is
198
- * `submit_tool_outputs`, this endpoint can be used to submit the outputs from the
199
- * tool calls once they're all completed. All outputs must be submitted in a single
200
- * request.
201
- */
202
- submitToolOutputs(
203
- threadId: string,
204
- runId: string,
205
- body: RunSubmitToolOutputsParamsNonStreaming,
206
- options?: Core.RequestOptions,
207
- ): APIPromise<Run>;
208
- submitToolOutputs(
209
- threadId: string,
210
- runId: string,
211
- body: RunSubmitToolOutputsParamsStreaming,
212
- options?: Core.RequestOptions,
213
- ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
214
- submitToolOutputs(
215
- threadId: string,
216
- runId: string,
217
- body: RunSubmitToolOutputsParamsBase,
218
- options?: Core.RequestOptions,
219
- ): APIPromise<Stream<AssistantsAPI.AssistantStreamEvent> | Run>;
220
- submitToolOutputs(
221
- threadId: string,
222
- runId: string,
223
- body: RunSubmitToolOutputsParams,
224
- options?: Core.RequestOptions,
225
- ): APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>> {
226
- return this._client.post(`/threads/${threadId}/runs/${runId}/submit_tool_outputs`, {
227
- body,
228
- ...options,
229
- headers: { 'OpenAI-Beta': 'assistants=v2', ...options?.headers },
230
- stream: body.stream ?? false,
231
- }) as APIPromise<Run> | APIPromise<Stream<AssistantsAPI.AssistantStreamEvent>>;
232
- }
233
-
234
- /**
235
- * A helper to submit a tool output to a run and poll for a terminal run state.
236
- * More information on Run lifecycles can be found here:
237
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
238
- */
239
- async submitToolOutputsAndPoll(
240
- threadId: string,
241
- runId: string,
242
- body: RunSubmitToolOutputsParamsNonStreaming,
243
- options?: Core.RequestOptions & { pollIntervalMs?: number },
244
- ): Promise<Run> {
245
- const run = await this.submitToolOutputs(threadId, runId, body, options);
246
- return await this.poll(threadId, run.id, options);
247
- }
248
-
249
- /**
250
- * Submit the tool outputs from a previous run and stream the run to a terminal
251
- * state. More information on Run lifecycles can be found here:
252
- * https://platform.openai.com/docs/assistants/how-it-works/runs-and-run-steps
253
- */
254
- submitToolOutputsStream(
255
- threadId: string,
256
- runId: string,
257
- body: RunSubmitToolOutputsParamsStream,
258
- options?: Core.RequestOptions,
259
- ): AssistantStream {
260
- return AssistantStream.createToolAssistantStream(
261
- threadId,
262
- runId,
263
- this._client.beta.threads.runs,
264
- body,
265
- options,
266
- );
267
- }
268
- }
269
-
270
- export class RunsPage extends CursorPage<Run> {}
271
-
272
- /**
273
- * Tool call objects
274
- */
275
- export interface RequiredActionFunctionToolCall {
276
- /**
277
- * The ID of the tool call. This ID must be referenced when you submit the tool
278
- * outputs in using the
279
- * [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
280
- * endpoint.
281
- */
282
- id: string;
283
-
284
- /**
285
- * The function definition.
286
- */
287
- function: RequiredActionFunctionToolCall.Function;
288
-
289
- /**
290
- * The type of tool call the output is required for. For now, this is always
291
- * `function`.
292
- */
293
- type: 'function';
294
- }
295
-
296
- export namespace RequiredActionFunctionToolCall {
297
- /**
298
- * The function definition.
299
- */
300
- export interface Function {
301
- /**
302
- * The arguments that the model expects you to pass to the function.
303
- */
304
- arguments: string;
305
-
306
- /**
307
- * The name of the function.
308
- */
309
- name: string;
310
- }
311
- }
312
-
313
- /**
314
- * Represents an execution run on a
315
- * [thread](https://platform.openai.com/docs/api-reference/threads).
316
- */
317
- export interface Run {
318
- /**
319
- * The identifier, which can be referenced in API endpoints.
320
- */
321
- id: string;
322
-
323
- /**
324
- * The ID of the
325
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
326
- * execution of this run.
327
- */
328
- assistant_id: string;
329
-
330
- /**
331
- * The Unix timestamp (in seconds) for when the run was cancelled.
332
- */
333
- cancelled_at: number | null;
334
-
335
- /**
336
- * The Unix timestamp (in seconds) for when the run was completed.
337
- */
338
- completed_at: number | null;
339
-
340
- /**
341
- * The Unix timestamp (in seconds) for when the run was created.
342
- */
343
- created_at: number;
344
-
345
- /**
346
- * The Unix timestamp (in seconds) for when the run will expire.
347
- */
348
- expires_at: number | null;
349
-
350
- /**
351
- * The Unix timestamp (in seconds) for when the run failed.
352
- */
353
- failed_at: number | null;
354
-
355
- /**
356
- * Details on why the run is incomplete. Will be `null` if the run is not
357
- * incomplete.
358
- */
359
- incomplete_details: Run.IncompleteDetails | null;
360
-
361
- /**
362
- * The instructions that the
363
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
364
- * this run.
365
- */
366
- instructions: string;
367
-
368
- /**
369
- * The last error associated with this run. Will be `null` if there are no errors.
370
- */
371
- last_error: Run.LastError | null;
372
-
373
- /**
374
- * The maximum number of completion tokens specified to have been used over the
375
- * course of the run.
376
- */
377
- max_completion_tokens: number | null;
378
-
379
- /**
380
- * The maximum number of prompt tokens specified to have been used over the course
381
- * of the run.
382
- */
383
- max_prompt_tokens: number | null;
384
-
385
- /**
386
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
387
- * for storing additional information about the object in a structured format. Keys
388
- * can be a maximum of 64 characters long and values can be a maxium of 512
389
- * characters long.
390
- */
391
- metadata: unknown | null;
392
-
393
- /**
394
- * The model that the
395
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
396
- * this run.
397
- */
398
- model: string;
399
-
400
- /**
401
- * The object type, which is always `thread.run`.
402
- */
403
- object: 'thread.run';
404
-
405
- /**
406
- * Details on the action required to continue the run. Will be `null` if no action
407
- * is required.
408
- */
409
- required_action: Run.RequiredAction | null;
410
-
411
- /**
412
- * Specifies the format that the model must output. Compatible with
413
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
414
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
415
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
416
- *
417
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
418
- * message the model generates is valid JSON.
419
- *
420
- * **Important:** when using JSON mode, you **must** also instruct the model to
421
- * produce JSON yourself via a system or user message. Without this, the model may
422
- * generate an unending stream of whitespace until the generation reaches the token
423
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
424
- * the message content may be partially cut off if `finish_reason="length"`, which
425
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
426
- * max context length.
427
- */
428
- response_format: ThreadsAPI.AssistantResponseFormatOption | null;
429
-
430
- /**
431
- * The Unix timestamp (in seconds) for when the run was started.
432
- */
433
- started_at: number | null;
434
-
435
- /**
436
- * The status of the run, which can be either `queued`, `in_progress`,
437
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
438
- * `incomplete`, or `expired`.
439
- */
440
- status: RunStatus;
441
-
442
- /**
443
- * The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
444
- * that was executed on as a part of this run.
445
- */
446
- thread_id: string;
447
-
448
- /**
449
- * Controls which (if any) tool is called by the model. `none` means the model will
450
- * not call any tools and instead generates a message. `auto` is the default value
451
- * and means the model can pick between generating a message or calling one or more
452
- * tools. `required` means the model must call one or more tools before responding
453
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
454
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
455
- * call that tool.
456
- */
457
- tool_choice: ThreadsAPI.AssistantToolChoiceOption | null;
458
-
459
- /**
460
- * The list of tools that the
461
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
462
- * this run.
463
- */
464
- tools: Array<AssistantsAPI.AssistantTool>;
465
-
466
- /**
467
- * Controls for how a thread will be truncated prior to the run. Use this to
468
- * control the intial context window of the run.
469
- */
470
- truncation_strategy: Run.TruncationStrategy | null;
471
-
472
- /**
473
- * Usage statistics related to the run. This value will be `null` if the run is not
474
- * in a terminal state (i.e. `in_progress`, `queued`, etc.).
475
- */
476
- usage: Run.Usage | null;
477
-
478
- /**
479
- * The sampling temperature used for this run. If not set, defaults to 1.
480
- */
481
- temperature?: number | null;
482
-
483
- /**
484
- * The nucleus sampling value used for this run. If not set, defaults to 1.
485
- */
486
- top_p?: number | null;
487
- }
488
-
489
- export namespace Run {
490
- /**
491
- * Details on why the run is incomplete. Will be `null` if the run is not
492
- * incomplete.
493
- */
494
- export interface IncompleteDetails {
495
- /**
496
- * The reason why the run is incomplete. This will point to which specific token
497
- * limit was reached over the course of the run.
498
- */
499
- reason?: 'max_completion_tokens' | 'max_prompt_tokens';
500
- }
501
-
502
- /**
503
- * The last error associated with this run. Will be `null` if there are no errors.
504
- */
505
- export interface LastError {
506
- /**
507
- * One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
508
- */
509
- code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt';
510
-
511
- /**
512
- * A human-readable description of the error.
513
- */
514
- message: string;
515
- }
516
-
517
- /**
518
- * Details on the action required to continue the run. Will be `null` if no action
519
- * is required.
520
- */
521
- export interface RequiredAction {
522
- /**
523
- * Details on the tool outputs needed for this run to continue.
524
- */
525
- submit_tool_outputs: RequiredAction.SubmitToolOutputs;
526
-
527
- /**
528
- * For now, this is always `submit_tool_outputs`.
529
- */
530
- type: 'submit_tool_outputs';
531
- }
532
-
533
- export namespace RequiredAction {
534
- /**
535
- * Details on the tool outputs needed for this run to continue.
536
- */
537
- export interface SubmitToolOutputs {
538
- /**
539
- * A list of the relevant tool calls.
540
- */
541
- tool_calls: Array<RunsAPI.RequiredActionFunctionToolCall>;
542
- }
543
- }
544
-
545
- /**
546
- * Controls for how a thread will be truncated prior to the run. Use this to
547
- * control the intial context window of the run.
548
- */
549
- export interface TruncationStrategy {
550
- /**
551
- * The truncation strategy to use for the thread. The default is `auto`. If set to
552
- * `last_messages`, the thread will be truncated to the n most recent messages in
553
- * the thread. When set to `auto`, messages in the middle of the thread will be
554
- * dropped to fit the context length of the model, `max_prompt_tokens`.
555
- */
556
- type: 'auto' | 'last_messages';
557
-
558
- /**
559
- * The number of most recent messages from the thread when constructing the context
560
- * for the run.
561
- */
562
- last_messages?: number | null;
563
- }
564
-
565
- /**
566
- * Usage statistics related to the run. This value will be `null` if the run is not
567
- * in a terminal state (i.e. `in_progress`, `queued`, etc.).
568
- */
569
- export interface Usage {
570
- /**
571
- * Number of completion tokens used over the course of the run.
572
- */
573
- completion_tokens: number;
574
-
575
- /**
576
- * Number of prompt tokens used over the course of the run.
577
- */
578
- prompt_tokens: number;
579
-
580
- /**
581
- * Total number of tokens used (prompt + completion).
582
- */
583
- total_tokens: number;
584
- }
585
- }
586
-
587
- /**
588
- * The status of the run, which can be either `queued`, `in_progress`,
589
- * `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
590
- * `incomplete`, or `expired`.
591
- */
592
- export type RunStatus =
593
- | 'queued'
594
- | 'in_progress'
595
- | 'requires_action'
596
- | 'cancelling'
597
- | 'cancelled'
598
- | 'failed'
599
- | 'completed'
600
- | 'incomplete'
601
- | 'expired';
602
-
603
- export type RunCreateParams = RunCreateParamsNonStreaming | RunCreateParamsStreaming;
604
-
605
- export interface RunCreateParamsBase {
606
- /**
607
- * The ID of the
608
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
609
- * execute this run.
610
- */
611
- assistant_id: string;
612
-
613
- /**
614
- * Appends additional instructions at the end of the instructions for the run. This
615
- * is useful for modifying the behavior on a per-run basis without overriding other
616
- * instructions.
617
- */
618
- additional_instructions?: string | null;
619
-
620
- /**
621
- * Adds additional messages to the thread before creating the run.
622
- */
623
- additional_messages?: Array<RunCreateParams.AdditionalMessage> | null;
624
-
625
- /**
626
- * Overrides the
627
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
628
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
629
- */
630
- instructions?: string | null;
631
-
632
- /**
633
- * The maximum number of completion tokens that may be used over the course of the
634
- * run. The run will make a best effort to use only the number of completion tokens
635
- * specified, across multiple turns of the run. If the run exceeds the number of
636
- * completion tokens specified, the run will end with status `incomplete`. See
637
- * `incomplete_details` for more info.
638
- */
639
- max_completion_tokens?: number | null;
640
-
641
- /**
642
- * The maximum number of prompt tokens that may be used over the course of the run.
643
- * The run will make a best effort to use only the number of prompt tokens
644
- * specified, across multiple turns of the run. If the run exceeds the number of
645
- * prompt tokens specified, the run will end with status `incomplete`. See
646
- * `incomplete_details` for more info.
647
- */
648
- max_prompt_tokens?: number | null;
649
-
650
- /**
651
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
652
- * for storing additional information about the object in a structured format. Keys
653
- * can be a maximum of 64 characters long and values can be a maxium of 512
654
- * characters long.
655
- */
656
- metadata?: unknown | null;
657
-
658
- /**
659
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
660
- * be used to execute this run. If a value is provided here, it will override the
661
- * model associated with the assistant. If not, the model associated with the
662
- * assistant will be used.
663
- */
664
- model?:
665
- | (string & {})
666
- | 'gpt-4o'
667
- | 'gpt-4o-2024-05-13'
668
- | 'gpt-4-turbo'
669
- | 'gpt-4-turbo-2024-04-09'
670
- | 'gpt-4-0125-preview'
671
- | 'gpt-4-turbo-preview'
672
- | 'gpt-4-1106-preview'
673
- | 'gpt-4-vision-preview'
674
- | 'gpt-4'
675
- | 'gpt-4-0314'
676
- | 'gpt-4-0613'
677
- | 'gpt-4-32k'
678
- | 'gpt-4-32k-0314'
679
- | 'gpt-4-32k-0613'
680
- | 'gpt-3.5-turbo'
681
- | 'gpt-3.5-turbo-16k'
682
- | 'gpt-3.5-turbo-0613'
683
- | 'gpt-3.5-turbo-1106'
684
- | 'gpt-3.5-turbo-0125'
685
- | 'gpt-3.5-turbo-16k-0613'
686
- | null;
687
-
688
- /**
689
- * Specifies the format that the model must output. Compatible with
690
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
691
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
692
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
693
- *
694
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
695
- * message the model generates is valid JSON.
696
- *
697
- * **Important:** when using JSON mode, you **must** also instruct the model to
698
- * produce JSON yourself via a system or user message. Without this, the model may
699
- * generate an unending stream of whitespace until the generation reaches the token
700
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
701
- * the message content may be partially cut off if `finish_reason="length"`, which
702
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
703
- * max context length.
704
- */
705
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
706
-
707
- /**
708
- * If `true`, returns a stream of events that happen during the Run as server-sent
709
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
710
- * message.
711
- */
712
- stream?: boolean | null;
713
-
714
- /**
715
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
716
- * make the output more random, while lower values like 0.2 will make it more
717
- * focused and deterministic.
718
- */
719
- temperature?: number | null;
720
-
721
- /**
722
- * Controls which (if any) tool is called by the model. `none` means the model will
723
- * not call any tools and instead generates a message. `auto` is the default value
724
- * and means the model can pick between generating a message or calling one or more
725
- * tools. `required` means the model must call one or more tools before responding
726
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
727
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
728
- * call that tool.
729
- */
730
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
731
-
732
- /**
733
- * Override the tools the assistant can use for this run. This is useful for
734
- * modifying the behavior on a per-run basis.
735
- */
736
- tools?: Array<AssistantsAPI.AssistantTool> | null;
737
-
738
- /**
739
- * An alternative to sampling with temperature, called nucleus sampling, where the
740
- * model considers the results of the tokens with top_p probability mass. So 0.1
741
- * means only the tokens comprising the top 10% probability mass are considered.
742
- *
743
- * We generally recommend altering this or temperature but not both.
744
- */
745
- top_p?: number | null;
746
-
747
- /**
748
- * Controls for how a thread will be truncated prior to the run. Use this to
749
- * control the intial context window of the run.
750
- */
751
- truncation_strategy?: RunCreateParams.TruncationStrategy | null;
752
- }
753
-
754
- export namespace RunCreateParams {
755
- export interface AdditionalMessage {
756
- /**
757
- * The text contents of the message.
758
- */
759
- content: string | Array<MessagesAPI.MessageContentPartParam>;
760
-
761
- /**
762
- * The role of the entity that is creating the message. Allowed values include:
763
- *
764
- * - `user`: Indicates the message is sent by an actual user and should be used in
765
- * most cases to represent user-generated messages.
766
- * - `assistant`: Indicates the message is generated by the assistant. Use this
767
- * value to insert messages from the assistant into the conversation.
768
- */
769
- role: 'user' | 'assistant';
770
-
771
- /**
772
- * A list of files attached to the message, and the tools they should be added to.
773
- */
774
- attachments?: Array<AdditionalMessage.Attachment> | null;
775
-
776
- /**
777
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
778
- * for storing additional information about the object in a structured format. Keys
779
- * can be a maximum of 64 characters long and values can be a maxium of 512
780
- * characters long.
781
- */
782
- metadata?: unknown | null;
783
- }
784
-
785
- export namespace AdditionalMessage {
786
- export interface Attachment {
787
- /**
788
- * The ID of the file to attach to the message.
789
- */
790
- file_id?: string;
791
-
792
- /**
793
- * The tools to add this file to.
794
- */
795
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
796
- }
797
- }
798
-
799
- /**
800
- * Controls for how a thread will be truncated prior to the run. Use this to
801
- * control the intial context window of the run.
802
- */
803
- export interface TruncationStrategy {
804
- /**
805
- * The truncation strategy to use for the thread. The default is `auto`. If set to
806
- * `last_messages`, the thread will be truncated to the n most recent messages in
807
- * the thread. When set to `auto`, messages in the middle of the thread will be
808
- * dropped to fit the context length of the model, `max_prompt_tokens`.
809
- */
810
- type: 'auto' | 'last_messages';
811
-
812
- /**
813
- * The number of most recent messages from the thread when constructing the context
814
- * for the run.
815
- */
816
- last_messages?: number | null;
817
- }
818
-
819
- export type RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
820
- export type RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
821
- }
822
-
823
- export interface RunCreateParamsNonStreaming extends RunCreateParamsBase {
824
- /**
825
- * If `true`, returns a stream of events that happen during the Run as server-sent
826
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
827
- * message.
828
- */
829
- stream?: false | null;
830
- }
831
-
832
- export interface RunCreateParamsStreaming extends RunCreateParamsBase {
833
- /**
834
- * If `true`, returns a stream of events that happen during the Run as server-sent
835
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
836
- * message.
837
- */
838
- stream: true;
839
- }
840
-
841
- export interface RunUpdateParams {
842
- /**
843
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
844
- * for storing additional information about the object in a structured format. Keys
845
- * can be a maximum of 64 characters long and values can be a maxium of 512
846
- * characters long.
847
- */
848
- metadata?: unknown | null;
849
- }
850
-
851
- export interface RunListParams extends CursorPageParams {
852
- /**
853
- * A cursor for use in pagination. `before` is an object ID that defines your place
854
- * in the list. For instance, if you make a list request and receive 100 objects,
855
- * ending with obj_foo, your subsequent call can include before=obj_foo in order to
856
- * fetch the previous page of the list.
857
- */
858
- before?: string;
859
-
860
- /**
861
- * Sort order by the `created_at` timestamp of the objects. `asc` for ascending
862
- * order and `desc` for descending order.
863
- */
864
- order?: 'asc' | 'desc';
865
- }
866
-
867
- export interface RunCreateAndPollParams {
868
- /**
869
- * The ID of the
870
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
871
- * execute this run.
872
- */
873
- assistant_id: string;
874
-
875
- /**
876
- * Appends additional instructions at the end of the instructions for the run. This
877
- * is useful for modifying the behavior on a per-run basis without overriding other
878
- * instructions.
879
- */
880
- additional_instructions?: string | null;
881
-
882
- /**
883
- * Adds additional messages to the thread before creating the run.
884
- */
885
- additional_messages?: Array<RunCreateAndPollParams.AdditionalMessage> | null;
886
-
887
- /**
888
- * Overrides the
889
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
890
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
891
- */
892
- instructions?: string | null;
893
-
894
- /**
895
- * The maximum number of completion tokens that may be used over the course of the
896
- * run. The run will make a best effort to use only the number of completion tokens
897
- * specified, across multiple turns of the run. If the run exceeds the number of
898
- * completion tokens specified, the run will end with status `incomplete`. See
899
- * `incomplete_details` for more info.
900
- */
901
- max_completion_tokens?: number | null;
902
-
903
- /**
904
- * The maximum number of prompt tokens that may be used over the course of the run.
905
- * The run will make a best effort to use only the number of prompt tokens
906
- * specified, across multiple turns of the run. If the run exceeds the number of
907
- * prompt tokens specified, the run will end with status `incomplete`. See
908
- * `incomplete_details` for more info.
909
- */
910
- max_prompt_tokens?: number | null;
911
-
912
- /**
913
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
914
- * for storing additional information about the object in a structured format. Keys
915
- * can be a maximum of 64 characters long and values can be a maxium of 512
916
- * characters long.
917
- */
918
- metadata?: unknown | null;
919
-
920
- /**
921
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
922
- * be used to execute this run. If a value is provided here, it will override the
923
- * model associated with the assistant. If not, the model associated with the
924
- * assistant will be used.
925
- */
926
- model?:
927
- | (string & {})
928
- | 'gpt-4o'
929
- | 'gpt-4o-2024-05-13'
930
- | 'gpt-4-turbo'
931
- | 'gpt-4-turbo-2024-04-09'
932
- | 'gpt-4-0125-preview'
933
- | 'gpt-4-turbo-preview'
934
- | 'gpt-4-1106-preview'
935
- | 'gpt-4-vision-preview'
936
- | 'gpt-4'
937
- | 'gpt-4-0314'
938
- | 'gpt-4-0613'
939
- | 'gpt-4-32k'
940
- | 'gpt-4-32k-0314'
941
- | 'gpt-4-32k-0613'
942
- | 'gpt-3.5-turbo'
943
- | 'gpt-3.5-turbo-16k'
944
- | 'gpt-3.5-turbo-0613'
945
- | 'gpt-3.5-turbo-1106'
946
- | 'gpt-3.5-turbo-0125'
947
- | 'gpt-3.5-turbo-16k-0613'
948
- | null;
949
-
950
- /**
951
- * Specifies the format that the model must output. Compatible with
952
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
953
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
954
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
955
- *
956
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
957
- * message the model generates is valid JSON.
958
- *
959
- * **Important:** when using JSON mode, you **must** also instruct the model to
960
- * produce JSON yourself via a system or user message. Without this, the model may
961
- * generate an unending stream of whitespace until the generation reaches the token
962
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
963
- * the message content may be partially cut off if `finish_reason="length"`, which
964
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
965
- * max context length.
966
- */
967
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
968
-
969
- /**
970
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
971
- * make the output more random, while lower values like 0.2 will make it more
972
- * focused and deterministic.
973
- */
974
- temperature?: number | null;
975
-
976
- /**
977
- * Controls which (if any) tool is called by the model. `none` means the model will
978
- * not call any tools and instead generates a message. `auto` is the default value
979
- * and means the model can pick between generating a message or calling one or more
980
- * tools. `required` means the model must call one or more tools before responding
981
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
982
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
983
- * call that tool.
984
- */
985
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
986
-
987
- /**
988
- * Override the tools the assistant can use for this run. This is useful for
989
- * modifying the behavior on a per-run basis.
990
- */
991
- tools?: Array<AssistantsAPI.AssistantTool> | null;
992
-
993
- /**
994
- * An alternative to sampling with temperature, called nucleus sampling, where the
995
- * model considers the results of the tokens with top_p probability mass. So 0.1
996
- * means only the tokens comprising the top 10% probability mass are considered.
997
- *
998
- * We generally recommend altering this or temperature but not both.
999
- */
1000
- top_p?: number | null;
1001
-
1002
- /**
1003
- * Controls for how a thread will be truncated prior to the run. Use this to
1004
- * control the intial context window of the run.
1005
- */
1006
- truncation_strategy?: RunCreateAndPollParams.TruncationStrategy | null;
1007
- }
1008
-
1009
- export namespace RunCreateAndPollParams {
1010
- export interface AdditionalMessage {
1011
- /**
1012
- * The text contents of the message.
1013
- */
1014
- content: string | Array<MessagesAPI.MessageContentPartParam>;
1015
-
1016
- /**
1017
- * The role of the entity that is creating the message. Allowed values include:
1018
- *
1019
- * - `user`: Indicates the message is sent by an actual user and should be used in
1020
- * most cases to represent user-generated messages.
1021
- * - `assistant`: Indicates the message is generated by the assistant. Use this
1022
- * value to insert messages from the assistant into the conversation.
1023
- */
1024
- role: 'user' | 'assistant';
1025
-
1026
- /**
1027
- * A list of files attached to the message, and the tools they should be added to.
1028
- */
1029
- attachments?: Array<AdditionalMessage.Attachment> | null;
1030
-
1031
- /**
1032
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1033
- * for storing additional information about the object in a structured format. Keys
1034
- * can be a maximum of 64 characters long and values can be a maxium of 512
1035
- * characters long.
1036
- */
1037
- metadata?: unknown | null;
1038
- }
1039
-
1040
- export namespace AdditionalMessage {
1041
- export interface Attachment {
1042
- /**
1043
- * The ID of the file to attach to the message.
1044
- */
1045
- file_id?: string;
1046
-
1047
- /**
1048
- * The tools to add this file to.
1049
- */
1050
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
1051
- }
1052
- }
1053
-
1054
- /**
1055
- * Controls for how a thread will be truncated prior to the run. Use this to
1056
- * control the intial context window of the run.
1057
- */
1058
- export interface TruncationStrategy {
1059
- /**
1060
- * The truncation strategy to use for the thread. The default is `auto`. If set to
1061
- * `last_messages`, the thread will be truncated to the n most recent messages in
1062
- * the thread. When set to `auto`, messages in the middle of the thread will be
1063
- * dropped to fit the context length of the model, `max_prompt_tokens`.
1064
- */
1065
- type: 'auto' | 'last_messages';
1066
-
1067
- /**
1068
- * The number of most recent messages from the thread when constructing the context
1069
- * for the run.
1070
- */
1071
- last_messages?: number | null;
1072
- }
1073
- }
1074
-
1075
- export interface RunCreateAndStreamParams {
1076
- /**
1077
- * The ID of the
1078
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
1079
- * execute this run.
1080
- */
1081
- assistant_id: string;
1082
-
1083
- /**
1084
- * Appends additional instructions at the end of the instructions for the run. This
1085
- * is useful for modifying the behavior on a per-run basis without overriding other
1086
- * instructions.
1087
- */
1088
- additional_instructions?: string | null;
1089
-
1090
- /**
1091
- * Adds additional messages to the thread before creating the run.
1092
- */
1093
- additional_messages?: Array<RunCreateAndStreamParams.AdditionalMessage> | null;
1094
-
1095
- /**
1096
- * Overrides the
1097
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
1098
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
1099
- */
1100
- instructions?: string | null;
1101
-
1102
- /**
1103
- * The maximum number of completion tokens that may be used over the course of the
1104
- * run. The run will make a best effort to use only the number of completion tokens
1105
- * specified, across multiple turns of the run. If the run exceeds the number of
1106
- * completion tokens specified, the run will end with status `incomplete`. See
1107
- * `incomplete_details` for more info.
1108
- */
1109
- max_completion_tokens?: number | null;
1110
-
1111
- /**
1112
- * The maximum number of prompt tokens that may be used over the course of the run.
1113
- * The run will make a best effort to use only the number of prompt tokens
1114
- * specified, across multiple turns of the run. If the run exceeds the number of
1115
- * prompt tokens specified, the run will end with status `incomplete`. See
1116
- * `incomplete_details` for more info.
1117
- */
1118
- max_prompt_tokens?: number | null;
1119
-
1120
- /**
1121
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1122
- * for storing additional information about the object in a structured format. Keys
1123
- * can be a maximum of 64 characters long and values can be a maxium of 512
1124
- * characters long.
1125
- */
1126
- metadata?: unknown | null;
1127
-
1128
- /**
1129
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
1130
- * be used to execute this run. If a value is provided here, it will override the
1131
- * model associated with the assistant. If not, the model associated with the
1132
- * assistant will be used.
1133
- */
1134
- model?:
1135
- | (string & {})
1136
- | 'gpt-4o'
1137
- | 'gpt-4o-2024-05-13'
1138
- | 'gpt-4-turbo'
1139
- | 'gpt-4-turbo-2024-04-09'
1140
- | 'gpt-4-0125-preview'
1141
- | 'gpt-4-turbo-preview'
1142
- | 'gpt-4-1106-preview'
1143
- | 'gpt-4-vision-preview'
1144
- | 'gpt-4'
1145
- | 'gpt-4-0314'
1146
- | 'gpt-4-0613'
1147
- | 'gpt-4-32k'
1148
- | 'gpt-4-32k-0314'
1149
- | 'gpt-4-32k-0613'
1150
- | 'gpt-3.5-turbo'
1151
- | 'gpt-3.5-turbo-16k'
1152
- | 'gpt-3.5-turbo-0613'
1153
- | 'gpt-3.5-turbo-1106'
1154
- | 'gpt-3.5-turbo-0125'
1155
- | 'gpt-3.5-turbo-16k-0613'
1156
- | null;
1157
-
1158
- /**
1159
- * Specifies the format that the model must output. Compatible with
1160
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1161
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1162
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1163
- *
1164
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
1165
- * message the model generates is valid JSON.
1166
- *
1167
- * **Important:** when using JSON mode, you **must** also instruct the model to
1168
- * produce JSON yourself via a system or user message. Without this, the model may
1169
- * generate an unending stream of whitespace until the generation reaches the token
1170
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
1171
- * the message content may be partially cut off if `finish_reason="length"`, which
1172
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
1173
- * max context length.
1174
- */
1175
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
1176
-
1177
- /**
1178
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1179
- * make the output more random, while lower values like 0.2 will make it more
1180
- * focused and deterministic.
1181
- */
1182
- temperature?: number | null;
1183
-
1184
- /**
1185
- * Controls which (if any) tool is called by the model. `none` means the model will
1186
- * not call any tools and instead generates a message. `auto` is the default value
1187
- * and means the model can pick between generating a message or calling one or more
1188
- * tools. `required` means the model must call one or more tools before responding
1189
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
1190
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
1191
- * call that tool.
1192
- */
1193
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
1194
-
1195
- /**
1196
- * Override the tools the assistant can use for this run. This is useful for
1197
- * modifying the behavior on a per-run basis.
1198
- */
1199
- tools?: Array<AssistantsAPI.AssistantTool> | null;
1200
-
1201
- /**
1202
- * An alternative to sampling with temperature, called nucleus sampling, where the
1203
- * model considers the results of the tokens with top_p probability mass. So 0.1
1204
- * means only the tokens comprising the top 10% probability mass are considered.
1205
- *
1206
- * We generally recommend altering this or temperature but not both.
1207
- */
1208
- top_p?: number | null;
1209
-
1210
- /**
1211
- * Controls for how a thread will be truncated prior to the run. Use this to
1212
- * control the intial context window of the run.
1213
- */
1214
- truncation_strategy?: RunCreateAndStreamParams.TruncationStrategy | null;
1215
- }
1216
-
1217
- export namespace RunCreateAndStreamParams {
1218
- export interface AdditionalMessage {
1219
- /**
1220
- * The text contents of the message.
1221
- */
1222
- content: string | Array<MessagesAPI.MessageContentPartParam>;
1223
-
1224
- /**
1225
- * The role of the entity that is creating the message. Allowed values include:
1226
- *
1227
- * - `user`: Indicates the message is sent by an actual user and should be used in
1228
- * most cases to represent user-generated messages.
1229
- * - `assistant`: Indicates the message is generated by the assistant. Use this
1230
- * value to insert messages from the assistant into the conversation.
1231
- */
1232
- role: 'user' | 'assistant';
1233
-
1234
- /**
1235
- * A list of files attached to the message, and the tools they should be added to.
1236
- */
1237
- attachments?: Array<AdditionalMessage.Attachment> | null;
1238
-
1239
- /**
1240
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1241
- * for storing additional information about the object in a structured format. Keys
1242
- * can be a maximum of 64 characters long and values can be a maxium of 512
1243
- * characters long.
1244
- */
1245
- metadata?: unknown | null;
1246
- }
1247
-
1248
- export namespace AdditionalMessage {
1249
- export interface Attachment {
1250
- /**
1251
- * The ID of the file to attach to the message.
1252
- */
1253
- file_id?: string;
1254
-
1255
- /**
1256
- * The tools to add this file to.
1257
- */
1258
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
1259
- }
1260
- }
1261
-
1262
- /**
1263
- * Controls for how a thread will be truncated prior to the run. Use this to
1264
- * control the intial context window of the run.
1265
- */
1266
- export interface TruncationStrategy {
1267
- /**
1268
- * The truncation strategy to use for the thread. The default is `auto`. If set to
1269
- * `last_messages`, the thread will be truncated to the n most recent messages in
1270
- * the thread. When set to `auto`, messages in the middle of the thread will be
1271
- * dropped to fit the context length of the model, `max_prompt_tokens`.
1272
- */
1273
- type: 'auto' | 'last_messages';
1274
-
1275
- /**
1276
- * The number of most recent messages from the thread when constructing the context
1277
- * for the run.
1278
- */
1279
- last_messages?: number | null;
1280
- }
1281
- }
1282
-
1283
- export interface RunStreamParams {
1284
- /**
1285
- * The ID of the
1286
- * [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
1287
- * execute this run.
1288
- */
1289
- assistant_id: string;
1290
-
1291
- /**
1292
- * Appends additional instructions at the end of the instructions for the run. This
1293
- * is useful for modifying the behavior on a per-run basis without overriding other
1294
- * instructions.
1295
- */
1296
- additional_instructions?: string | null;
1297
-
1298
- /**
1299
- * Adds additional messages to the thread before creating the run.
1300
- */
1301
- additional_messages?: Array<RunStreamParams.AdditionalMessage> | null;
1302
-
1303
- /**
1304
- * Overrides the
1305
- * [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
1306
- * of the assistant. This is useful for modifying the behavior on a per-run basis.
1307
- */
1308
- instructions?: string | null;
1309
-
1310
- /**
1311
- * The maximum number of completion tokens that may be used over the course of the
1312
- * run. The run will make a best effort to use only the number of completion tokens
1313
- * specified, across multiple turns of the run. If the run exceeds the number of
1314
- * completion tokens specified, the run will end with status `incomplete`. See
1315
- * `incomplete_details` for more info.
1316
- */
1317
- max_completion_tokens?: number | null;
1318
-
1319
- /**
1320
- * The maximum number of prompt tokens that may be used over the course of the run.
1321
- * The run will make a best effort to use only the number of prompt tokens
1322
- * specified, across multiple turns of the run. If the run exceeds the number of
1323
- * prompt tokens specified, the run will end with status `incomplete`. See
1324
- * `incomplete_details` for more info.
1325
- */
1326
- max_prompt_tokens?: number | null;
1327
-
1328
- /**
1329
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1330
- * for storing additional information about the object in a structured format. Keys
1331
- * can be a maximum of 64 characters long and values can be a maxium of 512
1332
- * characters long.
1333
- */
1334
- metadata?: unknown | null;
1335
-
1336
- /**
1337
- * The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
1338
- * be used to execute this run. If a value is provided here, it will override the
1339
- * model associated with the assistant. If not, the model associated with the
1340
- * assistant will be used.
1341
- */
1342
- model?:
1343
- | (string & {})
1344
- | 'gpt-4o'
1345
- | 'gpt-4o-2024-05-13'
1346
- | 'gpt-4-turbo'
1347
- | 'gpt-4-turbo-2024-04-09'
1348
- | 'gpt-4-0125-preview'
1349
- | 'gpt-4-turbo-preview'
1350
- | 'gpt-4-1106-preview'
1351
- | 'gpt-4-vision-preview'
1352
- | 'gpt-4'
1353
- | 'gpt-4-0314'
1354
- | 'gpt-4-0613'
1355
- | 'gpt-4-32k'
1356
- | 'gpt-4-32k-0314'
1357
- | 'gpt-4-32k-0613'
1358
- | 'gpt-3.5-turbo'
1359
- | 'gpt-3.5-turbo-16k'
1360
- | 'gpt-3.5-turbo-0613'
1361
- | 'gpt-3.5-turbo-1106'
1362
- | 'gpt-3.5-turbo-0125'
1363
- | 'gpt-3.5-turbo-16k-0613'
1364
- | null;
1365
-
1366
- /**
1367
- * Specifies the format that the model must output. Compatible with
1368
- * [GPT-4o](https://platform.openai.com/docs/models/gpt-4o),
1369
- * [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4),
1370
- * and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
1371
- *
1372
- * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
1373
- * message the model generates is valid JSON.
1374
- *
1375
- * **Important:** when using JSON mode, you **must** also instruct the model to
1376
- * produce JSON yourself via a system or user message. Without this, the model may
1377
- * generate an unending stream of whitespace until the generation reaches the token
1378
- * limit, resulting in a long-running and seemingly "stuck" request. Also note that
1379
- * the message content may be partially cut off if `finish_reason="length"`, which
1380
- * indicates the generation exceeded `max_tokens` or the conversation exceeded the
1381
- * max context length.
1382
- */
1383
- response_format?: ThreadsAPI.AssistantResponseFormatOption | null;
1384
-
1385
- /**
1386
- * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
1387
- * make the output more random, while lower values like 0.2 will make it more
1388
- * focused and deterministic.
1389
- */
1390
- temperature?: number | null;
1391
-
1392
- /**
1393
- * Controls which (if any) tool is called by the model. `none` means the model will
1394
- * not call any tools and instead generates a message. `auto` is the default value
1395
- * and means the model can pick between generating a message or calling one or more
1396
- * tools. `required` means the model must call one or more tools before responding
1397
- * to the user. Specifying a particular tool like `{"type": "file_search"}` or
1398
- * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
1399
- * call that tool.
1400
- */
1401
- tool_choice?: ThreadsAPI.AssistantToolChoiceOption | null;
1402
-
1403
- /**
1404
- * Override the tools the assistant can use for this run. This is useful for
1405
- * modifying the behavior on a per-run basis.
1406
- */
1407
- tools?: Array<AssistantsAPI.AssistantTool> | null;
1408
-
1409
- /**
1410
- * An alternative to sampling with temperature, called nucleus sampling, where the
1411
- * model considers the results of the tokens with top_p probability mass. So 0.1
1412
- * means only the tokens comprising the top 10% probability mass are considered.
1413
- *
1414
- * We generally recommend altering this or temperature but not both.
1415
- */
1416
- top_p?: number | null;
1417
-
1418
- /**
1419
- * Controls for how a thread will be truncated prior to the run. Use this to
1420
- * control the intial context window of the run.
1421
- */
1422
- truncation_strategy?: RunStreamParams.TruncationStrategy | null;
1423
- }
1424
-
1425
- export namespace RunStreamParams {
1426
- export interface AdditionalMessage {
1427
- /**
1428
- * The text contents of the message.
1429
- */
1430
- content: string | Array<MessagesAPI.MessageContentPartParam>;
1431
-
1432
- /**
1433
- * The role of the entity that is creating the message. Allowed values include:
1434
- *
1435
- * - `user`: Indicates the message is sent by an actual user and should be used in
1436
- * most cases to represent user-generated messages.
1437
- * - `assistant`: Indicates the message is generated by the assistant. Use this
1438
- * value to insert messages from the assistant into the conversation.
1439
- */
1440
- role: 'user' | 'assistant';
1441
-
1442
- /**
1443
- * A list of files attached to the message, and the tools they should be added to.
1444
- */
1445
- attachments?: Array<AdditionalMessage.Attachment> | null;
1446
-
1447
- /**
1448
- * Set of 16 key-value pairs that can be attached to an object. This can be useful
1449
- * for storing additional information about the object in a structured format. Keys
1450
- * can be a maximum of 64 characters long and values can be a maxium of 512
1451
- * characters long.
1452
- */
1453
- metadata?: unknown | null;
1454
- }
1455
-
1456
- export namespace AdditionalMessage {
1457
- export interface Attachment {
1458
- /**
1459
- * The ID of the file to attach to the message.
1460
- */
1461
- file_id?: string;
1462
-
1463
- /**
1464
- * The tools to add this file to.
1465
- */
1466
- tools?: Array<AssistantsAPI.CodeInterpreterTool | AssistantsAPI.FileSearchTool>;
1467
- }
1468
- }
1469
-
1470
- /**
1471
- * Controls for how a thread will be truncated prior to the run. Use this to
1472
- * control the intial context window of the run.
1473
- */
1474
- export interface TruncationStrategy {
1475
- /**
1476
- * The truncation strategy to use for the thread. The default is `auto`. If set to
1477
- * `last_messages`, the thread will be truncated to the n most recent messages in
1478
- * the thread. When set to `auto`, messages in the middle of the thread will be
1479
- * dropped to fit the context length of the model, `max_prompt_tokens`.
1480
- */
1481
- type: 'auto' | 'last_messages';
1482
-
1483
- /**
1484
- * The number of most recent messages from the thread when constructing the context
1485
- * for the run.
1486
- */
1487
- last_messages?: number | null;
1488
- }
1489
- }
1490
-
1491
- export type RunSubmitToolOutputsParams =
1492
- | RunSubmitToolOutputsParamsNonStreaming
1493
- | RunSubmitToolOutputsParamsStreaming;
1494
-
1495
- export interface RunSubmitToolOutputsParamsBase {
1496
- /**
1497
- * A list of tools for which the outputs are being submitted.
1498
- */
1499
- tool_outputs: Array<RunSubmitToolOutputsParams.ToolOutput>;
1500
-
1501
- /**
1502
- * If `true`, returns a stream of events that happen during the Run as server-sent
1503
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1504
- * message.
1505
- */
1506
- stream?: boolean | null;
1507
- }
1508
-
1509
- export namespace RunSubmitToolOutputsParams {
1510
- export interface ToolOutput {
1511
- /**
1512
- * The output of the tool call to be submitted to continue the run.
1513
- */
1514
- output?: string;
1515
-
1516
- /**
1517
- * The ID of the tool call in the `required_action` object within the run object
1518
- * the output is being submitted for.
1519
- */
1520
- tool_call_id?: string;
1521
- }
1522
-
1523
- export type RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
1524
- export type RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
1525
- }
1526
-
1527
- export interface RunSubmitToolOutputsParamsNonStreaming extends RunSubmitToolOutputsParamsBase {
1528
- /**
1529
- * If `true`, returns a stream of events that happen during the Run as server-sent
1530
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1531
- * message.
1532
- */
1533
- stream?: false | null;
1534
- }
1535
-
1536
- export interface RunSubmitToolOutputsParamsStreaming extends RunSubmitToolOutputsParamsBase {
1537
- /**
1538
- * If `true`, returns a stream of events that happen during the Run as server-sent
1539
- * events, terminating when the Run enters a terminal state with a `data: [DONE]`
1540
- * message.
1541
- */
1542
- stream: true;
1543
- }
1544
-
1545
- export interface RunSubmitToolOutputsAndPollParams {
1546
- /**
1547
- * A list of tools for which the outputs are being submitted.
1548
- */
1549
- tool_outputs: Array<RunSubmitToolOutputsAndPollParams.ToolOutput>;
1550
- }
1551
-
1552
- export namespace RunSubmitToolOutputsAndPollParams {
1553
- export interface ToolOutput {
1554
- /**
1555
- * The output of the tool call to be submitted to continue the run.
1556
- */
1557
- output?: string;
1558
-
1559
- /**
1560
- * The ID of the tool call in the `required_action` object within the run object
1561
- * the output is being submitted for.
1562
- */
1563
- tool_call_id?: string;
1564
- }
1565
- }
1566
-
1567
- export interface RunSubmitToolOutputsStreamParams {
1568
- /**
1569
- * A list of tools for which the outputs are being submitted.
1570
- */
1571
- tool_outputs: Array<RunSubmitToolOutputsStreamParams.ToolOutput>;
1572
- }
1573
-
1574
- export namespace RunSubmitToolOutputsStreamParams {
1575
- export interface ToolOutput {
1576
- /**
1577
- * The output of the tool call to be submitted to continue the run.
1578
- */
1579
- output?: string;
1580
-
1581
- /**
1582
- * The ID of the tool call in the `required_action` object within the run object
1583
- * the output is being submitted for.
1584
- */
1585
- tool_call_id?: string;
1586
- }
1587
- }
1588
-
1589
- export namespace Runs {
1590
- export import RequiredActionFunctionToolCall = RunsAPI.RequiredActionFunctionToolCall;
1591
- export import Run = RunsAPI.Run;
1592
- export import RunStatus = RunsAPI.RunStatus;
1593
- export import RunsPage = RunsAPI.RunsPage;
1594
- export import RunCreateParams = RunsAPI.RunCreateParams;
1595
- export import RunCreateParamsNonStreaming = RunsAPI.RunCreateParamsNonStreaming;
1596
- export import RunCreateParamsStreaming = RunsAPI.RunCreateParamsStreaming;
1597
- export import RunUpdateParams = RunsAPI.RunUpdateParams;
1598
- export import RunListParams = RunsAPI.RunListParams;
1599
- export import RunCreateAndPollParams = RunsAPI.RunCreateAndPollParams;
1600
- export import RunCreateAndStreamParams = RunsAPI.RunCreateAndStreamParams;
1601
- export import RunStreamParams = RunsAPI.RunStreamParams;
1602
- export import RunSubmitToolOutputsParams = RunsAPI.RunSubmitToolOutputsParams;
1603
- export import RunSubmitToolOutputsParamsNonStreaming = RunsAPI.RunSubmitToolOutputsParamsNonStreaming;
1604
- export import RunSubmitToolOutputsParamsStreaming = RunsAPI.RunSubmitToolOutputsParamsStreaming;
1605
- export import RunSubmitToolOutputsAndPollParams = RunsAPI.RunSubmitToolOutputsAndPollParams;
1606
- export import RunSubmitToolOutputsStreamParams = RunsAPI.RunSubmitToolOutputsStreamParams;
1607
- export import Steps = StepsAPI.Steps;
1608
- export import CodeInterpreterLogs = StepsAPI.CodeInterpreterLogs;
1609
- export import CodeInterpreterOutputImage = StepsAPI.CodeInterpreterOutputImage;
1610
- export import CodeInterpreterToolCall = StepsAPI.CodeInterpreterToolCall;
1611
- export import CodeInterpreterToolCallDelta = StepsAPI.CodeInterpreterToolCallDelta;
1612
- export import FileSearchToolCall = StepsAPI.FileSearchToolCall;
1613
- export import FileSearchToolCallDelta = StepsAPI.FileSearchToolCallDelta;
1614
- export import FunctionToolCall = StepsAPI.FunctionToolCall;
1615
- export import FunctionToolCallDelta = StepsAPI.FunctionToolCallDelta;
1616
- export import MessageCreationStepDetails = StepsAPI.MessageCreationStepDetails;
1617
- export import RunStep = StepsAPI.RunStep;
1618
- export import RunStepDelta = StepsAPI.RunStepDelta;
1619
- export import RunStepDeltaEvent = StepsAPI.RunStepDeltaEvent;
1620
- export import RunStepDeltaMessageDelta = StepsAPI.RunStepDeltaMessageDelta;
1621
- export import ToolCall = StepsAPI.ToolCall;
1622
- export import ToolCallDelta = StepsAPI.ToolCallDelta;
1623
- export import ToolCallDeltaObject = StepsAPI.ToolCallDeltaObject;
1624
- export import ToolCallsStepDetails = StepsAPI.ToolCallsStepDetails;
1625
- export import RunStepsPage = StepsAPI.RunStepsPage;
1626
- export import StepListParams = StepsAPI.StepListParams;
1627
- }