@superinterface/react 5.1.2 → 5.2.0-beta.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (417)
  1. package/dist/index.cjs +1 -1
  2. package/dist/index.cjs.map +1 -1
  3. package/dist/index.d.cts +5 -5
  4. package/dist/index.d.ts +5 -5
  5. package/dist/index.js +1 -1
  6. package/dist/index.js.map +1 -1
  7. package/dist/server.d.cts +1 -1
  8. package/dist/server.d.ts +1 -1
  9. package/package.json +8 -6
  10. package/types/index.d.ts +4 -0
  11. package/types/node_modules/openai/LICENSE +201 -0
  12. package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.mts +7 -0
  13. package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.ts +7 -0
  14. package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.mts +32 -0
  15. package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.ts +32 -0
  16. package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.mts +21 -0
  17. package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.ts +21 -0
  18. package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.mts +12 -0
  19. package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.ts +12 -0
  20. package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.mts +38 -0
  21. package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.ts +38 -0
  22. package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.mts +38 -0
  23. package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.ts +38 -0
  24. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.mts +3 -0
  25. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.ts +3 -0
  26. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.mts +13 -0
  27. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.ts +13 -0
  28. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.mts +15 -0
  29. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.ts +15 -0
  30. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.mts +5 -0
  31. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.ts +5 -0
  32. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.mts +4 -0
  33. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.ts +4 -0
  34. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.mts +4 -0
  35. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.ts +4 -0
  36. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.mts +16 -0
  37. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.ts +16 -0
  38. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.mts +7 -0
  39. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.ts +7 -0
  40. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.mts +5 -0
  41. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.ts +5 -0
  42. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.mts +7 -0
  43. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.ts +7 -0
  44. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.mts +9 -0
  45. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.ts +9 -0
  46. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.mts +10 -0
  47. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.ts +10 -0
  48. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.mts +16 -0
  49. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.ts +16 -0
  50. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.mts +7 -0
  51. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.ts +7 -0
  52. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.mts +5 -0
  53. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.ts +5 -0
  54. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.mts +6 -0
  55. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.ts +6 -0
  56. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.mts +11 -0
  57. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.ts +11 -0
  58. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.mts +14 -0
  59. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.ts +14 -0
  60. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.mts +11 -0
  61. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.ts +11 -0
  62. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.mts +5 -0
  63. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.ts +5 -0
  64. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.mts +6 -0
  65. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.ts +6 -0
  66. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.mts +5 -0
  67. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.ts +5 -0
  68. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.mts +4 -0
  69. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.ts +4 -0
  70. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.mts +14 -0
  71. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.ts +14 -0
  72. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.mts +14 -0
  73. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.ts +14 -0
  74. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.mts +70 -0
  75. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.ts +70 -0
  76. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.mts +14 -0
  77. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.ts +14 -0
  78. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.mts +5 -0
  79. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.ts +5 -0
  80. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.mts +24 -0
  81. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.ts +24 -0
  82. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.mts +3 -0
  83. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.ts +3 -0
  84. package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.mts +4 -0
  85. package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.ts +4 -0
  86. package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.mts +11 -0
  87. package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.ts +11 -0
  88. package/types/node_modules/openai/api-promise.d.mts +2 -0
  89. package/types/node_modules/openai/api-promise.d.ts +2 -0
  90. package/types/node_modules/openai/azure.d.mts +63 -0
  91. package/types/node_modules/openai/azure.d.ts +63 -0
  92. package/types/node_modules/openai/beta/realtime/index.d.mts +2 -0
  93. package/types/node_modules/openai/beta/realtime/index.d.ts +2 -0
  94. package/types/node_modules/openai/beta/realtime/internal-base.d.mts +45 -0
  95. package/types/node_modules/openai/beta/realtime/internal-base.d.ts +45 -0
  96. package/types/node_modules/openai/beta/realtime/websocket.d.mts +36 -0
  97. package/types/node_modules/openai/beta/realtime/websocket.d.ts +36 -0
  98. package/types/node_modules/openai/beta/realtime/ws.d.mts +27 -0
  99. package/types/node_modules/openai/beta/realtime/ws.d.ts +27 -0
  100. package/types/node_modules/openai/client.d.mts +297 -0
  101. package/types/node_modules/openai/client.d.ts +297 -0
  102. package/types/node_modules/openai/core/api-promise.d.mts +49 -0
  103. package/types/node_modules/openai/core/api-promise.d.ts +49 -0
  104. package/types/node_modules/openai/core/error.d.mts +59 -0
  105. package/types/node_modules/openai/core/error.d.ts +59 -0
  106. package/types/node_modules/openai/core/pagination.d.mts +89 -0
  107. package/types/node_modules/openai/core/pagination.d.ts +89 -0
  108. package/types/node_modules/openai/core/resource.d.mts +6 -0
  109. package/types/node_modules/openai/core/resource.d.ts +6 -0
  110. package/types/node_modules/openai/core/streaming.d.mts +33 -0
  111. package/types/node_modules/openai/core/streaming.d.ts +33 -0
  112. package/types/node_modules/openai/core/uploads.d.mts +3 -0
  113. package/types/node_modules/openai/core/uploads.d.ts +3 -0
  114. package/types/node_modules/openai/error.d.mts +2 -0
  115. package/types/node_modules/openai/error.d.ts +2 -0
  116. package/types/node_modules/openai/helpers/audio.d.mts +9 -0
  117. package/types/node_modules/openai/helpers/audio.d.ts +9 -0
  118. package/types/node_modules/openai/helpers/zod.d.mts +70 -0
  119. package/types/node_modules/openai/helpers/zod.d.ts +70 -0
  120. package/types/node_modules/openai/index.d.mts +8 -0
  121. package/types/node_modules/openai/index.d.ts +8 -0
  122. package/types/node_modules/openai/internal/builtin-types.d.mts +73 -0
  123. package/types/node_modules/openai/internal/builtin-types.d.ts +73 -0
  124. package/types/node_modules/openai/internal/decoders/line.d.mts +17 -0
  125. package/types/node_modules/openai/internal/decoders/line.d.ts +17 -0
  126. package/types/node_modules/openai/internal/detect-platform.d.mts +15 -0
  127. package/types/node_modules/openai/internal/detect-platform.d.ts +15 -0
  128. package/types/node_modules/openai/internal/errors.d.mts +3 -0
  129. package/types/node_modules/openai/internal/errors.d.ts +3 -0
  130. package/types/node_modules/openai/internal/headers.d.mts +20 -0
  131. package/types/node_modules/openai/internal/headers.d.ts +20 -0
  132. package/types/node_modules/openai/internal/parse.d.mts +17 -0
  133. package/types/node_modules/openai/internal/parse.d.ts +17 -0
  134. package/types/node_modules/openai/internal/qs/formats.d.mts +7 -0
  135. package/types/node_modules/openai/internal/qs/formats.d.ts +7 -0
  136. package/types/node_modules/openai/internal/qs/index.d.mts +10 -0
  137. package/types/node_modules/openai/internal/qs/index.d.ts +10 -0
  138. package/types/node_modules/openai/internal/qs/stringify.d.mts +3 -0
  139. package/types/node_modules/openai/internal/qs/stringify.d.ts +3 -0
  140. package/types/node_modules/openai/internal/qs/types.d.mts +57 -0
  141. package/types/node_modules/openai/internal/qs/types.d.ts +57 -0
  142. package/types/node_modules/openai/internal/qs/utils.d.mts +15 -0
  143. package/types/node_modules/openai/internal/qs/utils.d.ts +15 -0
  144. package/types/node_modules/openai/internal/request-options.d.mts +78 -0
  145. package/types/node_modules/openai/internal/request-options.d.ts +78 -0
  146. package/types/node_modules/openai/internal/shim-types.d.mts +17 -0
  147. package/types/node_modules/openai/internal/shim-types.d.ts +17 -0
  148. package/types/node_modules/openai/internal/shims.d.mts +20 -0
  149. package/types/node_modules/openai/internal/shims.d.ts +20 -0
  150. package/types/node_modules/openai/internal/stream-utils.d.mts +8 -0
  151. package/types/node_modules/openai/internal/stream-utils.d.ts +8 -0
  152. package/types/node_modules/openai/internal/to-file.d.mts +45 -0
  153. package/types/node_modules/openai/internal/to-file.d.ts +45 -0
  154. package/types/node_modules/openai/internal/types.d.mts +69 -0
  155. package/types/node_modules/openai/internal/types.d.ts +69 -0
  156. package/types/node_modules/openai/internal/uploads.d.mts +42 -0
  157. package/types/node_modules/openai/internal/uploads.d.ts +42 -0
  158. package/types/node_modules/openai/internal/utils/base64.d.mts +9 -0
  159. package/types/node_modules/openai/internal/utils/base64.d.ts +9 -0
  160. package/types/node_modules/openai/internal/utils/bytes.d.mts +4 -0
  161. package/types/node_modules/openai/internal/utils/bytes.d.ts +4 -0
  162. package/types/node_modules/openai/internal/utils/env.d.mts +9 -0
  163. package/types/node_modules/openai/internal/utils/env.d.ts +9 -0
  164. package/types/node_modules/openai/internal/utils/log.d.mts +37 -0
  165. package/types/node_modules/openai/internal/utils/log.d.ts +37 -0
  166. package/types/node_modules/openai/internal/utils/path.d.mts +15 -0
  167. package/types/node_modules/openai/internal/utils/path.d.ts +15 -0
  168. package/types/node_modules/openai/internal/utils/sleep.d.mts +2 -0
  169. package/types/node_modules/openai/internal/utils/sleep.d.ts +2 -0
  170. package/types/node_modules/openai/internal/utils/uuid.d.mts +5 -0
  171. package/types/node_modules/openai/internal/utils/uuid.d.ts +5 -0
  172. package/types/node_modules/openai/internal/utils/values.d.mts +18 -0
  173. package/types/node_modules/openai/internal/utils/values.d.ts +18 -0
  174. package/types/node_modules/openai/internal/utils.d.mts +7 -0
  175. package/types/node_modules/openai/internal/utils.d.ts +7 -0
  176. package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.mts +59 -0
  177. package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +59 -0
  178. package/types/node_modules/openai/lib/AssistantStream.d.mts +60 -0
  179. package/types/node_modules/openai/lib/AssistantStream.d.ts +60 -0
  180. package/types/node_modules/openai/lib/ChatCompletionRunner.d.mts +16 -0
  181. package/types/node_modules/openai/lib/ChatCompletionRunner.d.ts +16 -0
  182. package/types/node_modules/openai/lib/ChatCompletionStream.d.mts +208 -0
  183. package/types/node_modules/openai/lib/ChatCompletionStream.d.ts +208 -0
  184. package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.mts +19 -0
  185. package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +19 -0
  186. package/types/node_modules/openai/lib/EventEmitter.d.mts +45 -0
  187. package/types/node_modules/openai/lib/EventEmitter.d.ts +45 -0
  188. package/types/node_modules/openai/lib/EventStream.d.mts +62 -0
  189. package/types/node_modules/openai/lib/EventStream.d.ts +62 -0
  190. package/types/node_modules/openai/lib/ResponsesParser.d.mts +36 -0
  191. package/types/node_modules/openai/lib/ResponsesParser.d.ts +36 -0
  192. package/types/node_modules/openai/lib/RunnableFunction.d.mts +83 -0
  193. package/types/node_modules/openai/lib/RunnableFunction.d.ts +83 -0
  194. package/types/node_modules/openai/lib/Util.d.mts +5 -0
  195. package/types/node_modules/openai/lib/Util.d.ts +5 -0
  196. package/types/node_modules/openai/lib/chatCompletionUtils.d.mts +5 -0
  197. package/types/node_modules/openai/lib/chatCompletionUtils.d.ts +5 -0
  198. package/types/node_modules/openai/lib/jsonschema.d.mts +106 -0
  199. package/types/node_modules/openai/lib/jsonschema.d.ts +106 -0
  200. package/types/node_modules/openai/lib/parser.d.mts +47 -0
  201. package/types/node_modules/openai/lib/parser.d.ts +47 -0
  202. package/types/node_modules/openai/lib/responses/EventTypes.d.mts +9 -0
  203. package/types/node_modules/openai/lib/responses/EventTypes.d.ts +9 -0
  204. package/types/node_modules/openai/lib/responses/ResponseStream.d.mts +59 -0
  205. package/types/node_modules/openai/lib/responses/ResponseStream.d.ts +59 -0
  206. package/types/node_modules/openai/package.json +233 -0
  207. package/types/node_modules/openai/pagination.d.mts +2 -0
  208. package/types/node_modules/openai/pagination.d.ts +2 -0
  209. package/types/node_modules/openai/realtime/index.d.mts +2 -0
  210. package/types/node_modules/openai/realtime/index.d.ts +2 -0
  211. package/types/node_modules/openai/realtime/internal-base.d.mts +45 -0
  212. package/types/node_modules/openai/realtime/internal-base.d.ts +45 -0
  213. package/types/node_modules/openai/realtime/websocket.d.mts +36 -0
  214. package/types/node_modules/openai/realtime/websocket.d.ts +36 -0
  215. package/types/node_modules/openai/realtime/ws.d.mts +27 -0
  216. package/types/node_modules/openai/realtime/ws.d.ts +27 -0
  217. package/types/node_modules/openai/resource.d.mts +2 -0
  218. package/types/node_modules/openai/resource.d.ts +2 -0
  219. package/types/node_modules/openai/resources/audio/audio.d.mts +26 -0
  220. package/types/node_modules/openai/resources/audio/audio.d.ts +26 -0
  221. package/types/node_modules/openai/resources/audio/index.d.mts +5 -0
  222. package/types/node_modules/openai/resources/audio/index.d.ts +5 -0
  223. package/types/node_modules/openai/resources/audio/speech.d.mts +64 -0
  224. package/types/node_modules/openai/resources/audio/speech.d.ts +64 -0
  225. package/types/node_modules/openai/resources/audio/transcriptions.d.mts +485 -0
  226. package/types/node_modules/openai/resources/audio/transcriptions.d.ts +485 -0
  227. package/types/node_modules/openai/resources/audio/translations.d.mts +81 -0
  228. package/types/node_modules/openai/resources/audio/translations.d.ts +81 -0
  229. package/types/node_modules/openai/resources/audio.d.mts +2 -0
  230. package/types/node_modules/openai/resources/audio.d.ts +2 -0
  231. package/types/node_modules/openai/resources/batches.d.mts +279 -0
  232. package/types/node_modules/openai/resources/batches.d.ts +279 -0
  233. package/types/node_modules/openai/resources/beta/assistants.d.mts +1232 -0
  234. package/types/node_modules/openai/resources/beta/assistants.d.ts +1232 -0
  235. package/types/node_modules/openai/resources/beta/beta.d.mts +18 -0
  236. package/types/node_modules/openai/resources/beta/beta.d.ts +18 -0
  237. package/types/node_modules/openai/resources/beta/index.d.mts +5 -0
  238. package/types/node_modules/openai/resources/beta/index.d.ts +5 -0
  239. package/types/node_modules/openai/resources/beta/realtime/index.d.mts +4 -0
  240. package/types/node_modules/openai/resources/beta/realtime/index.d.ts +4 -0
  241. package/types/node_modules/openai/resources/beta/realtime/realtime.d.mts +2332 -0
  242. package/types/node_modules/openai/resources/beta/realtime/realtime.d.ts +2332 -0
  243. package/types/node_modules/openai/resources/beta/realtime/sessions.d.mts +744 -0
  244. package/types/node_modules/openai/resources/beta/realtime/sessions.d.ts +744 -0
  245. package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts +299 -0
  246. package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts +299 -0
  247. package/types/node_modules/openai/resources/beta/realtime.d.mts +2 -0
  248. package/types/node_modules/openai/resources/beta/realtime.d.ts +2 -0
  249. package/types/node_modules/openai/resources/beta/threads/index.d.mts +4 -0
  250. package/types/node_modules/openai/resources/beta/threads/index.d.ts +4 -0
  251. package/types/node_modules/openai/resources/beta/threads/messages.d.mts +594 -0
  252. package/types/node_modules/openai/resources/beta/threads/messages.d.ts +594 -0
  253. package/types/node_modules/openai/resources/beta/threads/runs/index.d.mts +3 -0
  254. package/types/node_modules/openai/resources/beta/threads/runs/index.d.ts +3 -0
  255. package/types/node_modules/openai/resources/beta/threads/runs/runs.d.mts +733 -0
  256. package/types/node_modules/openai/resources/beta/threads/runs/runs.d.ts +733 -0
  257. package/types/node_modules/openai/resources/beta/threads/runs/steps.d.mts +615 -0
  258. package/types/node_modules/openai/resources/beta/threads/runs/steps.d.ts +615 -0
  259. package/types/node_modules/openai/resources/beta/threads/runs.d.mts +2 -0
  260. package/types/node_modules/openai/resources/beta/threads/runs.d.ts +2 -0
  261. package/types/node_modules/openai/resources/beta/threads/threads.d.mts +1044 -0
  262. package/types/node_modules/openai/resources/beta/threads/threads.d.ts +1044 -0
  263. package/types/node_modules/openai/resources/beta/threads.d.mts +2 -0
  264. package/types/node_modules/openai/resources/beta/threads.d.ts +2 -0
  265. package/types/node_modules/openai/resources/beta.d.mts +2 -0
  266. package/types/node_modules/openai/resources/beta.d.ts +2 -0
  267. package/types/node_modules/openai/resources/chat/chat.d.mts +13 -0
  268. package/types/node_modules/openai/resources/chat/chat.d.ts +13 -0
  269. package/types/node_modules/openai/resources/chat/completions/completions.d.mts +1627 -0
  270. package/types/node_modules/openai/resources/chat/completions/completions.d.ts +1627 -0
  271. package/types/node_modules/openai/resources/chat/completions/index.d.mts +4 -0
  272. package/types/node_modules/openai/resources/chat/completions/index.d.ts +4 -0
  273. package/types/node_modules/openai/resources/chat/completions/messages.d.mts +34 -0
  274. package/types/node_modules/openai/resources/chat/completions/messages.d.ts +34 -0
  275. package/types/node_modules/openai/resources/chat/completions.d.mts +2 -0
  276. package/types/node_modules/openai/resources/chat/completions.d.ts +2 -0
  277. package/types/node_modules/openai/resources/chat/index.d.mts +3 -0
  278. package/types/node_modules/openai/resources/chat/index.d.ts +3 -0
  279. package/types/node_modules/openai/resources/chat.d.mts +2 -0
  280. package/types/node_modules/openai/resources/chat.d.ts +2 -0
  281. package/types/node_modules/openai/resources/completions.d.mts +329 -0
  282. package/types/node_modules/openai/resources/completions.d.ts +329 -0
  283. package/types/node_modules/openai/resources/containers/containers.d.mts +200 -0
  284. package/types/node_modules/openai/resources/containers/containers.d.ts +200 -0
  285. package/types/node_modules/openai/resources/containers/files/content.d.mts +16 -0
  286. package/types/node_modules/openai/resources/containers/files/content.d.ts +16 -0
  287. package/types/node_modules/openai/resources/containers/files/files.d.mts +148 -0
  288. package/types/node_modules/openai/resources/containers/files/files.d.ts +148 -0
  289. package/types/node_modules/openai/resources/containers/files/index.d.mts +3 -0
  290. package/types/node_modules/openai/resources/containers/files/index.d.ts +3 -0
  291. package/types/node_modules/openai/resources/containers/files.d.mts +2 -0
  292. package/types/node_modules/openai/resources/containers/files.d.ts +2 -0
  293. package/types/node_modules/openai/resources/containers/index.d.mts +3 -0
  294. package/types/node_modules/openai/resources/containers/index.d.ts +3 -0
  295. package/types/node_modules/openai/resources/containers.d.mts +2 -0
  296. package/types/node_modules/openai/resources/containers.d.ts +2 -0
  297. package/types/node_modules/openai/resources/conversations/conversations.d.mts +176 -0
  298. package/types/node_modules/openai/resources/conversations/conversations.d.ts +176 -0
  299. package/types/node_modules/openai/resources/conversations/index.d.mts +3 -0
  300. package/types/node_modules/openai/resources/conversations/index.d.ts +3 -0
  301. package/types/node_modules/openai/resources/conversations/items.d.mts +367 -0
  302. package/types/node_modules/openai/resources/conversations/items.d.ts +367 -0
  303. package/types/node_modules/openai/resources/conversations.d.mts +2 -0
  304. package/types/node_modules/openai/resources/conversations.d.ts +2 -0
  305. package/types/node_modules/openai/resources/embeddings.d.mts +113 -0
  306. package/types/node_modules/openai/resources/embeddings.d.ts +113 -0
  307. package/types/node_modules/openai/resources/evals/evals.d.mts +735 -0
  308. package/types/node_modules/openai/resources/evals/evals.d.ts +735 -0
  309. package/types/node_modules/openai/resources/evals/index.d.mts +3 -0
  310. package/types/node_modules/openai/resources/evals/index.d.ts +3 -0
  311. package/types/node_modules/openai/resources/evals/runs/index.d.mts +3 -0
  312. package/types/node_modules/openai/resources/evals/runs/index.d.ts +3 -0
  313. package/types/node_modules/openai/resources/evals/runs/output-items.d.mts +382 -0
  314. package/types/node_modules/openai/resources/evals/runs/output-items.d.ts +382 -0
  315. package/types/node_modules/openai/resources/evals/runs/runs.d.mts +2290 -0
  316. package/types/node_modules/openai/resources/evals/runs/runs.d.ts +2290 -0
  317. package/types/node_modules/openai/resources/evals/runs.d.mts +2 -0
  318. package/types/node_modules/openai/resources/evals/runs.d.ts +2 -0
  319. package/types/node_modules/openai/resources/evals.d.mts +2 -0
  320. package/types/node_modules/openai/resources/evals.d.ts +2 -0
  321. package/types/node_modules/openai/resources/files.d.mts +164 -0
  322. package/types/node_modules/openai/resources/files.d.ts +164 -0
  323. package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.mts +10 -0
  324. package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.ts +10 -0
  325. package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.mts +119 -0
  326. package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.ts +119 -0
  327. package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.mts +3 -0
  328. package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.ts +3 -0
  329. package/types/node_modules/openai/resources/fine-tuning/alpha.d.mts +2 -0
  330. package/types/node_modules/openai/resources/fine-tuning/alpha.d.ts +2 -0
  331. package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.mts +10 -0
  332. package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.ts +10 -0
  333. package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.mts +3 -0
  334. package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.ts +3 -0
  335. package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.mts +160 -0
  336. package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.ts +160 -0
  337. package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.mts +2 -0
  338. package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.ts +2 -0
  339. package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.mts +22 -0
  340. package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +22 -0
  341. package/types/node_modules/openai/resources/fine-tuning/index.d.mts +6 -0
  342. package/types/node_modules/openai/resources/fine-tuning/index.d.ts +6 -0
  343. package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.mts +74 -0
  344. package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +74 -0
  345. package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.mts +3 -0
  346. package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +3 -0
  347. package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.mts +528 -0
  348. package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +528 -0
  349. package/types/node_modules/openai/resources/fine-tuning/jobs.d.mts +2 -0
  350. package/types/node_modules/openai/resources/fine-tuning/jobs.d.ts +2 -0
  351. package/types/node_modules/openai/resources/fine-tuning/methods.d.mts +120 -0
  352. package/types/node_modules/openai/resources/fine-tuning/methods.d.ts +120 -0
  353. package/types/node_modules/openai/resources/fine-tuning.d.mts +2 -0
  354. package/types/node_modules/openai/resources/fine-tuning.d.ts +2 -0
  355. package/types/node_modules/openai/resources/graders/grader-models.d.mts +304 -0
  356. package/types/node_modules/openai/resources/graders/grader-models.d.ts +304 -0
  357. package/types/node_modules/openai/resources/graders/graders.d.mts +10 -0
  358. package/types/node_modules/openai/resources/graders/graders.d.ts +10 -0
  359. package/types/node_modules/openai/resources/graders/index.d.mts +3 -0
  360. package/types/node_modules/openai/resources/graders/index.d.ts +3 -0
  361. package/types/node_modules/openai/resources/graders.d.mts +2 -0
  362. package/types/node_modules/openai/resources/graders.d.ts +2 -0
  363. package/types/node_modules/openai/resources/images.d.mts +653 -0
  364. package/types/node_modules/openai/resources/images.d.ts +653 -0
  365. package/types/node_modules/openai/resources/index.d.mts +22 -0
  366. package/types/node_modules/openai/resources/index.d.ts +22 -0
  367. package/types/node_modules/openai/resources/models.d.mts +52 -0
  368. package/types/node_modules/openai/resources/models.d.ts +52 -0
  369. package/types/node_modules/openai/resources/moderations.d.mts +295 -0
  370. package/types/node_modules/openai/resources/moderations.d.ts +295 -0
  371. package/types/node_modules/openai/resources/realtime/client-secrets.d.mts +594 -0
  372. package/types/node_modules/openai/resources/realtime/client-secrets.d.ts +594 -0
  373. package/types/node_modules/openai/resources/realtime/index.d.mts +3 -0
  374. package/types/node_modules/openai/resources/realtime/index.d.ts +3 -0
  375. package/types/node_modules/openai/resources/realtime/realtime.d.mts +3828 -0
  376. package/types/node_modules/openai/resources/realtime/realtime.d.ts +3828 -0
  377. package/types/node_modules/openai/resources/realtime.d.mts +2 -0
  378. package/types/node_modules/openai/resources/realtime.d.ts +2 -0
  379. package/types/node_modules/openai/resources/responses/index.d.mts +3 -0
  380. package/types/node_modules/openai/resources/responses/index.d.ts +3 -0
  381. package/types/node_modules/openai/resources/responses/input-items.d.mts +65 -0
  382. package/types/node_modules/openai/resources/responses/input-items.d.ts +65 -0
  383. package/types/node_modules/openai/resources/responses/responses.d.mts +4705 -0
  384. package/types/node_modules/openai/resources/responses/responses.d.ts +4705 -0
  385. package/types/node_modules/openai/resources/responses.d.mts +2 -0
  386. package/types/node_modules/openai/resources/responses.d.ts +2 -0
  387. package/types/node_modules/openai/resources/shared.d.mts +265 -0
  388. package/types/node_modules/openai/resources/shared.d.ts +265 -0
  389. package/types/node_modules/openai/resources/uploads/index.d.mts +3 -0
  390. package/types/node_modules/openai/resources/uploads/index.d.ts +3 -0
  391. package/types/node_modules/openai/resources/uploads/parts.d.mts +51 -0
  392. package/types/node_modules/openai/resources/uploads/parts.d.ts +51 -0
  393. package/types/node_modules/openai/resources/uploads/uploads.d.mts +157 -0
  394. package/types/node_modules/openai/resources/uploads/uploads.d.ts +157 -0
  395. package/types/node_modules/openai/resources/uploads.d.mts +2 -0
  396. package/types/node_modules/openai/resources/uploads.d.ts +2 -0
  397. package/types/node_modules/openai/resources/vector-stores/file-batches.d.mts +172 -0
  398. package/types/node_modules/openai/resources/vector-stores/file-batches.d.ts +172 -0
  399. package/types/node_modules/openai/resources/vector-stores/files.d.mts +231 -0
  400. package/types/node_modules/openai/resources/vector-stores/files.d.ts +231 -0
  401. package/types/node_modules/openai/resources/vector-stores/index.d.mts +4 -0
  402. package/types/node_modules/openai/resources/vector-stores/index.d.ts +4 -0
  403. package/types/node_modules/openai/resources/vector-stores/vector-stores.d.mts +373 -0
  404. package/types/node_modules/openai/resources/vector-stores/vector-stores.d.ts +373 -0
  405. package/types/node_modules/openai/resources/vector-stores.d.mts +2 -0
  406. package/types/node_modules/openai/resources/vector-stores.d.ts +2 -0
  407. package/types/node_modules/openai/resources/webhooks.d.mts +587 -0
  408. package/types/node_modules/openai/resources/webhooks.d.ts +587 -0
  409. package/types/node_modules/openai/resources.d.mts +2 -0
  410. package/types/node_modules/openai/resources.d.ts +2 -0
  411. package/types/node_modules/openai/src/_vendor/zod-to-json-schema/LICENSE +15 -0
  412. package/types/node_modules/openai/streaming.d.mts +2 -0
  413. package/types/node_modules/openai/streaming.d.ts +2 -0
  414. package/types/node_modules/openai/uploads.d.mts +2 -0
  415. package/types/node_modules/openai/uploads.d.ts +2 -0
  416. package/types/node_modules/openai/version.d.mts +2 -0
  417. package/types/node_modules/openai/version.d.ts +2 -0
package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts
@@ -0,0 +1,299 @@
+ import { APIResource } from "../../../core/resource.mjs";
+ import { APIPromise } from "../../../core/api-promise.mjs";
+ import { RequestOptions } from "../../../internal/request-options.mjs";
+ export declare class TranscriptionSessions extends APIResource {
+ /**
+ * Create an ephemeral API token for use in client-side applications with the
+ * Realtime API specifically for realtime transcriptions. Can be configured with
+ * the same session parameters as the `transcription_session.update` client event.
+ *
+ * It responds with a session object, plus a `client_secret` key which contains a
+ * usable ephemeral API token that can be used to authenticate browser clients for
+ * the Realtime API.
+ *
+ * @example
+ * ```ts
+ * const transcriptionSession =
+ * await client.beta.realtime.transcriptionSessions.create();
+ * ```
+ */
+ create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
+ }
+ /**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+ export interface TranscriptionSession {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ client_secret: TranscriptionSession.ClientSecret;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+ */
+ input_audio_format?: string;
+ /**
+ * Configuration of the transcription model.
+ */
+ input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ turn_detection?: TranscriptionSession.TurnDetection;
+ }
+ export declare namespace TranscriptionSession {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ interface ClientSecret {
+ /**
+ * Timestamp for when the token expires. Currently, all tokens expire after one
+ * minute.
+ */
+ expires_at: number;
+ /**
+ * Ephemeral key usable in client environments to authenticate connections to the
+ * Realtime API. Use this in client-side environments rather than a standard API
+ * token, which should only be used server-side.
+ */
+ value: string;
+ }
+ /**
+ * Configuration of the transcription model.
+ */
+ interface InputAudioTranscription {
+ /**
+ * The language of the input audio. Supplying the input language in
+ * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ * format will improve accuracy and latency.
+ */
+ language?: string;
+ /**
+ * The model to use for transcription. Can be `gpt-4o-transcribe`,
+ * `gpt-4o-mini-transcribe`, or `whisper-1`.
+ */
+ model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+ /**
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. The
+ * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ * should match the audio language.
+ */
+ prompt?: string;
+ }
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ interface TurnDetection {
+ /**
+ * Amount of audio to include before the VAD detected speech (in milliseconds).
+ * Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ * With shorter values the model will respond more quickly, but may jump in on
+ * short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
+ * threshold will require louder audio to activate the model, and thus might
+ * perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection, only `server_vad` is currently supported.
+ */
+ type?: string;
+ }
+ }
+ export interface TranscriptionSessionCreateParams {
+ /**
+ * Configuration options for the generated client secret.
+ */
+ client_secret?: TranscriptionSessionCreateParams.ClientSecret;
+ /**
+ * The set of items to include in the transcription. Currently available items are:
+ *
+ * - `item.input_audio_transcription.logprobs`
+ */
+ include?: Array<string>;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+ * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+ * (mono), and little-endian byte order.
+ */
+ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
+ /**
+ * Configuration for input audio transcription. The client can optionally set the
+ * language and prompt for transcription, these offer additional guidance to the
+ * transcription service.
+ */
+ input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response. Server VAD means that the model will detect the start and end of
+ * speech based on audio volume and respond at the end of user speech. Semantic VAD
+ * is more advanced and uses a turn detection model (in conjunction with VAD) to
+ * semantically estimate whether the user has finished speaking, then dynamically
+ * sets a timeout based on this probability. For example, if user audio trails off
+ * with "uhhm", the model will score a low probability of turn end and wait longer
+ * for the user to continue speaking. This can be useful for more natural
+ * conversations, but may have a higher latency.
+ */
+ turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
+ }
+ export declare namespace TranscriptionSessionCreateParams {
+ /**
+ * Configuration options for the generated client secret.
+ */
+ interface ClientSecret {
+ /**
+ * Configuration for the ephemeral token expiration.
+ */
+ expires_at?: ClientSecret.ExpiresAt;
+ }
+ namespace ClientSecret {
+ /**
+ * Configuration for the ephemeral token expiration.
+ */
+ interface ExpiresAt {
+ /**
+ * The anchor point for the ephemeral token expiration. Only `created_at` is
+ * currently supported.
+ */
+ anchor?: 'created_at';
+ /**
+ * The number of seconds from the anchor point to the expiration. Select a value
+ * between `10` and `7200`.
+ */
+ seconds?: number;
+ }
+ }
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface InputAudioNoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: 'near_field' | 'far_field';
+ }
+ /**
+ * Configuration for input audio transcription. The client can optionally set the
+ * language and prompt for transcription, these offer additional guidance to the
+ * transcription service.
+ */
+ interface InputAudioTranscription {
+ /**
+ * The language of the input audio. Supplying the input language in
+ * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ * format will improve accuracy and latency.
+ */
+ language?: string;
+ /**
+ * The model to use for transcription, current options are `gpt-4o-transcribe`,
+ * `gpt-4o-mini-transcribe`, and `whisper-1`.
+ */
+ model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+ /**
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. For `whisper-1`, the
+ * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ * "expect words related to technology".
+ */
+ prompt?: string;
+ }
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response. Server VAD means that the model will detect the start and end of
+ * speech based on audio volume and respond at the end of user speech. Semantic VAD
+ * is more advanced and uses a turn detection model (in conjunction with VAD) to
+ * semantically estimate whether the user has finished speaking, then dynamically
+ * sets a timeout based on this probability. For example, if user audio trails off
+ * with "uhhm", the model will score a low probability of turn end and wait longer
+ * for the user to continue speaking. This can be useful for more natural
+ * conversations, but may have a higher latency.
+ */
+ interface TurnDetection {
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs. Not available for transcription sessions.
+ */
+ create_response?: boolean;
+ /**
+ * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+ * will wait longer for the user to continue speaking, `high` will respond more
+ * quickly. `auto` is the default and is equivalent to `medium`.
+ */
+ eagerness?: 'low' | 'medium' | 'high' | 'auto';
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs. Not available for transcription sessions.
+ */
+ interrupt_response?: boolean;
+ /**
+ * Used only for `server_vad` mode. Amount of audio to include before the VAD
+ * detected speech (in milliseconds). Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+ * milliseconds). Defaults to 500ms. With shorter values the model will respond
+ * more quickly, but may jump in on short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
+ * defaults to 0.5. A higher threshold will require louder audio to activate the
+ * model, and thus might perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection.
+ */
+ type?: 'server_vad' | 'semantic_vad';
+ }
+ }
+ export declare namespace TranscriptionSessions {
+ export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
+ }
+ //# sourceMappingURL=transcription-sessions.d.mts.map
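For orientation, the declarations in this hunk resolve to the call shape sketched below. This is a minimal sketch, assuming a server-side `OpenAI` client constructed from an environment API key (the `@example` in the JSDoc above uses the same access path); the parameter values are illustrative, not SDK defaults.

```ts
import OpenAI from 'openai';

// Assumed setup: runs server-side. The standard API key must stay on the
// server; only the ephemeral client_secret.value is handed to the browser.
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function main() {
  // create() accepts TranscriptionSessionCreateParams and returns
  // APIPromise<TranscriptionSession>, per the declarations above.
  const transcriptionSession = await client.beta.realtime.transcriptionSessions.create({
    input_audio_format: 'pcm16',
    input_audio_transcription: { model: 'gpt-4o-transcribe', language: 'en' },
    turn_detection: {
      type: 'server_vad',
      threshold: 0.5, // documented default
      prefix_padding_ms: 300, // documented default
      silence_duration_ms: 500, // documented default
    },
  });

  // Ephemeral token for authenticating browser clients; see ClientSecret above.
  console.log(transcriptionSession.client_secret.value);
}

main();
```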
package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts
@@ -0,0 +1,299 @@
+ import { APIResource } from "../../../core/resource.js";
+ import { APIPromise } from "../../../core/api-promise.js";
+ import { RequestOptions } from "../../../internal/request-options.js";
+ export declare class TranscriptionSessions extends APIResource {
+ /**
+ * Create an ephemeral API token for use in client-side applications with the
+ * Realtime API specifically for realtime transcriptions. Can be configured with
+ * the same session parameters as the `transcription_session.update` client event.
+ *
+ * It responds with a session object, plus a `client_secret` key which contains a
+ * usable ephemeral API token that can be used to authenticate browser clients for
+ * the Realtime API.
+ *
+ * @example
+ * ```ts
+ * const transcriptionSession =
+ * await client.beta.realtime.transcriptionSessions.create();
+ * ```
+ */
+ create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
+ }
+ /**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+ export interface TranscriptionSession {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ client_secret: TranscriptionSession.ClientSecret;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+ */
+ input_audio_format?: string;
+ /**
+ * Configuration of the transcription model.
+ */
+ input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ turn_detection?: TranscriptionSession.TurnDetection;
+ }
+ export declare namespace TranscriptionSession {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ interface ClientSecret {
+ /**
+ * Timestamp for when the token expires. Currently, all tokens expire after one
+ * minute.
+ */
+ expires_at: number;
+ /**
+ * Ephemeral key usable in client environments to authenticate connections to the
+ * Realtime API. Use this in client-side environments rather than a standard API
+ * token, which should only be used server-side.
+ */
+ value: string;
+ }
+ /**
+ * Configuration of the transcription model.
+ */
+ interface InputAudioTranscription {
+ /**
+ * The language of the input audio. Supplying the input language in
+ * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ * format will improve accuracy and latency.
+ */
+ language?: string;
+ /**
+ * The model to use for transcription. Can be `gpt-4o-transcribe`,
+ * `gpt-4o-mini-transcribe`, or `whisper-1`.
+ */
+ model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+ /**
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. The
+ * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+ * should match the audio language.
+ */
+ prompt?: string;
+ }
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ interface TurnDetection {
+ /**
+ * Amount of audio to include before the VAD detected speech (in milliseconds).
+ * Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ * With shorter values the model will respond more quickly, but may jump in on
+ * short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
+ * threshold will require louder audio to activate the model, and thus might
+ * perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection, only `server_vad` is currently supported.
+ */
+ type?: string;
+ }
+ }
+ export interface TranscriptionSessionCreateParams {
+ /**
+ * Configuration options for the generated client secret.
+ */
+ client_secret?: TranscriptionSessionCreateParams.ClientSecret;
+ /**
+ * The set of items to include in the transcription. Currently available items are:
+ *
+ * - `item.input_audio_transcription.logprobs`
+ */
+ include?: Array<string>;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+ * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+ * (mono), and little-endian byte order.
+ */
+ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
+ /**
+ * Configuration for input audio transcription. The client can optionally set the
+ * language and prompt for transcription, these offer additional guidance to the
+ * transcription service.
+ */
+ input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response. Server VAD means that the model will detect the start and end of
+ * speech based on audio volume and respond at the end of user speech. Semantic VAD
+ * is more advanced and uses a turn detection model (in conjunction with VAD) to
+ * semantically estimate whether the user has finished speaking, then dynamically
+ * sets a timeout based on this probability. For example, if user audio trails off
+ * with "uhhm", the model will score a low probability of turn end and wait longer
+ * for the user to continue speaking. This can be useful for more natural
+ * conversations, but may have a higher latency.
+ */
+ turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
+ }
+ export declare namespace TranscriptionSessionCreateParams {
+ /**
+ * Configuration options for the generated client secret.
+ */
+ interface ClientSecret {
+ /**
+ * Configuration for the ephemeral token expiration.
+ */
+ expires_at?: ClientSecret.ExpiresAt;
+ }
+ namespace ClientSecret {
+ /**
+ * Configuration for the ephemeral token expiration.
+ */
+ interface ExpiresAt {
+ /**
+ * The anchor point for the ephemeral token expiration. Only `created_at` is
+ * currently supported.
+ */
+ anchor?: 'created_at';
+ /**
+ * The number of seconds from the anchor point to the expiration. Select a value
+ * between `10` and `7200`.
+ */
+ seconds?: number;
+ }
+ }
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface InputAudioNoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: 'near_field' | 'far_field';
+ }
+ /**
+ * Configuration for input audio transcription. The client can optionally set the
+ * language and prompt for transcription, these offer additional guidance to the
+ * transcription service.
+ */
+ interface InputAudioTranscription {
+ /**
+ * The language of the input audio. Supplying the input language in
+ * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ * format will improve accuracy and latency.
+ */
+ language?: string;
+ /**
+ * The model to use for transcription, current options are `gpt-4o-transcribe`,
+ * `gpt-4o-mini-transcribe`, and `whisper-1`.
+ */
+ model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+ /**
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. For `whisper-1`, the
+ * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ * "expect words related to technology".
+ */
+ prompt?: string;
+ }
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response. Server VAD means that the model will detect the start and end of
+ * speech based on audio volume and respond at the end of user speech. Semantic VAD
+ * is more advanced and uses a turn detection model (in conjunction with VAD) to
+ * semantically estimate whether the user has finished speaking, then dynamically
+ * sets a timeout based on this probability. For example, if user audio trails off
+ * with "uhhm", the model will score a low probability of turn end and wait longer
+ * for the user to continue speaking. This can be useful for more natural
+ * conversations, but may have a higher latency.
+ */
+ interface TurnDetection {
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs. Not available for transcription sessions.
+ */
+ create_response?: boolean;
+ /**
+ * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+ * will wait longer for the user to continue speaking, `high` will respond more
+ * quickly. `auto` is the default and is equivalent to `medium`.
+ */
+ eagerness?: 'low' | 'medium' | 'high' | 'auto';
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs. Not available for transcription sessions.
+ */
+ interrupt_response?: boolean;
+ /**
+ * Used only for `server_vad` mode. Amount of audio to include before the VAD
+ * detected speech (in milliseconds). Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+ * milliseconds). Defaults to 500ms. With shorter values the model will respond
+ * more quickly, but may jump in on short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
+ * defaults to 0.5. A higher threshold will require louder audio to activate the
+ * model, and thus might perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection.
+ */
+ type?: 'server_vad' | 'semantic_vad';
+ }
+ }
+ export declare namespace TranscriptionSessions {
+ export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
+ }
+ //# sourceMappingURL=transcription-sessions.d.ts.map
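The `TurnDetection` documentation in these hunks also describes a `semantic_vad` mode. Below is a hedged sketch of expressing that mode as a typed params object; the deep import path is an assumption based on this package's layout, and the values are illustrative.

```ts
// Assumed deep-import path into the vendored declarations shown above.
import type { TranscriptionSessionCreateParams } from 'openai/resources/beta/realtime/transcription-sessions';

const semanticParams: TranscriptionSessionCreateParams = {
  include: ['item.input_audio_transcription.logprobs'],
  input_audio_noise_reduction: { type: 'near_field' },
  turn_detection: {
    type: 'semantic_vad',
    // Lower eagerness waits longer on trailing audio such as "uhhm".
    eagerness: 'low',
  },
  client_secret: {
    // Ten-minute token, matching the default TTL noted above.
    expires_at: { anchor: 'created_at', seconds: 600 },
  },
};
```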
package/types/node_modules/openai/resources/beta/realtime.d.mts
@@ -0,0 +1,2 @@
+ export * from "./realtime/index.mjs";
+ //# sourceMappingURL=realtime.d.mts.map
package/types/node_modules/openai/resources/beta/realtime.d.ts
@@ -0,0 +1,2 @@
+ export * from "./realtime/index.js";
+ //# sourceMappingURL=realtime.d.ts.map
package/types/node_modules/openai/resources/beta/threads/index.d.mts
@@ -0,0 +1,4 @@
+ export { Messages, type Annotation, type AnnotationDelta, type FileCitationAnnotation, type FileCitationDeltaAnnotation, type FilePathAnnotation, type FilePathDeltaAnnotation, type ImageFile, type ImageFileContentBlock, type ImageFileDelta, type ImageFileDeltaBlock, type ImageURL, type ImageURLContentBlock, type ImageURLDelta, type ImageURLDeltaBlock, type Message, type MessageContent, type MessageContentDelta, type MessageContentPartParam, type MessageDeleted, type MessageDelta, type MessageDeltaEvent, type RefusalContentBlock, type RefusalDeltaBlock, type Text, type TextContentBlock, type TextContentBlockParam, type TextDelta, type TextDeltaBlock, type MessageCreateParams, type MessageRetrieveParams, type MessageUpdateParams, type MessageListParams, type MessageDeleteParams, type MessagesPage, } from "./messages.mjs";
+ export { Runs, type RequiredActionFunctionToolCall, type Run, type RunStatus, type RunCreateParams, type RunCreateParamsNonStreaming, type RunCreateParamsStreaming, type RunRetrieveParams, type RunUpdateParams, type RunListParams, type RunCancelParams, type RunSubmitToolOutputsParams, type RunSubmitToolOutputsParamsNonStreaming, type RunSubmitToolOutputsParamsStreaming, type RunsPage, type RunCreateAndPollParams, type RunCreateAndStreamParams, type RunStreamParams, type RunSubmitToolOutputsAndPollParams, type RunSubmitToolOutputsStreamParams, } from "./runs/index.mjs";
+ export { Threads, type AssistantResponseFormatOption, type AssistantToolChoice, type AssistantToolChoiceFunction, type AssistantToolChoiceOption, type Thread, type ThreadDeleted, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, type ThreadCreateAndRunParamsNonStreaming, type ThreadCreateAndRunParamsStreaming, type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from "./threads.mjs";
+ //# sourceMappingURL=index.d.mts.map
package/types/node_modules/openai/resources/beta/threads/index.d.ts
@@ -0,0 +1,4 @@
+ export { Messages, type Annotation, type AnnotationDelta, type FileCitationAnnotation, type FileCitationDeltaAnnotation, type FilePathAnnotation, type FilePathDeltaAnnotation, type ImageFile, type ImageFileContentBlock, type ImageFileDelta, type ImageFileDeltaBlock, type ImageURL, type ImageURLContentBlock, type ImageURLDelta, type ImageURLDeltaBlock, type Message, type MessageContent, type MessageContentDelta, type MessageContentPartParam, type MessageDeleted, type MessageDelta, type MessageDeltaEvent, type RefusalContentBlock, type RefusalDeltaBlock, type Text, type TextContentBlock, type TextContentBlockParam, type TextDelta, type TextDeltaBlock, type MessageCreateParams, type MessageRetrieveParams, type MessageUpdateParams, type MessageListParams, type MessageDeleteParams, type MessagesPage, } from "./messages.js";
+ export { Runs, type RequiredActionFunctionToolCall, type Run, type RunStatus, type RunCreateParams, type RunCreateParamsNonStreaming, type RunCreateParamsStreaming, type RunRetrieveParams, type RunUpdateParams, type RunListParams, type RunCancelParams, type RunSubmitToolOutputsParams, type RunSubmitToolOutputsParamsNonStreaming, type RunSubmitToolOutputsParamsStreaming, type RunsPage, type RunCreateAndPollParams, type RunCreateAndStreamParams, type RunStreamParams, type RunSubmitToolOutputsAndPollParams, type RunSubmitToolOutputsStreamParams, } from "./runs/index.js";
+ export { Threads, type AssistantResponseFormatOption, type AssistantToolChoice, type AssistantToolChoiceFunction, type AssistantToolChoiceOption, type Thread, type ThreadDeleted, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, type ThreadCreateAndRunParamsNonStreaming, type ThreadCreateAndRunParamsStreaming, type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from "./threads.js";
+ //# sourceMappingURL=index.d.ts.map
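The last four hunks are thin barrel files: each `.d.mts`/`.d.ts` pair re-exports the same names so that ESM and CJS consumers resolve identical types. As a hedged sketch (the deep import path is an assumption based on the package layout), the threads barrel can be consumed like this:

```ts
// Type-only imports resolved through the barrel file added above.
import type { Run, RunStatus } from 'openai/resources/beta/threads/index';

// Hypothetical helper narrowing the re-exported RunStatus union.
const isTerminal = (status: RunStatus): boolean =>
  ['completed', 'failed', 'cancelled', 'expired'].includes(status);

const describe = (run: Run): string =>
  `${run.id}: ${run.status}${isTerminal(run.status) ? ' (terminal)' : ''}`;
```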