@superinterface/react 5.1.2 → 5.2.0-beta.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (417)
  1. package/dist/index.cjs +1 -1
  2. package/dist/index.cjs.map +1 -1
  3. package/dist/index.d.cts +5 -5
  4. package/dist/index.d.ts +5 -5
  5. package/dist/index.js +1 -1
  6. package/dist/index.js.map +1 -1
  7. package/dist/server.d.cts +1 -1
  8. package/dist/server.d.ts +1 -1
  9. package/package.json +8 -6
  10. package/types/index.d.ts +4 -0
  11. package/types/node_modules/openai/LICENSE +201 -0
  12. package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.mts +7 -0
  13. package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.ts +7 -0
  14. package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.mts +32 -0
  15. package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.ts +32 -0
  16. package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.mts +21 -0
  17. package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.ts +21 -0
  18. package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.mts +12 -0
  19. package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.ts +12 -0
  20. package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.mts +38 -0
  21. package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.ts +38 -0
  22. package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.mts +38 -0
  23. package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.ts +38 -0
  24. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.mts +3 -0
  25. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.ts +3 -0
  26. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.mts +13 -0
  27. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.ts +13 -0
  28. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.mts +15 -0
  29. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.ts +15 -0
  30. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.mts +5 -0
  31. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.ts +5 -0
  32. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.mts +4 -0
  33. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.ts +4 -0
  34. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.mts +4 -0
  35. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.ts +4 -0
  36. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.mts +16 -0
  37. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.ts +16 -0
  38. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.mts +7 -0
  39. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.ts +7 -0
  40. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.mts +5 -0
  41. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.ts +5 -0
  42. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.mts +7 -0
  43. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.ts +7 -0
  44. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.mts +9 -0
  45. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.ts +9 -0
  46. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.mts +10 -0
  47. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.ts +10 -0
  48. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.mts +16 -0
  49. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.ts +16 -0
  50. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.mts +7 -0
  51. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.ts +7 -0
  52. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.mts +5 -0
  53. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.ts +5 -0
  54. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.mts +6 -0
  55. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.ts +6 -0
  56. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.mts +11 -0
  57. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.ts +11 -0
  58. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.mts +14 -0
  59. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.ts +14 -0
  60. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.mts +11 -0
  61. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.ts +11 -0
  62. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.mts +5 -0
  63. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.ts +5 -0
  64. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.mts +6 -0
  65. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.ts +6 -0
  66. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.mts +5 -0
  67. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.ts +5 -0
  68. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.mts +4 -0
  69. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.ts +4 -0
  70. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.mts +14 -0
  71. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.ts +14 -0
  72. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.mts +14 -0
  73. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.ts +14 -0
  74. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.mts +70 -0
  75. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.ts +70 -0
  76. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.mts +14 -0
  77. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.ts +14 -0
  78. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.mts +5 -0
  79. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.ts +5 -0
  80. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.mts +24 -0
  81. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.ts +24 -0
  82. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.mts +3 -0
  83. package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.ts +3 -0
  84. package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.mts +4 -0
  85. package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.ts +4 -0
  86. package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.mts +11 -0
  87. package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.ts +11 -0
  88. package/types/node_modules/openai/api-promise.d.mts +2 -0
  89. package/types/node_modules/openai/api-promise.d.ts +2 -0
  90. package/types/node_modules/openai/azure.d.mts +63 -0
  91. package/types/node_modules/openai/azure.d.ts +63 -0
  92. package/types/node_modules/openai/beta/realtime/index.d.mts +2 -0
  93. package/types/node_modules/openai/beta/realtime/index.d.ts +2 -0
  94. package/types/node_modules/openai/beta/realtime/internal-base.d.mts +45 -0
  95. package/types/node_modules/openai/beta/realtime/internal-base.d.ts +45 -0
  96. package/types/node_modules/openai/beta/realtime/websocket.d.mts +36 -0
  97. package/types/node_modules/openai/beta/realtime/websocket.d.ts +36 -0
  98. package/types/node_modules/openai/beta/realtime/ws.d.mts +27 -0
  99. package/types/node_modules/openai/beta/realtime/ws.d.ts +27 -0
  100. package/types/node_modules/openai/client.d.mts +297 -0
  101. package/types/node_modules/openai/client.d.ts +297 -0
  102. package/types/node_modules/openai/core/api-promise.d.mts +49 -0
  103. package/types/node_modules/openai/core/api-promise.d.ts +49 -0
  104. package/types/node_modules/openai/core/error.d.mts +59 -0
  105. package/types/node_modules/openai/core/error.d.ts +59 -0
  106. package/types/node_modules/openai/core/pagination.d.mts +89 -0
  107. package/types/node_modules/openai/core/pagination.d.ts +89 -0
  108. package/types/node_modules/openai/core/resource.d.mts +6 -0
  109. package/types/node_modules/openai/core/resource.d.ts +6 -0
  110. package/types/node_modules/openai/core/streaming.d.mts +33 -0
  111. package/types/node_modules/openai/core/streaming.d.ts +33 -0
  112. package/types/node_modules/openai/core/uploads.d.mts +3 -0
  113. package/types/node_modules/openai/core/uploads.d.ts +3 -0
  114. package/types/node_modules/openai/error.d.mts +2 -0
  115. package/types/node_modules/openai/error.d.ts +2 -0
  116. package/types/node_modules/openai/helpers/audio.d.mts +9 -0
  117. package/types/node_modules/openai/helpers/audio.d.ts +9 -0
  118. package/types/node_modules/openai/helpers/zod.d.mts +70 -0
  119. package/types/node_modules/openai/helpers/zod.d.ts +70 -0
  120. package/types/node_modules/openai/index.d.mts +8 -0
  121. package/types/node_modules/openai/index.d.ts +8 -0
  122. package/types/node_modules/openai/internal/builtin-types.d.mts +73 -0
  123. package/types/node_modules/openai/internal/builtin-types.d.ts +73 -0
  124. package/types/node_modules/openai/internal/decoders/line.d.mts +17 -0
  125. package/types/node_modules/openai/internal/decoders/line.d.ts +17 -0
  126. package/types/node_modules/openai/internal/detect-platform.d.mts +15 -0
  127. package/types/node_modules/openai/internal/detect-platform.d.ts +15 -0
  128. package/types/node_modules/openai/internal/errors.d.mts +3 -0
  129. package/types/node_modules/openai/internal/errors.d.ts +3 -0
  130. package/types/node_modules/openai/internal/headers.d.mts +20 -0
  131. package/types/node_modules/openai/internal/headers.d.ts +20 -0
  132. package/types/node_modules/openai/internal/parse.d.mts +17 -0
  133. package/types/node_modules/openai/internal/parse.d.ts +17 -0
  134. package/types/node_modules/openai/internal/qs/formats.d.mts +7 -0
  135. package/types/node_modules/openai/internal/qs/formats.d.ts +7 -0
  136. package/types/node_modules/openai/internal/qs/index.d.mts +10 -0
  137. package/types/node_modules/openai/internal/qs/index.d.ts +10 -0
  138. package/types/node_modules/openai/internal/qs/stringify.d.mts +3 -0
  139. package/types/node_modules/openai/internal/qs/stringify.d.ts +3 -0
  140. package/types/node_modules/openai/internal/qs/types.d.mts +57 -0
  141. package/types/node_modules/openai/internal/qs/types.d.ts +57 -0
  142. package/types/node_modules/openai/internal/qs/utils.d.mts +15 -0
  143. package/types/node_modules/openai/internal/qs/utils.d.ts +15 -0
  144. package/types/node_modules/openai/internal/request-options.d.mts +78 -0
  145. package/types/node_modules/openai/internal/request-options.d.ts +78 -0
  146. package/types/node_modules/openai/internal/shim-types.d.mts +17 -0
  147. package/types/node_modules/openai/internal/shim-types.d.ts +17 -0
  148. package/types/node_modules/openai/internal/shims.d.mts +20 -0
  149. package/types/node_modules/openai/internal/shims.d.ts +20 -0
  150. package/types/node_modules/openai/internal/stream-utils.d.mts +8 -0
  151. package/types/node_modules/openai/internal/stream-utils.d.ts +8 -0
  152. package/types/node_modules/openai/internal/to-file.d.mts +45 -0
  153. package/types/node_modules/openai/internal/to-file.d.ts +45 -0
  154. package/types/node_modules/openai/internal/types.d.mts +69 -0
  155. package/types/node_modules/openai/internal/types.d.ts +69 -0
  156. package/types/node_modules/openai/internal/uploads.d.mts +42 -0
  157. package/types/node_modules/openai/internal/uploads.d.ts +42 -0
  158. package/types/node_modules/openai/internal/utils/base64.d.mts +9 -0
  159. package/types/node_modules/openai/internal/utils/base64.d.ts +9 -0
  160. package/types/node_modules/openai/internal/utils/bytes.d.mts +4 -0
  161. package/types/node_modules/openai/internal/utils/bytes.d.ts +4 -0
  162. package/types/node_modules/openai/internal/utils/env.d.mts +9 -0
  163. package/types/node_modules/openai/internal/utils/env.d.ts +9 -0
  164. package/types/node_modules/openai/internal/utils/log.d.mts +37 -0
  165. package/types/node_modules/openai/internal/utils/log.d.ts +37 -0
  166. package/types/node_modules/openai/internal/utils/path.d.mts +15 -0
  167. package/types/node_modules/openai/internal/utils/path.d.ts +15 -0
  168. package/types/node_modules/openai/internal/utils/sleep.d.mts +2 -0
  169. package/types/node_modules/openai/internal/utils/sleep.d.ts +2 -0
  170. package/types/node_modules/openai/internal/utils/uuid.d.mts +5 -0
  171. package/types/node_modules/openai/internal/utils/uuid.d.ts +5 -0
  172. package/types/node_modules/openai/internal/utils/values.d.mts +18 -0
  173. package/types/node_modules/openai/internal/utils/values.d.ts +18 -0
  174. package/types/node_modules/openai/internal/utils.d.mts +7 -0
  175. package/types/node_modules/openai/internal/utils.d.ts +7 -0
  176. package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.mts +59 -0
  177. package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +59 -0
  178. package/types/node_modules/openai/lib/AssistantStream.d.mts +60 -0
  179. package/types/node_modules/openai/lib/AssistantStream.d.ts +60 -0
  180. package/types/node_modules/openai/lib/ChatCompletionRunner.d.mts +16 -0
  181. package/types/node_modules/openai/lib/ChatCompletionRunner.d.ts +16 -0
  182. package/types/node_modules/openai/lib/ChatCompletionStream.d.mts +208 -0
  183. package/types/node_modules/openai/lib/ChatCompletionStream.d.ts +208 -0
  184. package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.mts +19 -0
  185. package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +19 -0
  186. package/types/node_modules/openai/lib/EventEmitter.d.mts +45 -0
  187. package/types/node_modules/openai/lib/EventEmitter.d.ts +45 -0
  188. package/types/node_modules/openai/lib/EventStream.d.mts +62 -0
  189. package/types/node_modules/openai/lib/EventStream.d.ts +62 -0
  190. package/types/node_modules/openai/lib/ResponsesParser.d.mts +36 -0
  191. package/types/node_modules/openai/lib/ResponsesParser.d.ts +36 -0
  192. package/types/node_modules/openai/lib/RunnableFunction.d.mts +83 -0
  193. package/types/node_modules/openai/lib/RunnableFunction.d.ts +83 -0
  194. package/types/node_modules/openai/lib/Util.d.mts +5 -0
  195. package/types/node_modules/openai/lib/Util.d.ts +5 -0
  196. package/types/node_modules/openai/lib/chatCompletionUtils.d.mts +5 -0
  197. package/types/node_modules/openai/lib/chatCompletionUtils.d.ts +5 -0
  198. package/types/node_modules/openai/lib/jsonschema.d.mts +106 -0
  199. package/types/node_modules/openai/lib/jsonschema.d.ts +106 -0
  200. package/types/node_modules/openai/lib/parser.d.mts +47 -0
  201. package/types/node_modules/openai/lib/parser.d.ts +47 -0
  202. package/types/node_modules/openai/lib/responses/EventTypes.d.mts +9 -0
  203. package/types/node_modules/openai/lib/responses/EventTypes.d.ts +9 -0
  204. package/types/node_modules/openai/lib/responses/ResponseStream.d.mts +59 -0
  205. package/types/node_modules/openai/lib/responses/ResponseStream.d.ts +59 -0
  206. package/types/node_modules/openai/package.json +233 -0
  207. package/types/node_modules/openai/pagination.d.mts +2 -0
  208. package/types/node_modules/openai/pagination.d.ts +2 -0
  209. package/types/node_modules/openai/realtime/index.d.mts +2 -0
  210. package/types/node_modules/openai/realtime/index.d.ts +2 -0
  211. package/types/node_modules/openai/realtime/internal-base.d.mts +45 -0
  212. package/types/node_modules/openai/realtime/internal-base.d.ts +45 -0
  213. package/types/node_modules/openai/realtime/websocket.d.mts +36 -0
  214. package/types/node_modules/openai/realtime/websocket.d.ts +36 -0
  215. package/types/node_modules/openai/realtime/ws.d.mts +27 -0
  216. package/types/node_modules/openai/realtime/ws.d.ts +27 -0
  217. package/types/node_modules/openai/resource.d.mts +2 -0
  218. package/types/node_modules/openai/resource.d.ts +2 -0
  219. package/types/node_modules/openai/resources/audio/audio.d.mts +26 -0
  220. package/types/node_modules/openai/resources/audio/audio.d.ts +26 -0
  221. package/types/node_modules/openai/resources/audio/index.d.mts +5 -0
  222. package/types/node_modules/openai/resources/audio/index.d.ts +5 -0
  223. package/types/node_modules/openai/resources/audio/speech.d.mts +64 -0
  224. package/types/node_modules/openai/resources/audio/speech.d.ts +64 -0
  225. package/types/node_modules/openai/resources/audio/transcriptions.d.mts +485 -0
  226. package/types/node_modules/openai/resources/audio/transcriptions.d.ts +485 -0
  227. package/types/node_modules/openai/resources/audio/translations.d.mts +81 -0
  228. package/types/node_modules/openai/resources/audio/translations.d.ts +81 -0
  229. package/types/node_modules/openai/resources/audio.d.mts +2 -0
  230. package/types/node_modules/openai/resources/audio.d.ts +2 -0
  231. package/types/node_modules/openai/resources/batches.d.mts +279 -0
  232. package/types/node_modules/openai/resources/batches.d.ts +279 -0
  233. package/types/node_modules/openai/resources/beta/assistants.d.mts +1232 -0
  234. package/types/node_modules/openai/resources/beta/assistants.d.ts +1232 -0
  235. package/types/node_modules/openai/resources/beta/beta.d.mts +18 -0
  236. package/types/node_modules/openai/resources/beta/beta.d.ts +18 -0
  237. package/types/node_modules/openai/resources/beta/index.d.mts +5 -0
  238. package/types/node_modules/openai/resources/beta/index.d.ts +5 -0
  239. package/types/node_modules/openai/resources/beta/realtime/index.d.mts +4 -0
  240. package/types/node_modules/openai/resources/beta/realtime/index.d.ts +4 -0
  241. package/types/node_modules/openai/resources/beta/realtime/realtime.d.mts +2332 -0
  242. package/types/node_modules/openai/resources/beta/realtime/realtime.d.ts +2332 -0
  243. package/types/node_modules/openai/resources/beta/realtime/sessions.d.mts +744 -0
  244. package/types/node_modules/openai/resources/beta/realtime/sessions.d.ts +744 -0
  245. package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts +299 -0
  246. package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts +299 -0
  247. package/types/node_modules/openai/resources/beta/realtime.d.mts +2 -0
  248. package/types/node_modules/openai/resources/beta/realtime.d.ts +2 -0
  249. package/types/node_modules/openai/resources/beta/threads/index.d.mts +4 -0
  250. package/types/node_modules/openai/resources/beta/threads/index.d.ts +4 -0
  251. package/types/node_modules/openai/resources/beta/threads/messages.d.mts +594 -0
  252. package/types/node_modules/openai/resources/beta/threads/messages.d.ts +594 -0
  253. package/types/node_modules/openai/resources/beta/threads/runs/index.d.mts +3 -0
  254. package/types/node_modules/openai/resources/beta/threads/runs/index.d.ts +3 -0
  255. package/types/node_modules/openai/resources/beta/threads/runs/runs.d.mts +733 -0
  256. package/types/node_modules/openai/resources/beta/threads/runs/runs.d.ts +733 -0
  257. package/types/node_modules/openai/resources/beta/threads/runs/steps.d.mts +615 -0
  258. package/types/node_modules/openai/resources/beta/threads/runs/steps.d.ts +615 -0
  259. package/types/node_modules/openai/resources/beta/threads/runs.d.mts +2 -0
  260. package/types/node_modules/openai/resources/beta/threads/runs.d.ts +2 -0
  261. package/types/node_modules/openai/resources/beta/threads/threads.d.mts +1044 -0
  262. package/types/node_modules/openai/resources/beta/threads/threads.d.ts +1044 -0
  263. package/types/node_modules/openai/resources/beta/threads.d.mts +2 -0
  264. package/types/node_modules/openai/resources/beta/threads.d.ts +2 -0
  265. package/types/node_modules/openai/resources/beta.d.mts +2 -0
  266. package/types/node_modules/openai/resources/beta.d.ts +2 -0
  267. package/types/node_modules/openai/resources/chat/chat.d.mts +13 -0
  268. package/types/node_modules/openai/resources/chat/chat.d.ts +13 -0
  269. package/types/node_modules/openai/resources/chat/completions/completions.d.mts +1627 -0
  270. package/types/node_modules/openai/resources/chat/completions/completions.d.ts +1627 -0
  271. package/types/node_modules/openai/resources/chat/completions/index.d.mts +4 -0
  272. package/types/node_modules/openai/resources/chat/completions/index.d.ts +4 -0
  273. package/types/node_modules/openai/resources/chat/completions/messages.d.mts +34 -0
  274. package/types/node_modules/openai/resources/chat/completions/messages.d.ts +34 -0
  275. package/types/node_modules/openai/resources/chat/completions.d.mts +2 -0
  276. package/types/node_modules/openai/resources/chat/completions.d.ts +2 -0
  277. package/types/node_modules/openai/resources/chat/index.d.mts +3 -0
  278. package/types/node_modules/openai/resources/chat/index.d.ts +3 -0
  279. package/types/node_modules/openai/resources/chat.d.mts +2 -0
  280. package/types/node_modules/openai/resources/chat.d.ts +2 -0
  281. package/types/node_modules/openai/resources/completions.d.mts +329 -0
  282. package/types/node_modules/openai/resources/completions.d.ts +329 -0
  283. package/types/node_modules/openai/resources/containers/containers.d.mts +200 -0
  284. package/types/node_modules/openai/resources/containers/containers.d.ts +200 -0
  285. package/types/node_modules/openai/resources/containers/files/content.d.mts +16 -0
  286. package/types/node_modules/openai/resources/containers/files/content.d.ts +16 -0
  287. package/types/node_modules/openai/resources/containers/files/files.d.mts +148 -0
  288. package/types/node_modules/openai/resources/containers/files/files.d.ts +148 -0
  289. package/types/node_modules/openai/resources/containers/files/index.d.mts +3 -0
  290. package/types/node_modules/openai/resources/containers/files/index.d.ts +3 -0
  291. package/types/node_modules/openai/resources/containers/files.d.mts +2 -0
  292. package/types/node_modules/openai/resources/containers/files.d.ts +2 -0
  293. package/types/node_modules/openai/resources/containers/index.d.mts +3 -0
  294. package/types/node_modules/openai/resources/containers/index.d.ts +3 -0
  295. package/types/node_modules/openai/resources/containers.d.mts +2 -0
  296. package/types/node_modules/openai/resources/containers.d.ts +2 -0
  297. package/types/node_modules/openai/resources/conversations/conversations.d.mts +176 -0
  298. package/types/node_modules/openai/resources/conversations/conversations.d.ts +176 -0
  299. package/types/node_modules/openai/resources/conversations/index.d.mts +3 -0
  300. package/types/node_modules/openai/resources/conversations/index.d.ts +3 -0
  301. package/types/node_modules/openai/resources/conversations/items.d.mts +367 -0
  302. package/types/node_modules/openai/resources/conversations/items.d.ts +367 -0
  303. package/types/node_modules/openai/resources/conversations.d.mts +2 -0
  304. package/types/node_modules/openai/resources/conversations.d.ts +2 -0
  305. package/types/node_modules/openai/resources/embeddings.d.mts +113 -0
  306. package/types/node_modules/openai/resources/embeddings.d.ts +113 -0
  307. package/types/node_modules/openai/resources/evals/evals.d.mts +735 -0
  308. package/types/node_modules/openai/resources/evals/evals.d.ts +735 -0
  309. package/types/node_modules/openai/resources/evals/index.d.mts +3 -0
  310. package/types/node_modules/openai/resources/evals/index.d.ts +3 -0
  311. package/types/node_modules/openai/resources/evals/runs/index.d.mts +3 -0
  312. package/types/node_modules/openai/resources/evals/runs/index.d.ts +3 -0
  313. package/types/node_modules/openai/resources/evals/runs/output-items.d.mts +382 -0
  314. package/types/node_modules/openai/resources/evals/runs/output-items.d.ts +382 -0
  315. package/types/node_modules/openai/resources/evals/runs/runs.d.mts +2290 -0
  316. package/types/node_modules/openai/resources/evals/runs/runs.d.ts +2290 -0
  317. package/types/node_modules/openai/resources/evals/runs.d.mts +2 -0
  318. package/types/node_modules/openai/resources/evals/runs.d.ts +2 -0
  319. package/types/node_modules/openai/resources/evals.d.mts +2 -0
  320. package/types/node_modules/openai/resources/evals.d.ts +2 -0
  321. package/types/node_modules/openai/resources/files.d.mts +164 -0
  322. package/types/node_modules/openai/resources/files.d.ts +164 -0
  323. package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.mts +10 -0
  324. package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.ts +10 -0
  325. package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.mts +119 -0
  326. package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.ts +119 -0
  327. package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.mts +3 -0
  328. package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.ts +3 -0
  329. package/types/node_modules/openai/resources/fine-tuning/alpha.d.mts +2 -0
  330. package/types/node_modules/openai/resources/fine-tuning/alpha.d.ts +2 -0
  331. package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.mts +10 -0
  332. package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.ts +10 -0
  333. package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.mts +3 -0
  334. package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.ts +3 -0
  335. package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.mts +160 -0
  336. package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.ts +160 -0
  337. package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.mts +2 -0
  338. package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.ts +2 -0
  339. package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.mts +22 -0
  340. package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +22 -0
  341. package/types/node_modules/openai/resources/fine-tuning/index.d.mts +6 -0
  342. package/types/node_modules/openai/resources/fine-tuning/index.d.ts +6 -0
  343. package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.mts +74 -0
  344. package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +74 -0
  345. package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.mts +3 -0
  346. package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +3 -0
  347. package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.mts +528 -0
  348. package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +528 -0
  349. package/types/node_modules/openai/resources/fine-tuning/jobs.d.mts +2 -0
  350. package/types/node_modules/openai/resources/fine-tuning/jobs.d.ts +2 -0
  351. package/types/node_modules/openai/resources/fine-tuning/methods.d.mts +120 -0
  352. package/types/node_modules/openai/resources/fine-tuning/methods.d.ts +120 -0
  353. package/types/node_modules/openai/resources/fine-tuning.d.mts +2 -0
  354. package/types/node_modules/openai/resources/fine-tuning.d.ts +2 -0
  355. package/types/node_modules/openai/resources/graders/grader-models.d.mts +304 -0
  356. package/types/node_modules/openai/resources/graders/grader-models.d.ts +304 -0
  357. package/types/node_modules/openai/resources/graders/graders.d.mts +10 -0
  358. package/types/node_modules/openai/resources/graders/graders.d.ts +10 -0
  359. package/types/node_modules/openai/resources/graders/index.d.mts +3 -0
  360. package/types/node_modules/openai/resources/graders/index.d.ts +3 -0
  361. package/types/node_modules/openai/resources/graders.d.mts +2 -0
  362. package/types/node_modules/openai/resources/graders.d.ts +2 -0
  363. package/types/node_modules/openai/resources/images.d.mts +653 -0
  364. package/types/node_modules/openai/resources/images.d.ts +653 -0
  365. package/types/node_modules/openai/resources/index.d.mts +22 -0
  366. package/types/node_modules/openai/resources/index.d.ts +22 -0
  367. package/types/node_modules/openai/resources/models.d.mts +52 -0
  368. package/types/node_modules/openai/resources/models.d.ts +52 -0
  369. package/types/node_modules/openai/resources/moderations.d.mts +295 -0
  370. package/types/node_modules/openai/resources/moderations.d.ts +295 -0
  371. package/types/node_modules/openai/resources/realtime/client-secrets.d.mts +594 -0
  372. package/types/node_modules/openai/resources/realtime/client-secrets.d.ts +594 -0
  373. package/types/node_modules/openai/resources/realtime/index.d.mts +3 -0
  374. package/types/node_modules/openai/resources/realtime/index.d.ts +3 -0
  375. package/types/node_modules/openai/resources/realtime/realtime.d.mts +3828 -0
  376. package/types/node_modules/openai/resources/realtime/realtime.d.ts +3828 -0
  377. package/types/node_modules/openai/resources/realtime.d.mts +2 -0
  378. package/types/node_modules/openai/resources/realtime.d.ts +2 -0
  379. package/types/node_modules/openai/resources/responses/index.d.mts +3 -0
  380. package/types/node_modules/openai/resources/responses/index.d.ts +3 -0
  381. package/types/node_modules/openai/resources/responses/input-items.d.mts +65 -0
  382. package/types/node_modules/openai/resources/responses/input-items.d.ts +65 -0
  383. package/types/node_modules/openai/resources/responses/responses.d.mts +4705 -0
  384. package/types/node_modules/openai/resources/responses/responses.d.ts +4705 -0
  385. package/types/node_modules/openai/resources/responses.d.mts +2 -0
  386. package/types/node_modules/openai/resources/responses.d.ts +2 -0
  387. package/types/node_modules/openai/resources/shared.d.mts +265 -0
  388. package/types/node_modules/openai/resources/shared.d.ts +265 -0
  389. package/types/node_modules/openai/resources/uploads/index.d.mts +3 -0
  390. package/types/node_modules/openai/resources/uploads/index.d.ts +3 -0
  391. package/types/node_modules/openai/resources/uploads/parts.d.mts +51 -0
  392. package/types/node_modules/openai/resources/uploads/parts.d.ts +51 -0
  393. package/types/node_modules/openai/resources/uploads/uploads.d.mts +157 -0
  394. package/types/node_modules/openai/resources/uploads/uploads.d.ts +157 -0
  395. package/types/node_modules/openai/resources/uploads.d.mts +2 -0
  396. package/types/node_modules/openai/resources/uploads.d.ts +2 -0
  397. package/types/node_modules/openai/resources/vector-stores/file-batches.d.mts +172 -0
  398. package/types/node_modules/openai/resources/vector-stores/file-batches.d.ts +172 -0
  399. package/types/node_modules/openai/resources/vector-stores/files.d.mts +231 -0
  400. package/types/node_modules/openai/resources/vector-stores/files.d.ts +231 -0
  401. package/types/node_modules/openai/resources/vector-stores/index.d.mts +4 -0
  402. package/types/node_modules/openai/resources/vector-stores/index.d.ts +4 -0
  403. package/types/node_modules/openai/resources/vector-stores/vector-stores.d.mts +373 -0
  404. package/types/node_modules/openai/resources/vector-stores/vector-stores.d.ts +373 -0
  405. package/types/node_modules/openai/resources/vector-stores.d.mts +2 -0
  406. package/types/node_modules/openai/resources/vector-stores.d.ts +2 -0
  407. package/types/node_modules/openai/resources/webhooks.d.mts +587 -0
  408. package/types/node_modules/openai/resources/webhooks.d.ts +587 -0
  409. package/types/node_modules/openai/resources.d.mts +2 -0
  410. package/types/node_modules/openai/resources.d.ts +2 -0
  411. package/types/node_modules/openai/src/_vendor/zod-to-json-schema/LICENSE +15 -0
  412. package/types/node_modules/openai/streaming.d.mts +2 -0
  413. package/types/node_modules/openai/streaming.d.ts +2 -0
  414. package/types/node_modules/openai/uploads.d.mts +2 -0
  415. package/types/node_modules/openai/uploads.d.ts +2 -0
  416. package/types/node_modules/openai/version.d.mts +2 -0
  417. package/types/node_modules/openai/version.d.ts +2 -0
package/types/node_modules/openai/resources/realtime/realtime.d.mts
@@ -0,0 +1,3828 @@
+ import { APIResource } from "../../core/resource.js";
+ import * as RealtimeAPI from "./realtime.js";
+ import * as Shared from "../shared.js";
+ import * as ClientSecretsAPI from "./client-secrets.js";
+ import { ClientSecretCreateParams, ClientSecretCreateResponse, ClientSecrets, RealtimeSessionClientSecret, RealtimeSessionCreateResponse, RealtimeTranscriptionSessionCreateResponse, RealtimeTranscriptionSessionTurnDetection } from "./client-secrets.js";
+ import * as ResponsesAPI from "../responses/responses.js";
+ export declare class Realtime extends APIResource {
+ clientSecrets: ClientSecretsAPI.ClientSecrets;
+ }
+ export interface AudioTranscription {
+ /**
+ * The language of the input audio. Supplying the input language in
+ * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+ * format will improve accuracy and latency.
+ */
+ language?: string;
+ /**
+ * The model to use for transcription. Current options are `whisper-1`,
+ * `gpt-4o-transcribe-latest`, `gpt-4o-mini-transcribe`, and `gpt-4o-transcribe`.
+ */
+ model?: 'whisper-1' | 'gpt-4o-transcribe-latest' | 'gpt-4o-mini-transcribe' | 'gpt-4o-transcribe';
+ /**
+ * An optional text to guide the model's style or continue a previous audio
+ * segment. For `whisper-1`, the
+ * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+ * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+ * "expect words related to technology".
+ */
+ prompt?: string;
+ }
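
For illustration (this sketch is not part of the package diff), a value satisfying the AudioTranscription interface above might look as follows; where it is attached, for example the session's audio input configuration, is outside this excerpt:

// A minimal sketch, assuming AudioTranscription is in scope from the
// declarations above.
const transcription: AudioTranscription = {
  model: 'gpt-4o-mini-transcribe',
  language: 'en', // ISO-639-1 code, per the doc comment above
  prompt: 'expect words related to technology',
};
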
+ /**
+ * Returned when a conversation is created. Emitted right after session creation.
+ */
+ export interface ConversationCreatedEvent {
+ /**
+ * The conversation resource.
+ */
+ conversation: ConversationCreatedEvent.Conversation;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The event type, must be `conversation.created`.
+ */
+ type: 'conversation.created';
+ }
+ export declare namespace ConversationCreatedEvent {
+ /**
+ * The conversation resource.
+ */
+ interface Conversation {
+ /**
+ * The unique ID of the conversation.
+ */
+ id?: string;
+ /**
+ * The object type, must be `realtime.conversation`.
+ */
+ object?: 'realtime.conversation';
+ }
+ }
+ /**
+ * A single item within a Realtime conversation.
+ */
+ export type ConversationItem = RealtimeConversationItemSystemMessage | RealtimeConversationItemUserMessage | RealtimeConversationItemAssistantMessage | RealtimeConversationItemFunctionCall | RealtimeConversationItemFunctionCallOutput | RealtimeMcpApprovalResponse | RealtimeMcpListTools | RealtimeMcpToolCall | RealtimeMcpApprovalRequest;
+ /**
+ * Sent by the server when an Item is added to the default Conversation. This can
+ * happen in several cases:
+ *
+ * - When the client sends a `conversation.item.create` event.
+ * - When the input audio buffer is committed. In this case the item will be a user
+ * message containing the audio from the buffer.
+ * - When the model is generating a Response. In this case the
+ * `conversation.item.added` event will be sent when the model starts generating
+ * a specific Item, and thus it will not yet have any content (and `status` will
+ * be `in_progress`).
+ *
+ * The event will include the full content of the Item (except when model is
+ * generating a Response) except for audio data, which can be retrieved separately
+ * with a `conversation.item.retrieve` event if necessary.
+ */
+ export interface ConversationItemAdded {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The event type, must be `conversation.item.added`.
+ */
+ type: 'conversation.item.added';
+ /**
+ * The ID of the item that precedes this one, if any. This is used to maintain
+ * ordering when items are inserted.
+ */
+ previous_item_id?: string | null;
+ }
+ /**
+ * Add a new Item to the Conversation's context, including messages, function
+ * calls, and function call responses. This event can be used both to populate a
+ * "history" of the conversation and to add new items mid-stream, but has the
+ * current limitation that it cannot populate assistant audio messages.
+ *
+ * If successful, the server will respond with a `conversation.item.created` event,
+ * otherwise an `error` event will be sent.
+ */
+ export interface ConversationItemCreateEvent {
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The event type, must be `conversation.item.create`.
+ */
+ type: 'conversation.item.create';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ /**
+ * The ID of the preceding item after which the new item will be inserted. If not
+ * set, the new item will be appended to the end of the conversation. If set to
+ * `root`, the new item will be added to the beginning of the conversation. If set
+ * to an existing ID, it allows an item to be inserted mid-conversation. If the ID
+ * cannot be found, an error will be returned and the item will not be added.
+ */
+ previous_item_id?: string;
+ }
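
To make the flow concrete, here is a minimal sketch (not part of the package diff) of a client pushing a user text message with this event. It assumes `ws` is a WebSocket already connected to a Realtime session, and it assumes the user-message member of ConversationItem follows the Realtime message shape (a role plus input_text content); that member's declaration is not shown in this excerpt.

// Hypothetical client-side usage of conversation.item.create.
declare const ws: WebSocket;

ws.send(JSON.stringify({
  type: 'conversation.item.create',
  event_id: `evt_${crypto.randomUUID()}`, // optional client-generated ID
  item: {
    type: 'message', // assumed user-message shape, not declared in this excerpt
    role: 'user',
    content: [{ type: 'input_text', text: 'Hello!' }],
  },
}));
// On success the server replies with conversation.item.created; otherwise it
// sends an `error` event.
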
+ /**
+ * Returned when a conversation item is created. There are several scenarios that
+ * produce this event:
+ *
+ * - The server is generating a Response, which if successful will produce either
+ * one or two Items, which will be of type `message` (role `assistant`) or type
+ * `function_call`.
+ * - The input audio buffer has been committed, either by the client or the server
+ * (in `server_vad` mode). The server will take the content of the input audio
+ * buffer and add it to a new user message Item.
+ * - The client has sent a `conversation.item.create` event to add a new Item to
+ * the Conversation.
+ */
+ export interface ConversationItemCreatedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The event type, must be `conversation.item.created`.
+ */
+ type: 'conversation.item.created';
+ /**
+ * The ID of the preceding item in the Conversation context, allows the client to
+ * understand the order of the conversation. Can be `null` if the item has no
+ * predecessor.
+ */
+ previous_item_id?: string | null;
+ }
+ /**
+ * Send this event when you want to remove any item from the conversation history.
+ * The server will respond with a `conversation.item.deleted` event, unless the
+ * item does not exist in the conversation history, in which case the server will
+ * respond with an error.
+ */
+ export interface ConversationItemDeleteEvent {
+ /**
+ * The ID of the item to delete.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.delete`.
+ */
+ type: 'conversation.item.delete';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
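
A minimal client-side sketch (not part of the package diff), assuming `ws` is a WebSocket already connected to a Realtime session:

declare const ws: WebSocket;

function deleteItem(itemId: string): void {
  const deleteEvent: ConversationItemDeleteEvent = {
    type: 'conversation.item.delete',
    item_id: itemId,
  };
  ws.send(JSON.stringify(deleteEvent));
  // Expect a conversation.item.deleted reply, or an error event if the item
  // is not in the conversation history.
}
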
+ /**
+ * Returned when an item in the conversation is deleted by the client with a
+ * `conversation.item.delete` event. This event is used to synchronize the server's
+ * understanding of the conversation history with the client's view.
+ */
+ export interface ConversationItemDeletedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item that was deleted.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.deleted`.
+ */
+ type: 'conversation.item.deleted';
+ }
+ /**
+ * Returned when a conversation item is finalized.
+ *
+ * The event will include the full content of the Item except for audio data, which
+ * can be retrieved separately with a `conversation.item.retrieve` event if needed.
+ */
+ export interface ConversationItemDone {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The event type, must be `conversation.item.done`.
+ */
+ type: 'conversation.item.done';
+ /**
+ * The ID of the item that precedes this one, if any. This is used to maintain
+ * ordering when items are inserted.
+ */
+ previous_item_id?: string | null;
+ }
+ /**
+ * This event is the output of audio transcription for user audio written to the
+ * user audio buffer. Transcription begins when the input audio buffer is committed
+ * by the client or server (when VAD is enabled). Transcription runs asynchronously
+ * with Response creation, so this event may come before or after the Response
+ * events.
+ *
+ * Realtime API models accept audio natively, and thus input transcription is a
+ * separate process run on a separate ASR (Automatic Speech Recognition) model. The
+ * transcript may diverge somewhat from the model's interpretation, and should be
+ * treated as a rough guide.
+ */
+ export interface ConversationItemInputAudioTranscriptionCompletedEvent {
+ /**
+ * The index of the content part containing the audio.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item containing the audio that is being transcribed.
+ */
+ item_id: string;
+ /**
+ * The transcribed text.
+ */
+ transcript: string;
+ /**
+ * The event type, must be `conversation.item.input_audio_transcription.completed`.
+ */
+ type: 'conversation.item.input_audio_transcription.completed';
+ /**
+ * Usage statistics for the transcription, this is billed according to the ASR
+ * model's pricing rather than the realtime model's pricing.
+ */
+ usage: ConversationItemInputAudioTranscriptionCompletedEvent.TranscriptTextUsageTokens | ConversationItemInputAudioTranscriptionCompletedEvent.TranscriptTextUsageDuration;
+ /**
+ * The log probabilities of the transcription.
+ */
+ logprobs?: Array<LogProbProperties> | null;
+ }
+ export declare namespace ConversationItemInputAudioTranscriptionCompletedEvent {
+ /**
+ * Usage statistics for models billed by token usage.
+ */
+ interface TranscriptTextUsageTokens {
+ /**
+ * Number of input tokens billed for this request.
+ */
+ input_tokens: number;
+ /**
+ * Number of output tokens generated.
+ */
+ output_tokens: number;
+ /**
+ * Total number of tokens used (input + output).
+ */
+ total_tokens: number;
+ /**
+ * The type of the usage object. Always `tokens` for this variant.
+ */
+ type: 'tokens';
+ /**
+ * Details about the input tokens billed for this request.
+ */
+ input_token_details?: TranscriptTextUsageTokens.InputTokenDetails;
+ }
+ namespace TranscriptTextUsageTokens {
+ /**
+ * Details about the input tokens billed for this request.
+ */
+ interface InputTokenDetails {
+ /**
+ * Number of audio tokens billed for this request.
+ */
+ audio_tokens?: number;
+ /**
+ * Number of text tokens billed for this request.
+ */
+ text_tokens?: number;
+ }
+ }
+ /**
+ * Usage statistics for models billed by audio input duration.
+ */
+ interface TranscriptTextUsageDuration {
+ /**
+ * Duration of the input audio in seconds.
+ */
+ seconds: number;
+ /**
+ * The type of the usage object. Always `duration` for this variant.
+ */
+ type: 'duration';
+ }
+ }
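
Since `usage` is a discriminated union, handlers can narrow on its `type` field. A minimal sketch (not part of the package diff), assuming the event type above is in scope:

function describeTranscriptionUsage(
  event: ConversationItemInputAudioTranscriptionCompletedEvent,
): string {
  const { usage } = event;
  if (usage.type === 'tokens') {
    // Token-billed ASR models report input/output token counts.
    return `${usage.total_tokens} tokens (${usage.input_tokens} in / ${usage.output_tokens} out)`;
  }
  // Duration-billed ASR models report audio seconds only.
  return `${usage.seconds}s of audio`;
}
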
+ /**
+ * Returned when the text value of an input audio transcription content part is
+ * updated with incremental transcription results.
+ */
+ export interface ConversationItemInputAudioTranscriptionDeltaEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item containing the audio that is being transcribed.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.input_audio_transcription.delta`.
+ */
+ type: 'conversation.item.input_audio_transcription.delta';
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index?: number;
+ /**
+ * The text delta.
+ */
+ delta?: string;
+ /**
+ * The log probabilities of the transcription. These can be enabled by
+ * configurating the session with
+ * `"include": ["item.input_audio_transcription.logprobs"]`. Each entry in the
+ * array corresponds a log probability of which token would be selected for this
+ * chunk of transcription. This can help to identify if it was possible there were
+ * multiple valid options for a given chunk of transcription.
+ */
+ logprobs?: Array<LogProbProperties> | null;
+ }
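
A minimal sketch (not part of the package diff) of accumulating deltas per item for live display; the final transcript arrives separately in the `completed` event:

const partialTranscripts = new Map<string, string>();

function onTranscriptionDelta(
  event: ConversationItemInputAudioTranscriptionDeltaEvent,
): void {
  const previous = partialTranscripts.get(event.item_id) ?? '';
  partialTranscripts.set(event.item_id, previous + (event.delta ?? ''));
}
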
+ /**
+ * Returned when input audio transcription is configured, and a transcription
+ * request for a user message failed. These events are separate from other `error`
+ * events so that the client can identify the related Item.
+ */
+ export interface ConversationItemInputAudioTranscriptionFailedEvent {
+ /**
+ * The index of the content part containing the audio.
+ */
+ content_index: number;
+ /**
+ * Details of the transcription error.
+ */
+ error: ConversationItemInputAudioTranscriptionFailedEvent.Error;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the user message item.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.input_audio_transcription.failed`.
+ */
+ type: 'conversation.item.input_audio_transcription.failed';
+ }
+ export declare namespace ConversationItemInputAudioTranscriptionFailedEvent {
+ /**
+ * Details of the transcription error.
+ */
+ interface Error {
+ /**
+ * Error code, if any.
+ */
+ code?: string;
+ /**
+ * A human-readable error message.
+ */
+ message?: string;
+ /**
+ * Parameter related to the error, if any.
+ */
+ param?: string;
+ /**
+ * The type of error.
+ */
+ type?: string;
+ }
+ }
+ /**
+ * Returned when an input audio transcription segment is identified for an item.
+ */
+ export interface ConversationItemInputAudioTranscriptionSegment {
+ /**
+ * The segment identifier.
+ */
+ id: string;
+ /**
+ * The index of the input audio content part within the item.
+ */
+ content_index: number;
+ /**
+ * End time of the segment in seconds.
+ */
+ end: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item containing the input audio content.
+ */
+ item_id: string;
+ /**
+ * The detected speaker label for this segment.
+ */
+ speaker: string;
+ /**
+ * Start time of the segment in seconds.
+ */
+ start: number;
+ /**
+ * The text for this segment.
+ */
+ text: string;
+ /**
+ * The event type, must be `conversation.item.input_audio_transcription.segment`.
+ */
+ type: 'conversation.item.input_audio_transcription.segment';
+ }
+ /**
+ * Send this event when you want to retrieve the server's representation of a
+ * specific item in the conversation history. This is useful, for example, to
+ * inspect user audio after noise cancellation and VAD. The server will respond
+ * with a `conversation.item.retrieved` event, unless the item does not exist in
+ * the conversation history, in which case the server will respond with an error.
+ */
+ export interface ConversationItemRetrieveEvent {
+ /**
+ * The ID of the item to retrieve.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.retrieve`.
+ */
+ type: 'conversation.item.retrieve';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
+ /**
+ * Send this event to truncate a previous assistant message’s audio. The server
+ * will produce audio faster than realtime, so this event is useful when the user
+ * interrupts to truncate audio that has already been sent to the client but not
+ * yet played. This will synchronize the server's understanding of the audio with
+ * the client's playback.
+ *
+ * Truncating audio will delete the server-side text transcript to ensure there is
+ * not text in the context that hasn't been heard by the user.
+ *
+ * If successful, the server will respond with a `conversation.item.truncated`
+ * event.
+ */
+ export interface ConversationItemTruncateEvent {
+ /**
+ * Inclusive duration up to which audio is truncated, in milliseconds. If the
+ * audio_end_ms is greater than the actual audio duration, the server will respond
+ * with an error.
+ */
+ audio_end_ms: number;
+ /**
+ * The index of the content part to truncate. Set this to `0`.
+ */
+ content_index: number;
+ /**
+ * The ID of the assistant message item to truncate. Only assistant message items
+ * can be truncated.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.truncate`.
+ */
+ type: 'conversation.item.truncate';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
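
A minimal interruption-handling sketch (not part of the package diff), assuming `ws` is a connected Realtime WebSocket and `playedMs` is how much of the assistant item the client has actually played:

declare const ws: WebSocket;

function truncateOnInterrupt(itemId: string, playedMs: number): void {
  const truncateEvent: ConversationItemTruncateEvent = {
    type: 'conversation.item.truncate',
    item_id: itemId,
    content_index: 0, // the doc comment above says to set this to 0
    audio_end_ms: Math.floor(playedMs),
  };
  ws.send(JSON.stringify(truncateEvent));
  // The server confirms with conversation.item.truncated and removes the
  // unheard transcript portion from the context.
}
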
+ /**
+ * Returned when an earlier assistant audio message item is truncated by the client
+ * with a `conversation.item.truncate` event. This event is used to synchronize the
+ * server's understanding of the audio with the client's playback.
+ *
+ * This action will truncate the audio and remove the server-side text transcript
+ * to ensure there is no text in the context that hasn't been heard by the user.
+ */
+ export interface ConversationItemTruncatedEvent {
+ /**
+ * The duration up to which the audio was truncated, in milliseconds.
+ */
+ audio_end_ms: number;
+ /**
+ * The index of the content part that was truncated.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the assistant message item that was truncated.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `conversation.item.truncated`.
+ */
+ type: 'conversation.item.truncated';
+ }
+ /**
+ * The item to add to the conversation.
+ */
+ export interface ConversationItemWithReference {
+ /**
+ * For an item of type (`message` | `function_call` | `function_call_output`) this
+ * field allows the client to assign the unique ID of the item. It is not required
+ * because the server will generate one if not provided.
+ *
+ * For an item of type `item_reference`, this field is required and is a reference
+ * to any item that has previously existed in the conversation.
+ */
+ id?: string;
+ /**
+ * The arguments of the function call (for `function_call` items).
+ */
+ arguments?: string;
+ /**
+ * The ID of the function call (for `function_call` and `function_call_output`
+ * items). If passed on a `function_call_output` item, the server will check that a
+ * `function_call` item with the same ID exists in the conversation history.
+ */
+ call_id?: string;
+ /**
+ * The content of the message, applicable for `message` items.
+ *
+ * - Message items of role `system` support only `input_text` content
+ * - Message items of role `user` support `input_text` and `input_audio` content
+ * - Message items of role `assistant` support `text` content.
+ */
+ content?: Array<ConversationItemWithReference.Content>;
+ /**
+ * The name of the function being called (for `function_call` items).
+ */
+ name?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`.
+ */
+ object?: 'realtime.item';
+ /**
+ * The output of the function call (for `function_call_output` items).
+ */
+ output?: string;
+ /**
+ * The role of the message sender (`user`, `assistant`, `system`), only applicable
+ * for `message` items.
+ */
+ role?: 'user' | 'assistant' | 'system';
+ /**
+ * The status of the item (`completed`, `incomplete`, `in_progress`). These have no
+ * effect on the conversation, but are accepted for consistency with the
+ * `conversation.item.created` event.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ /**
+ * The type of the item (`message`, `function_call`, `function_call_output`,
+ * `item_reference`).
+ */
+ type?: 'message' | 'function_call' | 'function_call_output' | 'item_reference';
+ }
+ export declare namespace ConversationItemWithReference {
+ interface Content {
+ /**
+ * ID of a previous conversation item to reference (for `item_reference` content
+ * types in `response.create` events). These can reference both client and server
+ * created items.
+ */
+ id?: string;
+ /**
+ * Base64-encoded audio bytes, used for `input_audio` content type.
+ */
+ audio?: string;
+ /**
+ * The text content, used for `input_text` and `text` content types.
+ */
+ text?: string;
+ /**
+ * The transcript of the audio, used for `input_audio` content type.
+ */
+ transcript?: string;
+ /**
+ * The content type (`input_text`, `input_audio`, `item_reference`, `text`).
+ */
+ type?: 'input_text' | 'input_audio' | 'item_reference' | 'text';
+ }
+ }
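
For illustration (not part of the package diff), an `item_reference` built from this shape needs only the `id` of an item that already exists in the conversation, since `id` is required for that type per the doc comment above:

const reference: ConversationItemWithReference = {
  type: 'item_reference',
  id: 'item_abc123', // hypothetical ID of a previously created item
};
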
+ /**
+ * Send this event to append audio bytes to the input audio buffer. The audio
+ * buffer is temporary storage you can write to and later commit. A "commit" will
+ * create a new user message item in the conversation history from the buffer
+ * content and clear the buffer. Input audio transcription (if enabled) will be
+ * generated when the buffer is committed.
+ *
+ * If VAD is enabled, the audio buffer is used to detect speech and the server will
+ * decide when to commit. When Server VAD is disabled, you must commit the audio
+ * buffer manually. Input audio noise reduction operates on writes to the audio
+ * buffer.
+ *
+ * The client may choose how much audio to place in each event, up to a maximum of
+ * 15 MiB; for example, streaming smaller chunks from the client may allow the VAD
+ * to be more responsive. Unlike most other client events, the server will not send
+ * a confirmation response to this event.
+ */
+ export interface InputAudioBufferAppendEvent {
+ /**
+ * Base64-encoded audio bytes. This must be in the format specified by the
+ * `input_audio_format` field in the session configuration.
+ */
+ audio: string;
+ /**
+ * The event type, must be `input_audio_buffer.append`.
+ */
+ type: 'input_audio_buffer.append';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
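As a rough sketch of the append flow, small PCM chunks are base64-encoded and sent as individual `input_audio_buffer.append` events; the `ws` socket is an assumed, already-open Realtime WebSocket connection:

// Assumes an established Realtime WebSocket connection.
declare const ws: WebSocket;

// Stream audio in small chunks so server-side VAD can react promptly.
function appendAudio(pcmChunk: Uint8Array): void {
  const event: InputAudioBufferAppendEvent = {
    type: 'input_audio_buffer.append',
    // btoa works on binary strings; acceptable for small chunks like these.
    audio: btoa(String.fromCharCode(...pcmChunk)),
  };
  ws.send(JSON.stringify(event));
}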
+ /**
+ * Send this event to clear the audio bytes in the buffer. The server will respond
+ * with an `input_audio_buffer.cleared` event.
+ */
+ export interface InputAudioBufferClearEvent {
+ /**
+ * The event type, must be `input_audio_buffer.clear`.
+ */
+ type: 'input_audio_buffer.clear';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
+ /**
+ * Returned when the input audio buffer is cleared by the client with an
+ * `input_audio_buffer.clear` event.
+ */
+ export interface InputAudioBufferClearedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The event type, must be `input_audio_buffer.cleared`.
+ */
+ type: 'input_audio_buffer.cleared';
+ }
+ /**
+ * Send this event to commit the user input audio buffer, which will create a new
+ * user message item in the conversation. This event will produce an error if the
+ * input audio buffer is empty. When in Server VAD mode, the client does not need
+ * to send this event; the server will commit the audio buffer automatically.
+ *
+ * Committing the input audio buffer will trigger input audio transcription (if
+ * enabled in session configuration), but it will not create a response from the
+ * model. The server will respond with an `input_audio_buffer.committed` event.
+ */
+ export interface InputAudioBufferCommitEvent {
+ /**
+ * The event type, must be `input_audio_buffer.commit`.
+ */
+ type: 'input_audio_buffer.commit';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ }
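When turn detection is off, the client typically commits and then explicitly requests a response, since a commit alone does not trigger one. A sketch reusing the hypothetical `ws` socket from the append example:

// End the user's turn manually: commit the buffer, then ask for a response.
function endUserTurn(): void {
  const commit: InputAudioBufferCommitEvent = { type: 'input_audio_buffer.commit' };
  ws.send(JSON.stringify(commit));
  ws.send(JSON.stringify({ type: 'response.create' })); // commit alone creates no response
}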
+ /**
+ * Returned when an input audio buffer is committed, either by the client or
+ * automatically in server VAD mode. The `item_id` property is the ID of the user
+ * message item that will be created; thus a `conversation.item.created` event will
+ * also be sent to the client.
+ */
+ export interface InputAudioBufferCommittedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the user message item that will be created.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `input_audio_buffer.committed`.
+ */
+ type: 'input_audio_buffer.committed';
+ /**
+ * The ID of the preceding item after which the new item will be inserted. Can be
+ * `null` if the item has no predecessor.
+ */
+ previous_item_id?: string | null;
+ }
+ /**
+ * Sent by the server when in `server_vad` mode to indicate that speech has been
+ * detected in the audio buffer. This can happen any time audio is added to the
+ * buffer (unless speech is already detected). The client may want to use this
+ * event to interrupt audio playback or provide visual feedback to the user.
+ *
+ * The client should expect to receive an `input_audio_buffer.speech_stopped` event
+ * when speech stops. The `item_id` property is the ID of the user message item
+ * that will be created when speech stops and will also be included in the
+ * `input_audio_buffer.speech_stopped` event (unless the client manually commits
+ * the audio buffer during VAD activation).
+ */
+ export interface InputAudioBufferSpeechStartedEvent {
+ /**
+ * Milliseconds from the start of all audio written to the buffer during the
+ * session when speech was first detected. This will correspond to the beginning of
+ * audio sent to the model, and thus includes the `prefix_padding_ms` configured in
+ * the Session.
+ */
+ audio_start_ms: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the user message item that will be created when speech stops.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `input_audio_buffer.speech_started`.
+ */
+ type: 'input_audio_buffer.speech_started';
+ }
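A common use of this event, as the comment suggests, is barge-in handling: stop local playback the moment the user starts talking. The `player` object is a stand-in for whatever audio sink the application uses:

// Hypothetical local audio sink.
declare const player: { stop(): void };

function handleServerEvent(event: RealtimeServerEvent): void {
  if (event.type === 'input_audio_buffer.speech_started') {
    player.stop(); // the user barged in; cut assistant playback immediately
  }
}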
+ /**
+ * Returned in `server_vad` mode when the server detects the end of speech in the
+ * audio buffer. The server will also send a `conversation.item.created` event
+ * with the user message item that is created from the audio buffer.
+ */
+ export interface InputAudioBufferSpeechStoppedEvent {
+ /**
+ * Milliseconds since the session started when speech stopped. This will correspond
+ * to the end of audio sent to the model, and thus includes the
+ * `min_silence_duration_ms` configured in the Session.
+ */
+ audio_end_ms: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the user message item that will be created.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `input_audio_buffer.speech_stopped`.
+ */
+ type: 'input_audio_buffer.speech_stopped';
+ }
+ /**
+ * Returned when the Server VAD timeout is triggered for the input audio buffer.
+ * This is configured with `idle_timeout_ms` in the `turn_detection` settings of
+ * the session, and it indicates that there hasn't been any speech detected for the
+ * configured duration.
+ *
+ * The `audio_start_ms` and `audio_end_ms` fields indicate the segment of audio
+ * after the last model response up to the triggering time, as an offset from the
+ * beginning of audio written to the input audio buffer. This means it demarcates
+ * the segment of audio that was silent, and the difference between the start and
+ * end values will roughly match the configured timeout.
+ *
+ * The empty audio will be committed to the conversation as an `input_audio` item
+ * (there will be an `input_audio_buffer.committed` event) and a model response will
+ * be generated. There may be speech that didn't trigger VAD but is still detected
+ * by the model, so the model may respond with something relevant to the
+ * conversation or a prompt to continue speaking.
+ */
+ export interface InputAudioBufferTimeoutTriggered {
+ /**
+ * Millisecond offset of audio written to the input audio buffer at the time the
+ * timeout was triggered.
+ */
+ audio_end_ms: number;
+ /**
+ * Millisecond offset of audio written to the input audio buffer that was after the
+ * playback time of the last model response.
+ */
+ audio_start_ms: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item associated with this segment.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `input_audio_buffer.timeout_triggered`.
+ */
+ type: 'input_audio_buffer.timeout_triggered';
+ }
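To illustrate the offset arithmetic described above, the silent span is `audio_end_ms - audio_start_ms`, which should land close to the configured `idle_timeout_ms`:

function onTimeoutTriggered(event: InputAudioBufferTimeoutTriggered): void {
  // Length of the silent segment that tripped the timeout; roughly equal to
  // the idle_timeout_ms configured in turn_detection.
  const silentMs = event.audio_end_ms - event.audio_start_ms;
  console.log(`Idle timeout after ~${silentMs}ms of silence (item ${event.item_id})`);
}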
+ /**
+ * A log probability object.
+ */
+ export interface LogProbProperties {
+ /**
+ * The token that was used to generate the log probability.
+ */
+ token: string;
+ /**
+ * The bytes that were used to generate the log probability.
+ */
+ bytes: Array<number>;
+ /**
+ * The log probability of the token.
+ */
+ logprob: number;
+ }
+ /**
+ * Returned when listing MCP tools has completed for an item.
+ */
+ export interface McpListToolsCompleted {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP list tools item.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `mcp_list_tools.completed`.
+ */
+ type: 'mcp_list_tools.completed';
+ }
+ /**
+ * Returned when listing MCP tools has failed for an item.
+ */
+ export interface McpListToolsFailed {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP list tools item.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `mcp_list_tools.failed`.
+ */
+ type: 'mcp_list_tools.failed';
+ }
+ /**
+ * Returned when listing MCP tools is in progress for an item.
+ */
+ export interface McpListToolsInProgress {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP list tools item.
+ */
+ item_id: string;
+ /**
+ * The event type, must be `mcp_list_tools.in_progress`.
+ */
+ type: 'mcp_list_tools.in_progress';
+ }
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ export type NoiseReductionType = 'near_field' | 'far_field';
+ /**
+ * **WebRTC Only:** Emit to cut off the current audio response. This will trigger
+ * the server to stop generating audio and emit an `output_audio_buffer.cleared`
+ * event. This event should be preceded by a `response.cancel` client event to stop
+ * the generation of the current response.
+ * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ */
+ export interface OutputAudioBufferClearEvent {
+ /**
+ * The event type, must be `output_audio_buffer.clear`.
+ */
+ type: 'output_audio_buffer.clear';
+ /**
+ * The unique ID of the client event used for error handling.
+ */
+ event_id?: string;
+ }
+ /**
+ * Emitted at the beginning of a Response to indicate the updated rate limits. When
+ * a Response is created, some tokens will be "reserved" for the output tokens; the
+ * rate limits shown here reflect that reservation, which is then adjusted
+ * accordingly once the Response is completed.
+ */
+ export interface RateLimitsUpdatedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * List of rate limit information.
+ */
+ rate_limits: Array<RateLimitsUpdatedEvent.RateLimit>;
+ /**
+ * The event type, must be `rate_limits.updated`.
+ */
+ type: 'rate_limits.updated';
+ }
+ export declare namespace RateLimitsUpdatedEvent {
+ interface RateLimit {
+ /**
+ * The maximum allowed value for the rate limit.
+ */
+ limit?: number;
+ /**
+ * The name of the rate limit (`requests`, `tokens`).
+ */
+ name?: 'requests' | 'tokens';
+ /**
+ * The remaining value before the limit is reached.
+ */
+ remaining?: number;
+ /**
+ * Seconds until the rate limit resets.
+ */
+ reset_seconds?: number;
+ }
+ }
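One plausible way to act on these events is simple backpressure: treat a low remaining token budget as a signal to defer non-essential requests. The 10% threshold below is an arbitrary illustrative choice:

function tokensLow(event: RateLimitsUpdatedEvent): boolean {
  const tokens = event.rate_limits.find((limit) => limit.name === 'tokens');
  if (tokens?.limit === undefined || tokens.remaining === undefined) return false;
  return tokens.remaining < tokens.limit * 0.1; // under 10% of the budget left
}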
+ /**
+ * Configuration for input and output audio.
+ */
+ export interface RealtimeAudioConfig {
+ input?: RealtimeAudioConfigInput;
+ output?: RealtimeAudioConfigOutput;
+ }
+ export interface RealtimeAudioConfigInput {
+ /**
+ * The format of the input audio.
+ */
+ format?: RealtimeAudioFormats;
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ noise_reduction?: RealtimeAudioConfigInput.NoiseReduction;
+ /**
+ * Configuration for input audio transcription. Defaults to off, and can be set to
+ * `null` to turn off once enabled. Input audio transcription is not native to the
+ * model, since the model consumes audio directly. Transcription runs
+ * asynchronously through
+ * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ * and should be treated as guidance of input audio content rather than precisely
+ * what the model heard. The client can optionally set the language and prompt for
+ * transcription; these offer additional guidance to the transcription service.
+ */
+ transcription?: AudioTranscription;
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger a
+ * model response.
+ *
+ * Server VAD means that the model will detect the start and end of speech based on
+ * audio volume and respond at the end of user speech.
+ *
+ * Semantic VAD is more advanced and uses a turn detection model (in conjunction
+ * with VAD) to semantically estimate whether the user has finished speaking, then
+ * dynamically sets a timeout based on this probability. For example, if user audio
+ * trails off with "uhhm", the model will score a low probability of turn end and
+ * wait longer for the user to continue speaking. This can be useful for more
+ * natural conversations, but may have a higher latency.
+ */
+ turn_detection?: RealtimeAudioInputTurnDetection | null;
+ }
+ export declare namespace RealtimeAudioConfigInput {
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface NoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: RealtimeAPI.NoiseReductionType;
+ }
+ }
+ export interface RealtimeAudioConfigOutput {
+ /**
+ * The format of the output audio.
+ */
+ format?: RealtimeAudioFormats;
+ /**
+ * The speed of the model's spoken response as a multiple of the original speed.
+ * 1.0 is the default speed. 0.25 is the minimum speed. 1.5 is the maximum speed.
+ * This value can only be changed in between model turns, not while a response is
+ * in progress.
+ *
+ * This parameter is a post-processing adjustment to the audio after it is
+ * generated; it's also possible to prompt the model to speak faster or slower.
+ */
+ speed?: number;
+ /**
+ * The voice the model uses to respond. Voice cannot be changed during the session
+ * once the model has responded with audio at least once. Current voice options are
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
+ * and `cedar`. We recommend `marin` and `cedar` for best quality.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse' | 'marin' | 'cedar';
+ }
+ /**
+ * The supported audio formats: PCM (24kHz sample rate only), G.711 μ-law, or
+ * G.711 A-law.
+ */
+ export type RealtimeAudioFormats = RealtimeAudioFormats.AudioPCM | RealtimeAudioFormats.AudioPCMU | RealtimeAudioFormats.AudioPCMA;
+ export declare namespace RealtimeAudioFormats {
+ /**
+ * The PCM audio format. Only a 24kHz sample rate is supported.
+ */
+ interface AudioPCM {
+ /**
+ * The sample rate of the audio. Always `24000`.
+ */
+ rate?: 24000;
+ /**
+ * The audio format. Always `audio/pcm`.
+ */
+ type?: 'audio/pcm';
+ }
+ /**
+ * The G.711 μ-law format.
+ */
+ interface AudioPCMU {
+ /**
+ * The audio format. Always `audio/pcmu`.
+ */
+ type?: 'audio/pcmu';
+ }
+ /**
+ * The G.711 A-law format.
+ */
+ interface AudioPCMA {
+ /**
+ * The audio format. Always `audio/pcma`.
+ */
+ type?: 'audio/pcma';
+ }
+ }
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger a
+ * model response.
+ *
+ * Server VAD means that the model will detect the start and end of speech based on
+ * audio volume and respond at the end of user speech.
+ *
+ * Semantic VAD is more advanced and uses a turn detection model (in conjunction
+ * with VAD) to semantically estimate whether the user has finished speaking, then
+ * dynamically sets a timeout based on this probability. For example, if user audio
+ * trails off with "uhhm", the model will score a low probability of turn end and
+ * wait longer for the user to continue speaking. This can be useful for more
+ * natural conversations, but may have a higher latency.
+ */
+ export type RealtimeAudioInputTurnDetection = RealtimeAudioInputTurnDetection.ServerVad | RealtimeAudioInputTurnDetection.SemanticVad;
+ export declare namespace RealtimeAudioInputTurnDetection {
+ /**
+ * Server-side voice activity detection (VAD) which flips on when user speech is
+ * detected and off after a period of silence.
+ */
+ interface ServerVad {
+ /**
+ * Type of turn detection, `server_vad` to turn on simple Server VAD.
+ */
+ type: 'server_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Optional timeout after which a model response will be triggered automatically.
+ * This is useful for situations in which a long pause from the user is unexpected,
+ * such as a phone call. The model will effectively prompt the user to continue the
+ * conversation based on the current context.
+ *
+ * The timeout value will be applied after the last model response's audio has
+ * finished playing, i.e. it's set to the `response.done` time plus audio playback
+ * duration.
+ *
+ * An `input_audio_buffer.timeout_triggered` event (plus events associated with the
+ * Response) will be emitted when the timeout is reached. Idle timeout is currently
+ * only supported for `server_vad` mode.
+ */
+ idle_timeout_ms?: number | null;
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ /**
+ * Used only for `server_vad` mode. Amount of audio to include before the VAD
+ * detected speech (in milliseconds). Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+ * milliseconds). Defaults to 500ms. With shorter values the model will respond
+ * more quickly, but may jump in on short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0); this
+ * defaults to 0.5. A higher threshold will require louder audio to activate the
+ * model, and thus might perform better in noisy environments.
+ */
+ threshold?: number;
+ }
+ /**
+ * Server-side semantic turn detection which uses a model to determine when the
+ * user has finished speaking.
+ */
+ interface SemanticVad {
+ /**
+ * Type of turn detection, `semantic_vad` to turn on Semantic VAD.
+ */
+ type: 'semantic_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+ * will wait longer for the user to continue speaking, `high` will respond more
+ * quickly. `auto` is the default and is equivalent to `medium`. `low`, `medium`,
+ * and `high` have max timeouts of 8s, 4s, and 2s respectively.
+ */
+ eagerness?: 'low' | 'medium' | 'high' | 'auto';
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ }
+ }
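Pulling the input-audio options together, a phone-call-style app might pair far-field noise reduction with Server VAD and an idle timeout. Every value below is an illustrative choice consistent with the documented defaults and ranges, not a recommendation:

const audioInput: RealtimeAudioConfigInput = {
  format: { type: 'audio/pcm', rate: 24000 },
  noise_reduction: { type: 'far_field' }, // e.g. a speakerphone microphone
  turn_detection: {
    type: 'server_vad',
    threshold: 0.6,          // slightly less sensitive, for a noisy room
    prefix_padding_ms: 300,
    silence_duration_ms: 500,
    idle_timeout_ms: 8000,   // nudge a silent caller after ~8s
  },
};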
+ /**
+ * A realtime client event.
+ */
+ export type RealtimeClientEvent = ConversationItemCreateEvent | ConversationItemDeleteEvent | ConversationItemRetrieveEvent | ConversationItemTruncateEvent | InputAudioBufferAppendEvent | InputAudioBufferClearEvent | OutputAudioBufferClearEvent | InputAudioBufferCommitEvent | ResponseCancelEvent | ResponseCreateEvent | SessionUpdateEvent;
+ /**
+ * An assistant message item in a Realtime conversation.
+ */
+ export interface RealtimeConversationItemAssistantMessage {
+ /**
+ * The content of the message.
+ */
+ content: Array<RealtimeConversationItemAssistantMessage.Content>;
+ /**
+ * The role of the message sender. Always `assistant`.
+ */
+ role: 'assistant';
+ /**
+ * The type of the item. Always `message`.
+ */
+ type: 'message';
+ /**
+ * The unique ID of the item. This may be provided by the client or generated by
+ * the server.
+ */
+ id?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`. Optional
+ * when creating a new item.
+ */
+ object?: 'realtime.item';
+ /**
+ * The status of the item. Has no effect on the conversation.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ }
+ export declare namespace RealtimeConversationItemAssistantMessage {
+ interface Content {
+ /**
+ * Base64-encoded audio bytes; these will be parsed as the format specified in the
+ * session output audio type configuration. This defaults to PCM 16-bit 24kHz mono
+ * if not specified.
+ */
+ audio?: string;
+ /**
+ * The text content.
+ */
+ text?: string;
+ /**
+ * The transcript of the audio content; this will always be present if the output
+ * type is `audio`.
+ */
+ transcript?: string;
+ /**
+ * The content type, `output_text` or `output_audio` depending on the session
+ * `output_modalities` configuration.
+ */
+ type?: 'output_text' | 'output_audio';
+ }
+ }
+ /**
+ * A function call item in a Realtime conversation.
+ */
+ export interface RealtimeConversationItemFunctionCall {
+ /**
+ * The arguments of the function call. This is a JSON-encoded string representing
+ * the arguments passed to the function, for example
+ * `{"arg1": "value1", "arg2": 42}`.
+ */
+ arguments: string;
+ /**
+ * The name of the function being called.
+ */
+ name: string;
+ /**
+ * The type of the item. Always `function_call`.
+ */
+ type: 'function_call';
+ /**
+ * The unique ID of the item. This may be provided by the client or generated by
+ * the server.
+ */
+ id?: string;
+ /**
+ * The ID of the function call.
+ */
+ call_id?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`. Optional
+ * when creating a new item.
+ */
+ object?: 'realtime.item';
+ /**
+ * The status of the item. Has no effect on the conversation.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ }
+ /**
+ * A function call output item in a Realtime conversation.
+ */
+ export interface RealtimeConversationItemFunctionCallOutput {
+ /**
+ * The ID of the function call this output is for.
+ */
+ call_id: string;
+ /**
+ * The output of the function call; this is free text and can contain any
+ * information or simply be empty.
+ */
+ output: string;
+ /**
+ * The type of the item. Always `function_call_output`.
+ */
+ type: 'function_call_output';
+ /**
+ * The unique ID of the item. This may be provided by the client or generated by
+ * the server.
+ */
+ id?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`. Optional
+ * when creating a new item.
+ */
+ object?: 'realtime.item';
+ /**
+ * The status of the item. Has no effect on the conversation.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ }
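These two item shapes imply a round trip: run the named function locally, return the result as a `function_call_output` item, then request a new response so the model can use it. `sendEvent` and `runTool` are hypothetical helpers:

// Hypothetical local tool runner.
declare function runTool(name: string, args: unknown): Promise<string>;

async function handleFunctionCall(call: RealtimeConversationItemFunctionCall): Promise<void> {
  const result = await runTool(call.name, JSON.parse(call.arguments));
  const item: RealtimeConversationItemFunctionCallOutput = {
    type: 'function_call_output',
    call_id: call.call_id ?? '', // the call_id ties the output to its function_call
    output: result,
  };
  sendEvent({ type: 'conversation.item.create', item });
  sendEvent({ type: 'response.create' }); // let the model incorporate the result
}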
+ /**
+ * A system message in a Realtime conversation can be used to provide additional
+ * context or instructions to the model. This is similar to, but distinct from, the
+ * instruction prompt provided at the start of a conversation, as system messages
+ * can be added at any point in the conversation. For major changes to the
+ * conversation's behavior, use instructions, but for smaller updates (e.g. "the
+ * user is now asking about a different topic"), use system messages.
+ */
+ export interface RealtimeConversationItemSystemMessage {
+ /**
+ * The content of the message.
+ */
+ content: Array<RealtimeConversationItemSystemMessage.Content>;
+ /**
+ * The role of the message sender. Always `system`.
+ */
+ role: 'system';
+ /**
+ * The type of the item. Always `message`.
+ */
+ type: 'message';
+ /**
+ * The unique ID of the item. This may be provided by the client or generated by
+ * the server.
+ */
+ id?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`. Optional
+ * when creating a new item.
+ */
+ object?: 'realtime.item';
+ /**
+ * The status of the item. Has no effect on the conversation.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ }
+ export declare namespace RealtimeConversationItemSystemMessage {
+ interface Content {
+ /**
+ * The text content.
+ */
+ text?: string;
+ /**
+ * The content type. Always `input_text` for system messages.
+ */
+ type?: 'input_text';
+ }
+ }
+ /**
+ * A user message item in a Realtime conversation.
+ */
+ export interface RealtimeConversationItemUserMessage {
+ /**
+ * The content of the message.
+ */
+ content: Array<RealtimeConversationItemUserMessage.Content>;
+ /**
+ * The role of the message sender. Always `user`.
+ */
+ role: 'user';
+ /**
+ * The type of the item. Always `message`.
+ */
+ type: 'message';
+ /**
+ * The unique ID of the item. This may be provided by the client or generated by
+ * the server.
+ */
+ id?: string;
+ /**
+ * Identifier for the API object being returned - always `realtime.item`. Optional
+ * when creating a new item.
+ */
+ object?: 'realtime.item';
+ /**
+ * The status of the item. Has no effect on the conversation.
+ */
+ status?: 'completed' | 'incomplete' | 'in_progress';
+ }
+ export declare namespace RealtimeConversationItemUserMessage {
+ interface Content {
+ /**
+ * Base64-encoded audio bytes (for `input_audio`); these will be parsed as the
+ * format specified in the session input audio type configuration. This defaults to
+ * PCM 16-bit 24kHz mono if not specified.
+ */
+ audio?: string;
+ /**
+ * The detail level of the image (for `input_image`). `auto` will default to
+ * `high`.
+ */
+ detail?: 'auto' | 'low' | 'high';
+ /**
+ * Base64-encoded image bytes (for `input_image`) as a data URI. For example
+ * `data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA...`. Supported formats are PNG
+ * and JPEG.
+ */
+ image_url?: string;
+ /**
+ * The text content (for `input_text`).
+ */
+ text?: string;
+ /**
+ * Transcript of the audio (for `input_audio`). This is not sent to the model, but
+ * will be attached to the message item for reference.
+ */
+ transcript?: string;
+ /**
+ * The content type (`input_text`, `input_audio`, or `input_image`).
+ */
+ type?: 'input_text' | 'input_audio' | 'input_image';
+ }
+ }
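For example, a text-plus-image user turn could be injected with `conversation.item.create` (same hypothetical `sendEvent` helper; the data URI is deliberately truncated):

const userItem: RealtimeConversationItemUserMessage = {
  type: 'message',
  role: 'user',
  content: [
    { type: 'input_text', text: 'What is in this picture?' },
    { type: 'input_image', image_url: 'data:image/png;base64,iVBORw0KGgo...', detail: 'auto' },
  ],
};
sendEvent({ type: 'conversation.item.create', item: userItem });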
+ /**
+ * Details of the error.
+ */
+ export interface RealtimeError {
+ /**
+ * A human-readable error message.
+ */
+ message: string;
+ /**
+ * The type of error (e.g., "invalid_request_error", "server_error").
+ */
+ type: string;
+ /**
+ * Error code, if any.
+ */
+ code?: string | null;
+ /**
+ * The event_id of the client event that caused the error, if applicable.
+ */
+ event_id?: string | null;
+ /**
+ * Parameter related to the error, if any.
+ */
+ param?: string | null;
+ }
+ /**
+ * Returned when an error occurs, which could be a client problem or a server
+ * problem. Most errors are recoverable and the session will stay open; we
+ * recommend that implementors monitor and log error messages by default.
+ */
+ export interface RealtimeErrorEvent {
+ /**
+ * Details of the error.
+ */
+ error: RealtimeError;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The event type, must be `error`.
+ */
+ type: 'error';
+ }
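In the spirit of the recommendation above, a minimal logging handler might look like the following; note that `error.event_id` points back at the offending client event, which is where the optional client-generated `event_id` fields pay off:

function onError(event: RealtimeErrorEvent): void {
  const { type, code, message, event_id } = event.error;
  // Most errors leave the session open, so log rather than tear down.
  console.error(`Realtime error ${type}${code ? ` (${code})` : ''}: ${message}`, {
    causedByClientEvent: event_id ?? 'n/a',
  });
}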
+ export interface RealtimeFunctionTool {
+ /**
+ * The description of the function, including guidance on when and how to call it,
+ * and guidance about what to tell the user when calling (if anything).
+ */
+ description?: string;
+ /**
+ * The name of the function.
+ */
+ name?: string;
+ /**
+ * Parameters of the function in JSON Schema.
+ */
+ parameters?: unknown;
+ /**
+ * The type of the tool, i.e. `function`.
+ */
+ type?: 'function';
+ }
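A representative definition, with the JSON Schema kept deliberately small; the weather lookup is purely illustrative:

const weatherTool: RealtimeFunctionTool = {
  type: 'function',
  name: 'get_weather',
  description: 'Look up current weather. Tell the user you are checking before calling.',
  parameters: {
    type: 'object',
    properties: { city: { type: 'string' } },
    required: ['city'],
  },
};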
+ /**
+ * A Realtime item requesting human approval of a tool invocation.
+ */
+ export interface RealtimeMcpApprovalRequest {
+ /**
+ * The unique ID of the approval request.
+ */
+ id: string;
+ /**
+ * A JSON string of arguments for the tool.
+ */
+ arguments: string;
+ /**
+ * The name of the tool to run.
+ */
+ name: string;
+ /**
+ * The label of the MCP server making the request.
+ */
+ server_label: string;
+ /**
+ * The type of the item. Always `mcp_approval_request`.
+ */
+ type: 'mcp_approval_request';
+ }
+ /**
+ * A Realtime item responding to an MCP approval request.
+ */
+ export interface RealtimeMcpApprovalResponse {
+ /**
+ * The unique ID of the approval response.
+ */
+ id: string;
+ /**
+ * The ID of the approval request being answered.
+ */
+ approval_request_id: string;
+ /**
+ * Whether the request was approved.
+ */
+ approve: boolean;
+ /**
+ * The type of the item. Always `mcp_approval_response`.
+ */
+ type: 'mcp_approval_response';
+ /**
+ * Optional reason for the decision.
+ */
+ reason?: string | null;
+ }
+ /**
+ * A Realtime item listing tools available on an MCP server.
+ */
+ export interface RealtimeMcpListTools {
+ /**
+ * The label of the MCP server.
+ */
+ server_label: string;
+ /**
+ * The tools available on the server.
+ */
+ tools: Array<RealtimeMcpListTools.Tool>;
+ /**
+ * The type of the item. Always `mcp_list_tools`.
+ */
+ type: 'mcp_list_tools';
+ /**
+ * The unique ID of the list.
+ */
+ id?: string;
+ }
+ export declare namespace RealtimeMcpListTools {
+ /**
+ * A tool available on an MCP server.
+ */
+ interface Tool {
+ /**
+ * The JSON schema describing the tool's input.
+ */
+ input_schema: unknown;
+ /**
+ * The name of the tool.
+ */
+ name: string;
+ /**
+ * Additional annotations about the tool.
+ */
+ annotations?: unknown | null;
+ /**
+ * The description of the tool.
+ */
+ description?: string | null;
+ }
+ }
+ export interface RealtimeMcpProtocolError {
+ code: number;
+ message: string;
+ type: 'protocol_error';
+ }
+ /**
+ * A Realtime item representing an invocation of a tool on an MCP server.
+ */
+ export interface RealtimeMcpToolCall {
+ /**
+ * The unique ID of the tool call.
+ */
+ id: string;
+ /**
+ * A JSON string of the arguments passed to the tool.
+ */
+ arguments: string;
+ /**
+ * The name of the tool that was run.
+ */
+ name: string;
+ /**
+ * The label of the MCP server running the tool.
+ */
+ server_label: string;
+ /**
+ * The type of the item. Always `mcp_call`.
+ */
+ type: 'mcp_call';
+ /**
+ * The ID of an associated approval request, if any.
+ */
+ approval_request_id?: string | null;
+ /**
+ * The error from the tool call, if any.
+ */
+ error?: RealtimeMcpProtocolError | RealtimeMcpToolExecutionError | RealtimeMcphttpError | null;
+ /**
+ * The output from the tool call.
+ */
+ output?: string | null;
+ }
+ export interface RealtimeMcpToolExecutionError {
+ message: string;
+ type: 'tool_execution_error';
+ }
+ export interface RealtimeMcphttpError {
+ code: number;
+ message: string;
+ type: 'http_error';
+ }
+ /**
+ * The response resource.
+ */
+ export interface RealtimeResponse {
+ /**
+ * The unique ID of the response; it will look like `resp_1234`.
+ */
+ id?: string;
+ /**
+ * Configuration for audio output.
+ */
+ audio?: RealtimeResponse.Audio;
+ /**
+ * Which conversation the response is added to, determined by the `conversation`
+ * field in the `response.create` event. If `auto`, the response will be added to
+ * the default conversation and the value of `conversation_id` will be an ID like
+ * `conv_1234`. If `none`, the response will not be added to any conversation and
+ * the value of `conversation_id` will be `null`. If responses are being triggered
+ * automatically by VAD, the response will be added to the default conversation.
+ */
+ conversation_id?: string;
+ /**
+ * Maximum number of output tokens for a single assistant response, inclusive of
+ * tool calls, that was used in this response.
+ */
+ max_output_tokens?: number | 'inf';
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format, and
+ * querying for objects via API or the dashboard.
+ *
+ * Keys are strings with a maximum length of 64 characters. Values are strings with
+ * a maximum length of 512 characters.
+ */
+ metadata?: Shared.Metadata | null;
+ /**
+ * The object type, must be `realtime.response`.
+ */
+ object?: 'realtime.response';
+ /**
+ * The list of output items generated by the response.
+ */
+ output?: Array<ConversationItem>;
+ /**
+ * The set of modalities the model used to respond; currently the only possible
+ * values are `["audio"]` and `["text"]`. Audio output always includes a text
+ * transcript. Setting the output to mode `text` will disable audio output from the
+ * model.
+ */
+ output_modalities?: Array<'text' | 'audio'>;
+ /**
+ * The final status of the response (`completed`, `cancelled`, `failed`,
+ * `incomplete`, or `in_progress`).
+ */
+ status?: 'completed' | 'cancelled' | 'failed' | 'incomplete' | 'in_progress';
+ /**
+ * Additional details about the status.
+ */
+ status_details?: RealtimeResponseStatus;
+ /**
+ * Usage statistics for the Response; this will correspond to billing. A Realtime
+ * API session will maintain a conversation context and append new Items to the
+ * Conversation, thus output from previous turns (text and audio tokens) will
+ * become the input for later turns.
+ */
+ usage?: RealtimeResponseUsage;
+ }
+ export declare namespace RealtimeResponse {
+ /**
+ * Configuration for audio output.
+ */
+ interface Audio {
+ output?: Audio.Output;
+ }
+ namespace Audio {
+ interface Output {
+ /**
+ * The format of the output audio.
+ */
+ format?: RealtimeAPI.RealtimeAudioFormats;
+ /**
+ * The voice the model uses to respond. Voice cannot be changed during the session
+ * once the model has responded with audio at least once. Current voice options are
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
+ * and `cedar`. We recommend `marin` and `cedar` for best quality.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse' | 'marin' | 'cedar';
+ }
+ }
+ }
+ /**
+ * Configuration for audio output.
+ */
+ export interface RealtimeResponseCreateAudioOutput {
+ output?: RealtimeResponseCreateAudioOutput.Output;
+ }
+ export declare namespace RealtimeResponseCreateAudioOutput {
+ interface Output {
+ /**
+ * The format of the output audio.
+ */
+ format?: RealtimeAPI.RealtimeAudioFormats;
+ /**
+ * The voice the model uses to respond. Voice cannot be changed during the session
+ * once the model has responded with audio at least once. Current voice options are
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`,
+ * and `cedar`. We recommend `marin` and `cedar` for best quality.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse' | 'marin' | 'cedar';
+ }
+ }
+ /**
+ * Give the model access to additional tools via remote Model Context Protocol
+ * (MCP) servers.
+ * [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
+ */
+ export interface RealtimeResponseCreateMcpTool {
+ /**
+ * A label for this MCP server, used to identify it in tool calls.
+ */
+ server_label: string;
+ /**
+ * The type of the MCP tool. Always `mcp`.
+ */
+ type: 'mcp';
+ /**
+ * List of allowed tool names or a filter object.
+ */
+ allowed_tools?: Array<string> | RealtimeResponseCreateMcpTool.McpToolFilter | null;
+ /**
+ * An OAuth access token that can be used with a remote MCP server, either with a
+ * custom MCP server URL or a service connector. Your application must handle the
+ * OAuth authorization flow and provide the token here.
+ */
+ authorization?: string;
+ /**
+ * Identifier for service connectors, like those available in ChatGPT. One of
+ * `server_url` or `connector_id` must be provided. Learn more about service
+ * connectors
+ * [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+ *
+ * Currently supported `connector_id` values are:
+ *
+ * - Dropbox: `connector_dropbox`
+ * - Gmail: `connector_gmail`
+ * - Google Calendar: `connector_googlecalendar`
+ * - Google Drive: `connector_googledrive`
+ * - Microsoft Teams: `connector_microsoftteams`
+ * - Outlook Calendar: `connector_outlookcalendar`
+ * - Outlook Email: `connector_outlookemail`
+ * - SharePoint: `connector_sharepoint`
+ */
+ connector_id?: 'connector_dropbox' | 'connector_gmail' | 'connector_googlecalendar' | 'connector_googledrive' | 'connector_microsoftteams' | 'connector_outlookcalendar' | 'connector_outlookemail' | 'connector_sharepoint';
+ /**
+ * Optional HTTP headers to send to the MCP server. Use for authentication or other
+ * purposes.
+ */
+ headers?: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Specify which of the MCP server's tools require approval.
+ */
+ require_approval?: RealtimeResponseCreateMcpTool.McpToolApprovalFilter | 'always' | 'never' | null;
+ /**
+ * Optional description of the MCP server, used to provide more context.
+ */
+ server_description?: string;
+ /**
+ * The URL for the MCP server. One of `server_url` or `connector_id` must be
+ * provided.
+ */
+ server_url?: string;
+ }
+ export declare namespace RealtimeResponseCreateMcpTool {
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ interface McpToolFilter {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ /**
+ * Specify which of the MCP server's tools require approval. Can be `always`,
+ * `never`, or a filter object associated with tools that require approval.
+ */
+ interface McpToolApprovalFilter {
+ /**
+ * A filter object to specify which tools always require approval.
+ */
+ always?: McpToolApprovalFilter.Always;
+ /**
+ * A filter object to specify which tools never require approval.
+ */
+ never?: McpToolApprovalFilter.Never;
+ }
+ namespace McpToolApprovalFilter {
+ /**
+ * A filter object matching tools that always require approval.
+ */
+ interface Always {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ /**
+ * A filter object matching tools that never require approval.
+ */
+ interface Never {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ }
+ }
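For instance, a read-mostly MCP server might let tools annotated as read-only run without sign-off. The URL and label are placeholders, and the filter semantics here follow the field docs above:

const mcpTool: RealtimeResponseCreateMcpTool = {
  type: 'mcp',
  server_label: 'docs',
  server_url: 'https://mcp.example.com', // placeholder; a connector_id could be used instead
  require_approval: {
    never: { read_only: true }, // tools annotated readOnlyHint skip approval
  },
};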
+ /**
+ * Create a new Realtime response with these parameters.
+ */
+ export interface RealtimeResponseCreateParams {
+ /**
+ * Configuration for audio output.
+ */
+ audio?: RealtimeResponseCreateAudioOutput;
+ /**
+ * Controls which conversation the response is added to. Currently supports `auto`
+ * and `none`, with `auto` as the default value. The `auto` value means that the
+ * contents of the response will be added to the default conversation. Set this to
+ * `none` to create an out-of-band response which will not add items to the default
+ * conversation.
+ */
+ conversation?: (string & {}) | 'auto' | 'none';
+ /**
+ * Input items to include in the prompt for the model. Using this field creates a
+ * new context for this Response instead of using the default conversation. An
+ * empty array `[]` will clear the context for this Response. Note that this can
+ * include references to items that previously appeared in the session, using their
+ * IDs.
+ */
+ input?: Array<ConversationItem>;
+ /**
+ * The default system instructions (i.e. system message) prepended to model calls.
+ * This field allows the client to guide the model on desired responses. The model
+ * can be instructed on response content and format (e.g. "be extremely succinct",
+ * "act friendly", "here are examples of good responses") and on audio behavior
+ * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+ * instructions are not guaranteed to be followed by the model, but they provide
+ * guidance to the model on the desired behavior. Note that the server sets default
+ * instructions which will be used if this field is not set and are visible in the
+ * `session.created` event at the start of the session.
+ */
+ instructions?: string;
+ /**
+ * Maximum number of output tokens for a single assistant response, inclusive of
+ * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ */
+ max_output_tokens?: number | 'inf';
+ /**
+ * Set of 16 key-value pairs that can be attached to an object. This can be useful
+ * for storing additional information about the object in a structured format, and
+ * querying for objects via API or the dashboard.
+ *
+ * Keys are strings with a maximum length of 64 characters. Values are strings with
+ * a maximum length of 512 characters.
+ */
+ metadata?: Shared.Metadata | null;
+ /**
+ * The set of modalities the model should use to respond; currently the only
+ * possible values are `["audio"]` and `["text"]`. Audio output always includes a
+ * text transcript. Setting the output to mode `text` will disable audio output
+ * from the model.
+ */
+ output_modalities?: Array<'text' | 'audio'>;
+ /**
+ * Reference to a prompt template and its variables.
+ * [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ */
+ prompt?: ResponsesAPI.ResponsePrompt | null;
+ /**
+ * How the model chooses tools. Provide one of the string modes or force a specific
+ * function/MCP tool.
+ */
+ tool_choice?: ResponsesAPI.ToolChoiceOptions | ResponsesAPI.ToolChoiceFunction | ResponsesAPI.ToolChoiceMcp;
+ /**
+ * Tools available to the model.
+ */
+ tools?: Array<RealtimeFunctionTool | RealtimeResponseCreateMcpTool>;
+ }
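Putting the params to work: an out-of-band request that stays out of the dialogue (`conversation: 'none'`) and is tagged via `metadata` so its events can be told apart later. The field values are illustrative, and `sendEvent` is the same hypothetical helper as before:

const params: RealtimeResponseCreateParams = {
  conversation: 'none',                 // do not add items to the default conversation
  metadata: { purpose: 'topic_check' }, // find this response's events later by tag
  output_modalities: ['text'],
  instructions: 'Classify the topic of the conversation so far in one word.',
};
sendEvent({ type: 'response.create', response: params });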
+ /**
+ * Additional details about the status.
+ */
+ export interface RealtimeResponseStatus {
+ /**
+ * A description of the error that caused the response to fail, populated when the
+ * `status` is `failed`.
+ */
+ error?: RealtimeResponseStatus.Error;
+ /**
+ * The reason the Response did not complete. For a `cancelled` Response, one of
+ * `turn_detected` (the server VAD detected a new start of speech) or
+ * `client_cancelled` (the client sent a cancel event). For an `incomplete`
+ * Response, one of `max_output_tokens` or `content_filter` (the server-side safety
+ * filter activated and cut off the response).
+ */
+ reason?: 'turn_detected' | 'client_cancelled' | 'max_output_tokens' | 'content_filter';
+ /**
+ * The type of error that caused the response to fail, corresponding with the
+ * `status` field (`completed`, `cancelled`, `incomplete`, `failed`).
+ */
+ type?: 'completed' | 'cancelled' | 'incomplete' | 'failed';
+ }
+ export declare namespace RealtimeResponseStatus {
+ /**
+ * A description of the error that caused the response to fail, populated when the
+ * `status` is `failed`.
+ */
+ interface Error {
+ /**
+ * Error code, if any.
+ */
+ code?: string;
+ /**
+ * The type of error.
+ */
+ type?: string;
+ }
+ }
+ /**
+ * Usage statistics for the Response; this will correspond to billing. A Realtime
+ * API session will maintain a conversation context and append new Items to the
+ * Conversation, thus output from previous turns (text and audio tokens) will
+ * become the input for later turns.
+ */
+ export interface RealtimeResponseUsage {
+ /**
+ * Details about the input tokens used in the Response. Cached tokens are tokens
+ * from previous turns in the conversation that are included as context for the
+ * current response. Cached tokens here are counted as a subset of input tokens,
+ * meaning input tokens will include cached and uncached tokens.
+ */
+ input_token_details?: RealtimeResponseUsageInputTokenDetails;
+ /**
+ * The number of input tokens used in the Response, including text and audio
+ * tokens.
+ */
+ input_tokens?: number;
+ /**
+ * Details about the output tokens used in the Response.
+ */
+ output_token_details?: RealtimeResponseUsageOutputTokenDetails;
+ /**
+ * The number of output tokens sent in the Response, including text and audio
+ * tokens.
+ */
+ output_tokens?: number;
+ /**
+ * The total number of tokens in the Response, including input and output text and
+ * audio tokens.
+ */
+ total_tokens?: number;
+ }
+ /**
+ * Details about the input tokens used in the Response. Cached tokens are tokens
+ * from previous turns in the conversation that are included as context for the
+ * current response. Cached tokens here are counted as a subset of input tokens,
+ * meaning input tokens will include cached and uncached tokens.
+ */
+ export interface RealtimeResponseUsageInputTokenDetails {
+ /**
+ * The number of audio tokens used as input for the Response.
+ */
+ audio_tokens?: number;
+ /**
+ * The number of cached tokens used as input for the Response.
+ */
+ cached_tokens?: number;
+ /**
+ * Details about the cached tokens used as input for the Response.
+ */
+ cached_tokens_details?: RealtimeResponseUsageInputTokenDetails.CachedTokensDetails;
+ /**
+ * The number of image tokens used as input for the Response.
+ */
+ image_tokens?: number;
+ /**
+ * The number of text tokens used as input for the Response.
+ */
+ text_tokens?: number;
+ }
+ export declare namespace RealtimeResponseUsageInputTokenDetails {
+ /**
+ * Details about the cached tokens used as input for the Response.
+ */
+ interface CachedTokensDetails {
+ /**
+ * The number of cached audio tokens used as input for the Response.
+ */
+ audio_tokens?: number;
+ /**
+ * The number of cached image tokens used as input for the Response.
+ */
+ image_tokens?: number;
+ /**
+ * The number of cached text tokens used as input for the Response.
+ */
+ text_tokens?: number;
+ }
+ }
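Since cached tokens are documented as a subset of input tokens, the uncached (full-price) share falls out by subtraction; a small accounting sketch:

function uncachedInputTokens(usage: RealtimeResponseUsage): number {
  const input = usage.input_tokens ?? 0;
  const cached = usage.input_token_details?.cached_tokens ?? 0;
  // Cached tokens are a subset of input tokens, so this stays non-negative
  // for well-formed usage payloads.
  return input - cached;
}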
+ /**
+ * Details about the output tokens used in the Response.
+ */
+ export interface RealtimeResponseUsageOutputTokenDetails {
+ /**
+ * The number of audio tokens used in the Response.
+ */
+ audio_tokens?: number;
+ /**
+ * The number of text tokens used in the Response.
+ */
+ text_tokens?: number;
+ }
+ /**
+ * A realtime server event.
+ */
+ export type RealtimeServerEvent = ConversationCreatedEvent | ConversationItemCreatedEvent | ConversationItemDeletedEvent | ConversationItemInputAudioTranscriptionCompletedEvent | ConversationItemInputAudioTranscriptionDeltaEvent | ConversationItemInputAudioTranscriptionFailedEvent | RealtimeServerEvent.ConversationItemRetrieved | ConversationItemTruncatedEvent | RealtimeErrorEvent | InputAudioBufferClearedEvent | InputAudioBufferCommittedEvent | InputAudioBufferSpeechStartedEvent | InputAudioBufferSpeechStoppedEvent | RateLimitsUpdatedEvent | ResponseAudioDeltaEvent | ResponseAudioDoneEvent | ResponseAudioTranscriptDeltaEvent | ResponseAudioTranscriptDoneEvent | ResponseContentPartAddedEvent | ResponseContentPartDoneEvent | ResponseCreatedEvent | ResponseDoneEvent | ResponseFunctionCallArgumentsDeltaEvent | ResponseFunctionCallArgumentsDoneEvent | ResponseOutputItemAddedEvent | ResponseOutputItemDoneEvent | ResponseTextDeltaEvent | ResponseTextDoneEvent | SessionCreatedEvent | SessionUpdatedEvent | RealtimeServerEvent.OutputAudioBufferStarted | RealtimeServerEvent.OutputAudioBufferStopped | RealtimeServerEvent.OutputAudioBufferCleared | ConversationItemAdded | ConversationItemDone | InputAudioBufferTimeoutTriggered | ConversationItemInputAudioTranscriptionSegment | McpListToolsInProgress | McpListToolsCompleted | McpListToolsFailed | ResponseMcpCallArgumentsDelta | ResponseMcpCallArgumentsDone | ResponseMcpCallInProgress | ResponseMcpCallCompleted | ResponseMcpCallFailed;
+ export declare namespace RealtimeServerEvent {
+ /**
+ * Returned when a conversation item is retrieved with
+ * `conversation.item.retrieve`. This is provided as a way to fetch the server's
+ * representation of an item, for example to get access to the post-processed audio
+ * data after noise cancellation and VAD. It includes the full content of the Item,
+ * including audio data.
+ */
+ interface ConversationItemRetrieved {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: RealtimeAPI.ConversationItem;
+ /**
+ * The event type, must be `conversation.item.retrieved`.
+ */
+ type: 'conversation.item.retrieved';
+ }
+ /**
+ * **WebRTC Only:** Emitted when the server begins streaming audio to the client.
+ * This event is emitted after an audio content part has been added
+ * (`response.content_part.added`) to the response.
+ * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ */
+ interface OutputAudioBufferStarted {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The unique ID of the response that produced the audio.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `output_audio_buffer.started`.
+ */
+ type: 'output_audio_buffer.started';
+ }
+ /**
+ * **WebRTC Only:** Emitted when the output audio buffer has been completely
+ * drained on the server, and no more audio is forthcoming. This event is emitted
+ * after the full response data has been sent to the client (`response.done`).
+ * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ */
+ interface OutputAudioBufferStopped {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The unique ID of the response that produced the audio.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `output_audio_buffer.stopped`.
+ */
+ type: 'output_audio_buffer.stopped';
+ }
+ /**
+ * **WebRTC Only:** Emitted when the output audio buffer is cleared. This happens
+ * either in VAD mode when the user has interrupted
+ * (`input_audio_buffer.speech_started`), or when the client has emitted the
+ * `output_audio_buffer.clear` event to manually cut off the current audio
+ * response.
+ * [Learn more](https://platform.openai.com/docs/guides/realtime-conversations#client-and-server-events-for-audio-in-webrtc).
+ */
+ interface OutputAudioBufferCleared {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The unique ID of the response that produced the audio.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `output_audio_buffer.cleared`.
+ */
+ type: 'output_audio_buffer.cleared';
+ }
+ }
2173
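
For orientation, here is a minimal consumer sketch (not part of the package) showing how the `type` discriminant narrows the `RealtimeServerEvent` union; the handler name and the logging are illustrative only.

const handleServerEvent = (event: RealtimeServerEvent): void => {
  switch (event.type) {
    case 'conversation.item.retrieved':
      // Narrowed to RealtimeServerEvent.ConversationItemRetrieved.
      console.log('retrieved item', event.item);
      break;
    case 'output_audio_buffer.started':
      // Narrowed to RealtimeServerEvent.OutputAudioBufferStarted (WebRTC only).
      console.log('audio started for response', event.response_id);
      break;
    default:
      break;
  }
};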
+ /**
+ * Realtime session object for the beta interface.
+ */
+ export interface RealtimeSession {
+ /**
+ * Unique identifier for the session that looks like `sess_1234567890abcdef`.
+ */
+ id?: string;
+ /**
+ * Expiration timestamp for the session, in seconds since epoch.
+ */
+ expires_at?: number;
+ /**
+ * Additional fields to include in server outputs.
+ *
+ * - `item.input_audio_transcription.logprobs`: Include logprobs for input audio
+ * transcription.
+ */
+ include?: Array<'item.input_audio_transcription.logprobs'> | null;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+ * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+ * (mono), and little-endian byte order.
+ */
+ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ input_audio_noise_reduction?: RealtimeSession.InputAudioNoiseReduction;
+ /**
+ * Configuration for input audio transcription, defaults to off and can be set to
+ * `null` to turn off once on. Input audio transcription is not native to the
+ * model, since the model consumes audio directly. Transcription runs
+ * asynchronously through
+ * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ * and should be treated as guidance of input audio content rather than precisely
+ * what the model heard. The client can optionally set the language and prompt for
+ * transcription; these offer additional guidance to the transcription service.
+ */
+ input_audio_transcription?: AudioTranscription | null;
+ /**
+ * The default system instructions (i.e. system message) prepended to model calls.
+ * This field allows the client to guide the model on desired responses. The model
+ * can be instructed on response content and format (e.g. "be extremely succinct",
+ * "act friendly", "here are examples of good responses") and on audio behavior
+ * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+ * instructions are not guaranteed to be followed by the model, but they provide
+ * guidance to the model on the desired behavior.
+ *
+ * Note that the server sets default instructions which will be used if this field
+ * is not set and are visible in the `session.created` event at the start of the
+ * session.
+ */
+ instructions?: string;
+ /**
+ * Maximum number of output tokens for a single assistant response, inclusive of
+ * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ */
+ max_response_output_tokens?: number | 'inf';
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * The Realtime model used for this session.
+ */
+ model?: 'gpt-realtime' | 'gpt-realtime-2025-08-28' | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
+ /**
+ * The object type. Always `realtime.session`.
+ */
+ object?: 'realtime.session';
+ /**
+ * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+ * For `pcm16`, output audio is sampled at a rate of 24kHz.
+ */
+ output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+ /**
+ * Reference to a prompt template and its variables.
+ * [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ */
+ prompt?: ResponsesAPI.ResponsePrompt | null;
+ /**
+ * The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
+ * minimum speed. 1.5 is the maximum speed. This value can only be changed in
+ * between model turns, not while a response is in progress.
+ */
+ speed?: number;
+ /**
+ * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
+ * temperature of 0.8 is highly recommended for best performance.
+ */
+ temperature?: number;
+ /**
+ * How the model chooses tools. Options are `auto`, `none`, `required`, or specify
+ * a function.
+ */
+ tool_choice?: string;
+ /**
+ * Tools (functions) available to the model.
+ */
+ tools?: Array<RealtimeFunctionTool>;
+ /**
+ * Configuration options for tracing. Set to null to disable tracing. Once tracing
+ * is enabled for a session, the configuration cannot be modified.
+ *
+ * `auto` will create a trace for the session with default values for the workflow
+ * name, group id, and metadata.
+ */
+ tracing?: 'auto' | RealtimeSession.TracingConfiguration | null;
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response.
+ *
+ * Server VAD means that the model will detect the start and end of speech based on
+ * audio volume and respond at the end of user speech.
+ *
+ * Semantic VAD is more advanced and uses a turn detection model (in conjunction
+ * with VAD) to semantically estimate whether the user has finished speaking, then
+ * dynamically sets a timeout based on this probability. For example, if user audio
+ * trails off with "uhhm", the model will score a low probability of turn end and
+ * wait longer for the user to continue speaking. This can be useful for more
+ * natural conversations, but may have a higher latency.
+ */
+ turn_detection?: RealtimeSession.ServerVad | RealtimeSession.SemanticVad | null;
+ /**
+ * The voice the model uses to respond. Voice cannot be changed during the session
+ * once the model has responded with audio at least once. Current voice options are
+ * `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
+ */
+ voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'sage' | 'shimmer' | 'verse' | 'marin' | 'cedar';
+ }
+ export declare namespace RealtimeSession {
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface InputAudioNoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: RealtimeAPI.NoiseReductionType;
+ }
+ /**
+ * Granular configuration for tracing.
+ */
+ interface TracingConfiguration {
+ /**
+ * The group id to attach to this trace to enable filtering and grouping in the
+ * traces dashboard.
+ */
+ group_id?: string;
+ /**
+ * The arbitrary metadata to attach to this trace to enable filtering in the traces
+ * dashboard.
+ */
+ metadata?: unknown;
+ /**
+ * The name of the workflow to attach to this trace. This is used to name the trace
+ * in the traces dashboard.
+ */
+ workflow_name?: string;
+ }
+ /**
+ * Server-side voice activity detection (VAD) which flips on when user speech is
+ * detected and off after a period of silence.
+ */
+ interface ServerVad {
+ /**
+ * Type of turn detection, `server_vad` to turn on simple Server VAD.
+ */
+ type: 'server_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Optional timeout after which a model response will be triggered automatically.
+ * This is useful for situations in which a long pause from the user is unexpected,
+ * such as a phone call. The model will effectively prompt the user to continue the
+ * conversation based on the current context.
+ *
+ * The timeout value will be applied after the last model response's audio has
+ * finished playing, i.e. it's set to the `response.done` time plus audio playback
+ * duration.
+ *
+ * An `input_audio_buffer.timeout_triggered` event (plus events associated with the
+ * Response) will be emitted when the timeout is reached. Idle timeout is currently
+ * only supported for `server_vad` mode.
+ */
+ idle_timeout_ms?: number | null;
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ /**
+ * Used only for `server_vad` mode. Amount of audio to include before the VAD
+ * detected speech (in milliseconds). Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+ * milliseconds). Defaults to 500ms. With shorter values the model will respond
+ * more quickly, but may jump in on short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0); this
+ * defaults to 0.5. A higher threshold will require louder audio to activate the
+ * model, and thus might perform better in noisy environments.
+ */
+ threshold?: number;
+ }
+ /**
+ * Server-side semantic turn detection which uses a model to determine when the
+ * user has finished speaking.
+ */
+ interface SemanticVad {
+ /**
+ * Type of turn detection, `semantic_vad` to turn on Semantic VAD.
+ */
+ type: 'semantic_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+ * will wait longer for the user to continue speaking, `high` will respond more
+ * quickly. `auto` is the default and is equivalent to `medium`. `low`, `medium`,
+ * and `high` have max timeouts of 8s, 4s, and 2s respectively.
+ */
+ eagerness?: 'low' | 'medium' | 'high' | 'auto';
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ }
+ }
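
As a quick illustration of the beta session shape above, a hypothetical configuration with simple Server VAD; every value here is an example, not a package default beyond what the comments themselves document.

const session: RealtimeSession = {
  model: 'gpt-realtime',
  modalities: ['text', 'audio'],
  input_audio_format: 'pcm16',
  output_audio_format: 'pcm16',
  temperature: 0.8, // within the documented [0.6, 1.2] range
  turn_detection: {
    type: 'server_vad',
    threshold: 0.5, // documented default
    prefix_padding_ms: 300,
    silence_duration_ms: 500,
  },
};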
+ /**
+ * Realtime session object configuration.
+ */
+ export interface RealtimeSessionCreateRequest {
+ /**
+ * The type of session to create. Always `realtime` for the Realtime API.
+ */
+ type: 'realtime';
+ /**
+ * Configuration for input and output audio.
+ */
+ audio?: RealtimeAudioConfig;
+ /**
+ * Additional fields to include in server outputs.
+ *
+ * `item.input_audio_transcription.logprobs`: Include logprobs for input audio
+ * transcription.
+ */
+ include?: Array<'item.input_audio_transcription.logprobs'>;
+ /**
+ * The default system instructions (i.e. system message) prepended to model calls.
+ * This field allows the client to guide the model on desired responses. The model
+ * can be instructed on response content and format (e.g. "be extremely succinct",
+ * "act friendly", "here are examples of good responses") and on audio behavior
+ * (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
+ * instructions are not guaranteed to be followed by the model, but they provide
+ * guidance to the model on the desired behavior.
+ *
+ * Note that the server sets default instructions which will be used if this field
+ * is not set and are visible in the `session.created` event at the start of the
+ * session.
+ */
+ instructions?: string;
+ /**
+ * Maximum number of output tokens for a single assistant response, inclusive of
+ * tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
+ * `inf` for the maximum available tokens for a given model. Defaults to `inf`.
+ */
+ max_output_tokens?: number | 'inf';
+ /**
+ * The Realtime model used for this session.
+ */
+ model?: (string & {}) | 'gpt-realtime' | 'gpt-realtime-2025-08-28' | 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
+ /**
+ * The set of modalities the model can respond with. It defaults to `["audio"]`,
+ * indicating that the model will respond with audio plus a transcript. `["text"]`
+ * can be used to make the model respond with text only. It is not possible to
+ * request both `text` and `audio` at the same time.
+ */
+ output_modalities?: Array<'text' | 'audio'>;
+ /**
+ * Reference to a prompt template and its variables.
+ * [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
+ */
+ prompt?: ResponsesAPI.ResponsePrompt | null;
+ /**
+ * How the model chooses tools. Provide one of the string modes or force a specific
+ * function/MCP tool.
+ */
+ tool_choice?: RealtimeToolChoiceConfig;
+ /**
+ * Tools available to the model.
+ */
+ tools?: RealtimeToolsConfig;
+ /**
+ * Realtime API can write session traces to the
+ * [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
+ * tracing is enabled for a session, the configuration cannot be modified.
+ *
+ * `auto` will create a trace for the session with default values for the workflow
+ * name, group id, and metadata.
+ */
+ tracing?: RealtimeTracingConfig | null;
+ /**
+ * Controls how the realtime conversation is truncated prior to model inference.
+ * The default is `auto`.
+ */
+ truncation?: RealtimeTruncation;
+ }
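
A sketch of this request shape, assuming nothing beyond the fields declared above:

const createRequest: RealtimeSessionCreateRequest = {
  type: 'realtime',
  model: 'gpt-realtime',
  output_modalities: ['audio'], // audio responses still include a transcript
  max_output_tokens: 'inf',
  truncation: 'auto',
};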
+ /**
+ * How the model chooses tools. Provide one of the string modes or force a specific
+ * function/MCP tool.
+ */
+ export type RealtimeToolChoiceConfig = ResponsesAPI.ToolChoiceOptions | ResponsesAPI.ToolChoiceFunction | ResponsesAPI.ToolChoiceMcp;
+ /**
+ * Tools available to the model.
+ */
+ export type RealtimeToolsConfig = Array<RealtimeToolsConfigUnion>;
+ /**
+ * Give the model access to additional tools via remote Model Context Protocol
+ * (MCP) servers.
+ * [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
+ */
+ export type RealtimeToolsConfigUnion = RealtimeFunctionTool | RealtimeToolsConfigUnion.Mcp;
+ export declare namespace RealtimeToolsConfigUnion {
+ /**
+ * Give the model access to additional tools via remote Model Context Protocol
+ * (MCP) servers.
+ * [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
+ */
+ interface Mcp {
+ /**
+ * A label for this MCP server, used to identify it in tool calls.
+ */
+ server_label: string;
+ /**
+ * The type of the MCP tool. Always `mcp`.
+ */
+ type: 'mcp';
+ /**
+ * List of allowed tool names or a filter object.
+ */
+ allowed_tools?: Array<string> | Mcp.McpToolFilter | null;
+ /**
+ * An OAuth access token that can be used with a remote MCP server, either with a
+ * custom MCP server URL or a service connector. Your application must handle the
+ * OAuth authorization flow and provide the token here.
+ */
+ authorization?: string;
+ /**
+ * Identifier for service connectors, like those available in ChatGPT. One of
+ * `server_url` or `connector_id` must be provided. Learn more about service
+ * connectors
+ * [here](https://platform.openai.com/docs/guides/tools-remote-mcp#connectors).
+ *
+ * Currently supported `connector_id` values are:
+ *
+ * - Dropbox: `connector_dropbox`
+ * - Gmail: `connector_gmail`
+ * - Google Calendar: `connector_googlecalendar`
+ * - Google Drive: `connector_googledrive`
+ * - Microsoft Teams: `connector_microsoftteams`
+ * - Outlook Calendar: `connector_outlookcalendar`
+ * - Outlook Email: `connector_outlookemail`
+ * - SharePoint: `connector_sharepoint`
+ */
+ connector_id?: 'connector_dropbox' | 'connector_gmail' | 'connector_googlecalendar' | 'connector_googledrive' | 'connector_microsoftteams' | 'connector_outlookcalendar' | 'connector_outlookemail' | 'connector_sharepoint';
+ /**
+ * Optional HTTP headers to send to the MCP server. Use for authentication or other
+ * purposes.
+ */
+ headers?: {
+ [key: string]: string;
+ } | null;
+ /**
+ * Specify which of the MCP server's tools require approval.
+ */
+ require_approval?: Mcp.McpToolApprovalFilter | 'always' | 'never' | null;
+ /**
+ * Optional description of the MCP server, used to provide more context.
+ */
+ server_description?: string;
+ /**
+ * The URL for the MCP server. One of `server_url` or `connector_id` must be
+ * provided.
+ */
+ server_url?: string;
+ }
+ namespace Mcp {
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ interface McpToolFilter {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ /**
+ * Specify which of the MCP server's tools require approval. Can be `always`,
+ * `never`, or a filter object associated with tools that require approval.
+ */
+ interface McpToolApprovalFilter {
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ always?: McpToolApprovalFilter.Always;
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ never?: McpToolApprovalFilter.Never;
+ }
+ namespace McpToolApprovalFilter {
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ interface Always {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ /**
+ * A filter object to specify which tools are allowed.
+ */
+ interface Never {
+ /**
+ * Indicates whether or not a tool modifies data or is read-only. If an MCP server
+ * is
+ * [annotated with `readOnlyHint`](https://modelcontextprotocol.io/specification/2025-06-18/schema#toolannotations-readonlyhint),
+ * it will match this filter.
+ */
+ read_only?: boolean;
+ /**
+ * List of allowed tool names.
+ */
+ tool_names?: Array<string>;
+ }
+ }
+ }
+ }
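
To make the MCP variant concrete, a hypothetical tools array using only the declared fields; the label and URL are placeholders, not a real server.

const tools: RealtimeToolsConfig = [
  {
    type: 'mcp',
    server_label: 'docs', // hypothetical label
    server_url: 'https://example.com/mcp', // placeholder URL
    allowed_tools: { read_only: true }, // only tools annotated as read-only
    require_approval: 'never',
  },
];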
+ /**
+ * Realtime API can write session traces to the
+ * [Traces Dashboard](/logs?api=traces). Set to null to disable tracing. Once
+ * tracing is enabled for a session, the configuration cannot be modified.
+ *
+ * `auto` will create a trace for the session with default values for the workflow
+ * name, group id, and metadata.
+ */
+ export type RealtimeTracingConfig = 'auto' | RealtimeTracingConfig.TracingConfiguration;
+ export declare namespace RealtimeTracingConfig {
+ /**
+ * Granular configuration for tracing.
+ */
+ interface TracingConfiguration {
+ /**
+ * The group id to attach to this trace to enable filtering and grouping in the
+ * Traces Dashboard.
+ */
+ group_id?: string;
+ /**
+ * The arbitrary metadata to attach to this trace to enable filtering in the Traces
+ * Dashboard.
+ */
+ metadata?: unknown;
+ /**
+ * The name of the workflow to attach to this trace. This is used to name the trace
+ * in the Traces Dashboard.
+ */
+ workflow_name?: string;
+ }
+ }
+ /**
+ * Configuration for input and output audio.
+ */
+ export interface RealtimeTranscriptionSessionAudio {
+ input?: RealtimeTranscriptionSessionAudioInput;
+ }
+ export interface RealtimeTranscriptionSessionAudioInput {
+ /**
+ * The PCM audio format. Only a 24kHz sample rate is supported.
+ */
+ format?: RealtimeAudioFormats;
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ noise_reduction?: RealtimeTranscriptionSessionAudioInput.NoiseReduction;
+ /**
+ * Configuration for input audio transcription, defaults to off and can be set to
+ * `null` to turn off once on. Input audio transcription is not native to the
+ * model, since the model consumes audio directly. Transcription runs
+ * asynchronously through
+ * [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
+ * and should be treated as guidance of input audio content rather than precisely
+ * what the model heard. The client can optionally set the language and prompt for
+ * transcription; these offer additional guidance to the transcription service.
+ */
+ transcription?: AudioTranscription;
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response.
+ *
+ * Server VAD means that the model will detect the start and end of speech based on
+ * audio volume and respond at the end of user speech.
+ *
+ * Semantic VAD is more advanced and uses a turn detection model (in conjunction
+ * with VAD) to semantically estimate whether the user has finished speaking, then
+ * dynamically sets a timeout based on this probability. For example, if user audio
+ * trails off with "uhhm", the model will score a low probability of turn end and
+ * wait longer for the user to continue speaking. This can be useful for more
+ * natural conversations, but may have a higher latency.
+ */
+ turn_detection?: RealtimeTranscriptionSessionAudioInputTurnDetection | null;
+ }
+ export declare namespace RealtimeTranscriptionSessionAudioInput {
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface NoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: RealtimeAPI.NoiseReductionType;
+ }
+ }
+ /**
+ * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+ * set to `null` to turn off, in which case the client must manually trigger model
+ * response.
+ *
+ * Server VAD means that the model will detect the start and end of speech based on
+ * audio volume and respond at the end of user speech.
+ *
+ * Semantic VAD is more advanced and uses a turn detection model (in conjunction
+ * with VAD) to semantically estimate whether the user has finished speaking, then
+ * dynamically sets a timeout based on this probability. For example, if user audio
+ * trails off with "uhhm", the model will score a low probability of turn end and
+ * wait longer for the user to continue speaking. This can be useful for more
+ * natural conversations, but may have a higher latency.
+ */
+ export type RealtimeTranscriptionSessionAudioInputTurnDetection = RealtimeTranscriptionSessionAudioInputTurnDetection.ServerVad | RealtimeTranscriptionSessionAudioInputTurnDetection.SemanticVad;
+ export declare namespace RealtimeTranscriptionSessionAudioInputTurnDetection {
+ /**
+ * Server-side voice activity detection (VAD) which flips on when user speech is
+ * detected and off after a period of silence.
+ */
+ interface ServerVad {
+ /**
+ * Type of turn detection, `server_vad` to turn on simple Server VAD.
+ */
+ type: 'server_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Optional timeout after which a model response will be triggered automatically.
+ * This is useful for situations in which a long pause from the user is unexpected,
+ * such as a phone call. The model will effectively prompt the user to continue the
+ * conversation based on the current context.
+ *
+ * The timeout value will be applied after the last model response's audio has
+ * finished playing, i.e. it's set to the `response.done` time plus audio playback
+ * duration.
+ *
+ * An `input_audio_buffer.timeout_triggered` event (plus events associated with the
+ * Response) will be emitted when the timeout is reached. Idle timeout is currently
+ * only supported for `server_vad` mode.
+ */
+ idle_timeout_ms?: number | null;
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ /**
+ * Used only for `server_vad` mode. Amount of audio to include before the VAD
+ * detected speech (in milliseconds). Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+ * milliseconds). Defaults to 500ms. With shorter values the model will respond
+ * more quickly, but may jump in on short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0); this
+ * defaults to 0.5. A higher threshold will require louder audio to activate the
+ * model, and thus might perform better in noisy environments.
+ */
+ threshold?: number;
+ }
+ /**
+ * Server-side semantic turn detection which uses a model to determine when the
+ * user has finished speaking.
+ */
+ interface SemanticVad {
+ /**
+ * Type of turn detection, `semantic_vad` to turn on Semantic VAD.
+ */
+ type: 'semantic_vad';
+ /**
+ * Whether or not to automatically generate a response when a VAD stop event
+ * occurs.
+ */
+ create_response?: boolean;
+ /**
+ * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+ * will wait longer for the user to continue speaking, `high` will respond more
+ * quickly. `auto` is the default and is equivalent to `medium`. `low`, `medium`,
+ * and `high` have max timeouts of 8s, 4s, and 2s respectively.
+ */
+ eagerness?: 'low' | 'medium' | 'high' | 'auto';
+ /**
+ * Whether or not to automatically interrupt any ongoing response with output to
+ * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+ * occurs.
+ */
+ interrupt_response?: boolean;
+ }
+ }
+ /**
+ * Realtime transcription session object configuration.
+ */
+ export interface RealtimeTranscriptionSessionCreateRequest {
+ /**
+ * The type of session to create. Always `transcription` for transcription
+ * sessions.
+ */
+ type: 'transcription';
+ /**
+ * Configuration for input and output audio.
+ */
+ audio?: RealtimeTranscriptionSessionAudio;
+ /**
+ * Additional fields to include in server outputs.
+ *
+ * `item.input_audio_transcription.logprobs`: Include logprobs for input audio
+ * transcription.
+ */
+ include?: Array<'item.input_audio_transcription.logprobs'>;
+ }
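
A sketch of a transcription-session request built only from the declarations above; the `near_field` value assumes `RealtimeAPI.NoiseReductionType` admits it, as the noise-reduction comments suggest.

const transcriptionSession: RealtimeTranscriptionSessionCreateRequest = {
  type: 'transcription',
  include: ['item.input_audio_transcription.logprobs'],
  audio: {
    input: {
      noise_reduction: { type: 'near_field' }, // assumed member of NoiseReductionType
      turn_detection: { type: 'server_vad', silence_duration_ms: 500 },
    },
  },
};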
+ /**
+ * Controls how the realtime conversation is truncated prior to model inference.
+ * The default is `auto`.
+ */
+ export type RealtimeTruncation = 'auto' | 'disabled' | RealtimeTruncationRetentionRatio;
+ /**
+ * Retain a fraction of the conversation tokens when the conversation exceeds the
+ * input token limit. This allows you to amortize truncations across multiple
+ * turns, which can help improve cached token usage.
+ */
+ export interface RealtimeTruncationRetentionRatio {
+ /**
+ * Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when the
+ * conversation exceeds the input token limit.
+ */
+ retention_ratio: number;
+ /**
+ * Use retention ratio truncation.
+ */
+ type: 'retention_ratio';
+ }
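
A worked example of the retention-ratio variant: with a ratio of 0.75, roughly three quarters of the post-instruction conversation tokens survive each truncation.

const truncation: RealtimeTruncation = {
  type: 'retention_ratio',
  retention_ratio: 0.75,
};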
+ /**
+ * Returned when the model-generated audio is updated.
+ */
+ export interface ResponseAudioDeltaEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * Base64-encoded audio data delta.
+ */
+ delta: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_audio.delta`.
+ */
+ type: 'response.output_audio.delta';
+ }
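
A browser-oriented sketch of consuming the delta; `atob` is assumed to be available, and the decoded buffer is reinterpreted as the 16-bit little-endian PCM that the audio-format comments describe.

const onAudioDelta = (event: ResponseAudioDeltaEvent): Int16Array => {
  // Decode the base64 payload into raw bytes, then view it as PCM16 samples.
  const bytes = Uint8Array.from(atob(event.delta), (c) => c.charCodeAt(0));
  return new Int16Array(bytes.buffer);
};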
+ /**
+ * Returned when the model-generated audio is done. Also emitted when a Response is
+ * interrupted, incomplete, or cancelled.
+ */
+ export interface ResponseAudioDoneEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_audio.done`.
+ */
+ type: 'response.output_audio.done';
+ }
+ /**
+ * Returned when the model-generated transcription of audio output is updated.
+ */
+ export interface ResponseAudioTranscriptDeltaEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The transcript delta.
+ */
+ delta: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_audio_transcript.delta`.
+ */
+ type: 'response.output_audio_transcript.delta';
+ }
+ /**
+ * Returned when the model-generated transcription of audio output is done
+ * streaming. Also emitted when a Response is interrupted, incomplete, or
+ * cancelled.
+ */
+ export interface ResponseAudioTranscriptDoneEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The final transcript of the audio.
+ */
+ transcript: string;
+ /**
+ * The event type, must be `response.output_audio_transcript.done`.
+ */
+ type: 'response.output_audio_transcript.done';
+ }
+ /**
+ * Send this event to cancel an in-progress response. The server will respond with
+ * a `response.done` event with a status of `response.status=cancelled`. If there
+ * is no response to cancel, the server will respond with an error. It's safe to
+ * call `response.cancel` even if no response is in progress; an error will be
+ * returned and the session will remain unaffected.
+ */
+ export interface ResponseCancelEvent {
+ /**
+ * The event type, must be `response.cancel`.
+ */
+ type: 'response.cancel';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ /**
+ * A specific response ID to cancel - if not provided, will cancel an in-progress
+ * response in the default conversation.
+ */
+ response_id?: string;
+ }
+ /**
+ * Returned when a new content part is added to an assistant message item during
+ * response generation.
+ */
+ export interface ResponseContentPartAddedEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item to which the content part was added.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The content part that was added.
+ */
+ part: ResponseContentPartAddedEvent.Part;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.content_part.added`.
+ */
+ type: 'response.content_part.added';
+ }
+ export declare namespace ResponseContentPartAddedEvent {
+ /**
+ * The content part that was added.
+ */
+ interface Part {
+ /**
+ * Base64-encoded audio data (if type is "audio").
+ */
+ audio?: string;
+ /**
+ * The text content (if type is "text").
+ */
+ text?: string;
+ /**
+ * The transcript of the audio (if type is "audio").
+ */
+ transcript?: string;
+ /**
+ * The content type ("text", "audio").
+ */
+ type?: 'text' | 'audio';
+ }
+ }
+ /**
+ * Returned when a content part is done streaming in an assistant message item.
+ * Also emitted when a Response is interrupted, incomplete, or cancelled.
+ */
+ export interface ResponseContentPartDoneEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The content part that is done.
+ */
+ part: ResponseContentPartDoneEvent.Part;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.content_part.done`.
+ */
+ type: 'response.content_part.done';
+ }
+ export declare namespace ResponseContentPartDoneEvent {
+ /**
+ * The content part that is done.
+ */
+ interface Part {
+ /**
+ * Base64-encoded audio data (if type is "audio").
+ */
+ audio?: string;
+ /**
+ * The text content (if type is "text").
+ */
+ text?: string;
+ /**
+ * The transcript of the audio (if type is "audio").
+ */
+ transcript?: string;
+ /**
+ * The content type ("text", "audio").
+ */
+ type?: 'text' | 'audio';
+ }
+ }
+ /**
+ * This event instructs the server to create a Response, which means triggering
+ * model inference. When in Server VAD mode, the server will create Responses
+ * automatically.
+ *
+ * A Response will include at least one Item, and may have two, in which case the
+ * second will be a function call. These Items will be appended to the conversation
+ * history by default.
+ *
+ * The server will respond with a `response.created` event, events for Items and
+ * content created, and finally a `response.done` event to indicate the Response is
+ * complete.
+ *
+ * The `response.create` event includes inference configuration like `instructions`
+ * and `tools`. If these are set, they will override the Session's configuration
+ * for this Response only.
+ *
+ * Responses can be created out-of-band of the default Conversation, meaning that
+ * they can have arbitrary input, and it's possible to disable writing the output
+ * to the Conversation. Only one Response can write to the default Conversation at
+ * a time, but otherwise multiple Responses can be created in parallel. The
+ * `metadata` field is a good way to disambiguate multiple simultaneous Responses.
+ *
+ * Clients can set `conversation` to `none` to create a Response that does not
+ * write to the default Conversation. Arbitrary input can be provided with the
+ * `input` field, which is an array accepting raw Items and references to existing
+ * Items.
+ */
+ export interface ResponseCreateEvent {
+ /**
+ * The event type, must be `response.create`.
+ */
+ type: 'response.create';
+ /**
+ * Optional client-generated ID used to identify this event.
+ */
+ event_id?: string;
+ /**
+ * Create a new Realtime response with these parameters.
+ */
+ response?: RealtimeResponseCreateParams;
+ }
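
A minimal sketch of emitting this event over an already-connected WebSocket; `ws` and the event ID are assumptions, not package exports.

const requestResponse = (ws: WebSocket): void => {
  const event: ResponseCreateEvent = {
    type: 'response.create',
    event_id: 'evt_client_001', // optional client-generated ID (hypothetical value)
  };
  ws.send(JSON.stringify(event));
};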
+ /**
+ * Returned when a new Response is created. The first event of response creation,
+ * where the response is in an initial state of `in_progress`.
+ */
+ export interface ResponseCreatedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The response resource.
+ */
+ response: RealtimeResponse;
+ /**
+ * The event type, must be `response.created`.
+ */
+ type: 'response.created';
+ }
+ /**
+ * Returned when a Response is done streaming. Always emitted, no matter the final
+ * state. The Response object included in the `response.done` event will include
+ * all output Items in the Response but will omit the raw audio data.
+ *
+ * Clients should check the `status` field of the Response to determine if it was
+ * successful (`completed`) or if there was another outcome: `cancelled`, `failed`,
+ * or `incomplete`.
+ *
+ * A response will contain all output items that were generated during the
+ * response, excluding any audio content.
+ */
+ export interface ResponseDoneEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The response resource.
+ */
+ response: RealtimeResponse;
+ /**
+ * The event type, must be `response.done`.
+ */
+ type: 'response.done';
+ }
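
Per the comment above, a consumer would branch on the final status; a minimal sketch, assuming `RealtimeResponse` carries the documented `status` field:

const onResponseDone = (event: ResponseDoneEvent): void => {
  if (event.response.status !== 'completed') {
    // cancelled, failed, or incomplete
    console.warn('response ended early:', event.response.status);
  }
};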
+ /**
+ * Returned when the model-generated function call arguments are updated.
+ */
+ export interface ResponseFunctionCallArgumentsDeltaEvent {
+ /**
+ * The ID of the function call.
+ */
+ call_id: string;
+ /**
+ * The arguments delta as a JSON string.
+ */
+ delta: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the function call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.function_call_arguments.delta`.
+ */
+ type: 'response.function_call_arguments.delta';
+ }
+ /**
+ * Returned when the model-generated function call arguments are done streaming.
+ * Also emitted when a Response is interrupted, incomplete, or cancelled.
+ */
+ export interface ResponseFunctionCallArgumentsDoneEvent {
+ /**
+ * The final arguments as a JSON string.
+ */
+ arguments: string;
+ /**
+ * The ID of the function call.
+ */
+ call_id: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the function call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.function_call_arguments.done`.
+ */
+ type: 'response.function_call_arguments.done';
+ }
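
A sketch of the usual accumulate-then-parse pattern for these two events. The buffer map is illustrative and only needed for incremental display; `arguments` on the done event already holds the full JSON string.

const argBuffers = new Map<string, string>();

const onArgsDelta = (event: ResponseFunctionCallArgumentsDeltaEvent): void => {
  // Append each streamed fragment under its call_id.
  argBuffers.set(event.call_id, (argBuffers.get(event.call_id) ?? '') + event.delta);
};

const onArgsDone = (event: ResponseFunctionCallArgumentsDoneEvent): unknown => {
  argBuffers.delete(event.call_id);
  return JSON.parse(event.arguments); // shape depends on the declared tool schema
};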
+ /**
+ * Returned when MCP tool call arguments are updated during response generation.
+ */
+ export interface ResponseMcpCallArgumentsDelta {
+ /**
+ * The JSON-encoded arguments delta.
+ */
+ delta: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP tool call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.mcp_call_arguments.delta`.
+ */
+ type: 'response.mcp_call_arguments.delta';
+ /**
+ * If present, indicates the delta text was obfuscated.
+ */
+ obfuscation?: string | null;
+ }
+ /**
+ * Returned when MCP tool call arguments are finalized during response generation.
+ */
+ export interface ResponseMcpCallArgumentsDone {
+ /**
+ * The final JSON-encoded arguments string.
+ */
+ arguments: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP tool call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.mcp_call_arguments.done`.
+ */
+ type: 'response.mcp_call_arguments.done';
+ }
+ /**
+ * Returned when an MCP tool call has completed successfully.
+ */
+ export interface ResponseMcpCallCompleted {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP tool call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The event type, must be `response.mcp_call.completed`.
+ */
+ type: 'response.mcp_call.completed';
+ }
+ /**
+ * Returned when an MCP tool call has failed.
+ */
+ export interface ResponseMcpCallFailed {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP tool call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The event type, must be `response.mcp_call.failed`.
+ */
+ type: 'response.mcp_call.failed';
+ }
+ /**
+ * Returned when an MCP tool call has started and is in progress.
+ */
+ export interface ResponseMcpCallInProgress {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the MCP tool call item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The event type, must be `response.mcp_call.in_progress`.
+ */
+ type: 'response.mcp_call.in_progress';
+ }
+ /**
+ * Returned when a new Item is created during Response generation.
+ */
+ export interface ResponseOutputItemAddedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The index of the output item in the Response.
+ */
+ output_index: number;
+ /**
+ * The ID of the Response to which the item belongs.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_item.added`.
+ */
+ type: 'response.output_item.added';
+ }
+ /**
+ * Returned when an Item is done streaming. Also emitted when a Response is
+ * interrupted, incomplete, or cancelled.
+ */
+ export interface ResponseOutputItemDoneEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A single item within a Realtime conversation.
+ */
+ item: ConversationItem;
+ /**
+ * The index of the output item in the Response.
+ */
+ output_index: number;
+ /**
+ * The ID of the Response to which the item belongs.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_item.done`.
+ */
+ type: 'response.output_item.done';
+ }
+ /**
+ * Returned when the text value of an "output_text" content part is updated.
+ */
+ export interface ResponseTextDeltaEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The text delta.
+ */
+ delta: string;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The event type, must be `response.output_text.delta`.
+ */
+ type: 'response.output_text.delta';
+ }
+ /**
+ * Returned when the text value of an "output_text" content part is done streaming.
+ * Also emitted when a Response is interrupted, incomplete, or cancelled.
+ */
+ export interface ResponseTextDoneEvent {
+ /**
+ * The index of the content part in the item's content array.
+ */
+ content_index: number;
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The ID of the item.
+ */
+ item_id: string;
+ /**
+ * The index of the output item in the response.
+ */
+ output_index: number;
+ /**
+ * The ID of the response.
+ */
+ response_id: string;
+ /**
+ * The final text content.
+ */
+ text: string;
+ /**
+ * The event type, must be `response.output_text.done`.
+ */
+ type: 'response.output_text.done';
+ }
+ /**
+ * Returned when a Session is created. Emitted automatically when a new connection
+ * is established as the first server event. This event will contain the default
+ * Session configuration.
+ */
+ export interface SessionCreatedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * The session configuration.
+ */
+ session: RealtimeSessionCreateRequest | RealtimeTranscriptionSessionCreateRequest;
+ /**
+ * The event type, must be `session.created`.
+ */
+ type: 'session.created';
+ }
+ /**
+ * Send this event to update the session's configuration. The client may send this
+ * event at any time to update any field except for `voice` and `model`. `voice`
+ * can be updated only if there have been no other audio outputs yet.
+ *
+ * When the server receives a `session.update`, it will respond with a
+ * `session.updated` event showing the full, effective configuration. Only the
+ * fields that are present in the `session.update` are updated. To clear a field
+ * like `instructions`, pass an empty string. To clear a field like `tools`, pass
+ * an empty array. To clear a field like `turn_detection`, pass `null`.
+ */
+ export interface SessionUpdateEvent {
+ /**
+ * Update the Realtime session. Choose either a realtime session or a transcription
+ * session.
+ */
+ session: RealtimeSessionCreateRequest | RealtimeTranscriptionSessionCreateRequest;
+ /**
+ * The event type, must be `session.update`.
+ */
+ type: 'session.update';
+ /**
+ * Optional client-generated ID used to identify this event. This is an arbitrary
+ * string that a client may assign. It will be passed back if there is an error
+ * with the event, but the corresponding `session.updated` event will not include
+ * it.
+ */
+ event_id?: string;
+ }
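
A minimal sketch of a partial update, following the merge semantics above (only the fields present are changed; an empty string would clear `instructions`):

const update: SessionUpdateEvent = {
  type: 'session.update',
  session: {
    type: 'realtime',
    instructions: 'Be extremely succinct.',
  },
};
// Send with ws.send(JSON.stringify(update)) on a connected Realtime WebSocket,
// as in the earlier response.create sketch.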
+ /**
3602
+ * Returned when a session is updated with a `session.update` event, unless there
3603
+ * is an error.
3604
+ */
3605
+ export interface SessionUpdatedEvent {
3606
+ /**
3607
+ * The unique ID of the server event.
3608
+ */
3609
+ event_id: string;
3610
+ /**
3611
+ * The session configuration.
3612
+ */
3613
+ session: RealtimeSessionCreateRequest | RealtimeTranscriptionSessionCreateRequest;
3614
+ /**
3615
+ * The event type, must be `session.updated`.
3616
+ */
3617
+ type: 'session.updated';
3618
+ }
3619
+ /**
3620
+ * Send this event to update a transcription session.
3621
+ */
3622
+ export interface TranscriptionSessionUpdate {
3623
+ /**
3624
+ * Realtime transcription session object configuration.
3625
+ */
3626
+ session: TranscriptionSessionUpdate.Session;
3627
+ /**
3628
+ * The event type, must be `transcription_session.update`.
3629
+ */
3630
+ type: 'transcription_session.update';
3631
+ /**
3632
+ * Optional client-generated ID used to identify this event.
3633
+ */
3634
+ event_id?: string;
3635
+ }
3636
+ export declare namespace TranscriptionSessionUpdate {
3637
+ /**
3638
+ * Realtime transcription session object configuration.
3639
+ */
3640
+ interface Session {
3641
+ /**
3642
+ * The set of items to include in the transcription. Current available items are:
3643
+ * `item.input_audio_transcription.logprobs`
3644
+ */
3645
+ include?: Array<'item.input_audio_transcription.logprobs'>;
3646
+ /**
3647
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
3648
+ * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
3649
+ * (mono), and little-endian byte order.
3650
+ */
3651
+ input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
3652
+ /**
3653
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
3654
+ * off. Noise reduction filters audio added to the input audio buffer before it is
3655
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
3656
+ * detection accuracy (reducing false positives) and model performance by improving
3657
+ * perception of the input audio.
3658
+ */
3659
+ input_audio_noise_reduction?: Session.InputAudioNoiseReduction;
3660
+ /**
3661
+ * Configuration for input audio transcription. The client can optionally set the
3662
+ * language and prompt for transcription, these offer additional guidance to the
3663
+ * transcription service.
3664
+ */
3665
+ input_audio_transcription?: RealtimeAPI.AudioTranscription;
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ turn_detection?: Session.TurnDetection;
+ }
+ namespace Session {
+ /**
+ * Configuration for input audio noise reduction. This can be set to `null` to turn
+ * off. Noise reduction filters audio added to the input audio buffer before it is
+ * sent to VAD and the model. Filtering the audio can improve VAD and turn
+ * detection accuracy (reducing false positives) and model performance by improving
+ * perception of the input audio.
+ */
+ interface InputAudioNoiseReduction {
+ /**
+ * Type of noise reduction. `near_field` is for close-talking microphones such as
+ * headphones, `far_field` is for far-field microphones such as laptop or
+ * conference room microphones.
+ */
+ type?: RealtimeAPI.NoiseReductionType;
+ }
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ interface TurnDetection {
+ /**
+ * Amount of audio to include before the VAD detected speech (in milliseconds).
+ * Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ * With shorter values the model will respond more quickly, but may jump in on
+ * short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Activation threshold for VAD (0.0 to 1.0); defaults to 0.5. A higher
+ * threshold will require louder audio to activate the model, and thus might
+ * perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection. Only `server_vad` is currently supported for
+ * transcription sessions.
+ */
+ type?: 'server_vad';
+ }
+ }
+ }
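
Editor's note: a sketch of a complete `transcription_session.update` payload built
from the fields declared above; the tuning values and the `ws` handle are
illustrative assumptions:

const transcriptionUpdate = {
  type: 'transcription_session.update' as const,
  session: {
    input_audio_format: 'pcm16' as const, // 16-bit PCM, 24kHz, mono, little-endian
    input_audio_noise_reduction: { type: 'near_field' as const }, // close-talking mic
    turn_detection: {
      type: 'server_vad' as const,
      threshold: 0.6,           // require louder audio to trigger in a noisy room
      prefix_padding_ms: 300,   // audio retained before detected speech
      silence_duration_ms: 700, // wait longer before closing the user's turn
    },
    include: ['item.input_audio_transcription.logprobs' as const],
  },
};
ws.send(JSON.stringify(transcriptionUpdate));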
+ /**
+ * Returned when a transcription session is updated with a
+ * `transcription_session.update` event, unless there is an error.
+ */
+ export interface TranscriptionSessionUpdatedEvent {
+ /**
+ * The unique ID of the server event.
+ */
+ event_id: string;
+ /**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+ session: TranscriptionSessionUpdatedEvent.Session;
+ /**
+ * The event type, must be `transcription_session.updated`.
+ */
+ type: 'transcription_session.updated';
+ }
+ export declare namespace TranscriptionSessionUpdatedEvent {
+ /**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+ interface Session {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ client_secret: Session.ClientSecret;
+ /**
+ * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+ */
+ input_audio_format?: string;
+ /**
+ * Configuration of the transcription model.
+ */
+ input_audio_transcription?: RealtimeAPI.AudioTranscription;
+ /**
+ * The set of modalities the model can respond with. To disable audio, set this to
+ * ["text"].
+ */
+ modalities?: Array<'text' | 'audio'>;
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ turn_detection?: Session.TurnDetection;
+ }
+ namespace Session {
+ /**
+ * Ephemeral key returned by the API. Only present when the session is created on
+ * the server via REST API.
+ */
+ interface ClientSecret {
+ /**
+ * Timestamp for when the token expires. Currently, all tokens expire after one
+ * minute.
+ */
+ expires_at: number;
+ /**
+ * Ephemeral key usable in client environments to authenticate connections to the
+ * Realtime API. Use this in client-side environments rather than a standard API
+ * token, which should only be used server-side.
+ */
+ value: string;
+ }
+ /**
+ * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+ * means that the model will detect the start and end of speech based on audio
+ * volume and respond at the end of user speech.
+ */
+ interface TurnDetection {
+ /**
+ * Amount of audio to include before the VAD detected speech (in milliseconds).
+ * Defaults to 300ms.
+ */
+ prefix_padding_ms?: number;
+ /**
+ * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+ * With shorter values the model will respond more quickly, but may jump in on
+ * short pauses from the user.
+ */
+ silence_duration_ms?: number;
+ /**
+ * Activation threshold for VAD (0.0 to 1.0); defaults to 0.5. A higher
+ * threshold will require louder audio to activate the model, and thus might
+ * perform better in noisy environments.
+ */
+ threshold?: number;
+ /**
+ * Type of turn detection; only `server_vad` is currently supported.
+ */
+ type?: string;
+ }
+ }
+ }
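
Editor's note: a sketch of how the ephemeral `client_secret` above is meant to be
consumed. The WebSocket URL and subprotocol strings are assumptions for
illustration, not definitions from this package:

// Client side: authenticate with the short-lived ephemeral key, never with a
// standard API key (which must stay server-side per the docs above).
function connectWithEphemeralKey(clientSecret: { value: string; expires_at: number }) {
  // Assuming expires_at is a Unix timestamp in seconds, per common API convention.
  if (clientSecret.expires_at * 1000 <= Date.now()) {
    throw new Error('ephemeral key has expired; mint a new session server-side');
  }
  return new WebSocket('wss://api.openai.com/v1/realtime', [
    'realtime',
    // Hypothetical subprotocol-based auth; consult the Realtime API docs for
    // the exact connection handshake.
    `openai-insecure-api-key.${clientSecret.value}`,
  ]);
}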
+ export declare namespace Realtime {
+ export { type AudioTranscription as AudioTranscription, type ConversationCreatedEvent as ConversationCreatedEvent, type ConversationItem as ConversationItem, type ConversationItemAdded as ConversationItemAdded, type ConversationItemCreateEvent as ConversationItemCreateEvent, type ConversationItemCreatedEvent as ConversationItemCreatedEvent, type ConversationItemDeleteEvent as ConversationItemDeleteEvent, type ConversationItemDeletedEvent as ConversationItemDeletedEvent, type ConversationItemDone as ConversationItemDone, type ConversationItemInputAudioTranscriptionCompletedEvent as ConversationItemInputAudioTranscriptionCompletedEvent, type ConversationItemInputAudioTranscriptionDeltaEvent as ConversationItemInputAudioTranscriptionDeltaEvent, type ConversationItemInputAudioTranscriptionFailedEvent as ConversationItemInputAudioTranscriptionFailedEvent, type ConversationItemInputAudioTranscriptionSegment as ConversationItemInputAudioTranscriptionSegment, type ConversationItemRetrieveEvent as ConversationItemRetrieveEvent, type ConversationItemTruncateEvent as ConversationItemTruncateEvent, type ConversationItemTruncatedEvent as ConversationItemTruncatedEvent, type ConversationItemWithReference as ConversationItemWithReference, type InputAudioBufferAppendEvent as InputAudioBufferAppendEvent, type InputAudioBufferClearEvent as InputAudioBufferClearEvent, type InputAudioBufferClearedEvent as InputAudioBufferClearedEvent, type InputAudioBufferCommitEvent as InputAudioBufferCommitEvent, type InputAudioBufferCommittedEvent as InputAudioBufferCommittedEvent, type InputAudioBufferSpeechStartedEvent as InputAudioBufferSpeechStartedEvent, type InputAudioBufferSpeechStoppedEvent as InputAudioBufferSpeechStoppedEvent, type InputAudioBufferTimeoutTriggered as InputAudioBufferTimeoutTriggered, type LogProbProperties as LogProbProperties, type McpListToolsCompleted as McpListToolsCompleted, type McpListToolsFailed as McpListToolsFailed, type McpListToolsInProgress as McpListToolsInProgress, type NoiseReductionType as NoiseReductionType, type OutputAudioBufferClearEvent as OutputAudioBufferClearEvent, type RateLimitsUpdatedEvent as RateLimitsUpdatedEvent, type RealtimeAudioConfig as RealtimeAudioConfig, type RealtimeAudioConfigInput as RealtimeAudioConfigInput, type RealtimeAudioConfigOutput as RealtimeAudioConfigOutput, type RealtimeAudioFormats as RealtimeAudioFormats, type RealtimeAudioInputTurnDetection as RealtimeAudioInputTurnDetection, type RealtimeClientEvent as RealtimeClientEvent, type RealtimeConversationItemAssistantMessage as RealtimeConversationItemAssistantMessage, type RealtimeConversationItemFunctionCall as RealtimeConversationItemFunctionCall, type RealtimeConversationItemFunctionCallOutput as RealtimeConversationItemFunctionCallOutput, type RealtimeConversationItemSystemMessage as RealtimeConversationItemSystemMessage, type RealtimeConversationItemUserMessage as RealtimeConversationItemUserMessage, type RealtimeError as RealtimeError, type RealtimeErrorEvent as RealtimeErrorEvent, type RealtimeFunctionTool as RealtimeFunctionTool, type RealtimeMcpApprovalRequest as RealtimeMcpApprovalRequest, type RealtimeMcpApprovalResponse as RealtimeMcpApprovalResponse, type RealtimeMcpListTools as RealtimeMcpListTools, type RealtimeMcpProtocolError as RealtimeMcpProtocolError, type RealtimeMcpToolCall as RealtimeMcpToolCall, type RealtimeMcpToolExecutionError as RealtimeMcpToolExecutionError, type RealtimeMcphttpError as RealtimeMcphttpError, type RealtimeResponse as RealtimeResponse, type RealtimeResponseCreateAudioOutput as RealtimeResponseCreateAudioOutput, type RealtimeResponseCreateMcpTool as RealtimeResponseCreateMcpTool, type RealtimeResponseCreateParams as RealtimeResponseCreateParams, type RealtimeResponseStatus as RealtimeResponseStatus, type RealtimeResponseUsage as RealtimeResponseUsage, type RealtimeResponseUsageInputTokenDetails as RealtimeResponseUsageInputTokenDetails, type RealtimeResponseUsageOutputTokenDetails as RealtimeResponseUsageOutputTokenDetails, type RealtimeServerEvent as RealtimeServerEvent, type RealtimeSession as RealtimeSession, type RealtimeSessionCreateRequest as RealtimeSessionCreateRequest, type RealtimeToolChoiceConfig as RealtimeToolChoiceConfig, type RealtimeToolsConfig as RealtimeToolsConfig, type RealtimeToolsConfigUnion as RealtimeToolsConfigUnion, type RealtimeTracingConfig as RealtimeTracingConfig, type RealtimeTranscriptionSessionAudio as RealtimeTranscriptionSessionAudio, type RealtimeTranscriptionSessionAudioInput as RealtimeTranscriptionSessionAudioInput, type RealtimeTranscriptionSessionAudioInputTurnDetection as RealtimeTranscriptionSessionAudioInputTurnDetection, type RealtimeTranscriptionSessionCreateRequest as RealtimeTranscriptionSessionCreateRequest, type RealtimeTruncation as RealtimeTruncation, type RealtimeTruncationRetentionRatio as RealtimeTruncationRetentionRatio, type ResponseAudioDeltaEvent as ResponseAudioDeltaEvent, type ResponseAudioDoneEvent as ResponseAudioDoneEvent, type ResponseAudioTranscriptDeltaEvent as ResponseAudioTranscriptDeltaEvent, type ResponseAudioTranscriptDoneEvent as ResponseAudioTranscriptDoneEvent, type ResponseCancelEvent as ResponseCancelEvent, type ResponseContentPartAddedEvent as ResponseContentPartAddedEvent, type ResponseContentPartDoneEvent as ResponseContentPartDoneEvent, type ResponseCreateEvent as ResponseCreateEvent, type ResponseCreatedEvent as ResponseCreatedEvent, type ResponseDoneEvent as ResponseDoneEvent, type ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent, type ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent, type ResponseMcpCallArgumentsDelta as ResponseMcpCallArgumentsDelta, type ResponseMcpCallArgumentsDone as ResponseMcpCallArgumentsDone, type ResponseMcpCallCompleted as ResponseMcpCallCompleted, type ResponseMcpCallFailed as ResponseMcpCallFailed, type ResponseMcpCallInProgress as ResponseMcpCallInProgress, type ResponseOutputItemAddedEvent as ResponseOutputItemAddedEvent, type ResponseOutputItemDoneEvent as ResponseOutputItemDoneEvent, type ResponseTextDeltaEvent as ResponseTextDeltaEvent, type ResponseTextDoneEvent as ResponseTextDoneEvent, type SessionCreatedEvent as SessionCreatedEvent, type SessionUpdateEvent as SessionUpdateEvent, type SessionUpdatedEvent as SessionUpdatedEvent, type TranscriptionSessionUpdate as TranscriptionSessionUpdate, type TranscriptionSessionUpdatedEvent as TranscriptionSessionUpdatedEvent, };
+ export { ClientSecrets as ClientSecrets, type RealtimeSessionClientSecret as RealtimeSessionClientSecret, type RealtimeSessionCreateResponse as RealtimeSessionCreateResponse, type RealtimeTranscriptionSessionCreateResponse as RealtimeTranscriptionSessionCreateResponse, type RealtimeTranscriptionSessionTurnDetection as RealtimeTranscriptionSessionTurnDetection, type ClientSecretCreateResponse as ClientSecretCreateResponse, type ClientSecretCreateParams as ClientSecretCreateParams, };
+ }
+ //# sourceMappingURL=realtime.d.ts.map