@superinterface/react 5.1.3 → 5.2.0-beta.0
This diff shows the publicly available contents of the two package versions as published to their respective registries. It is provided for informational purposes only.
- package/dist/index.d.cts +5 -5
- package/dist/index.d.ts +5 -5
- package/dist/server.d.cts +1 -1
- package/dist/server.d.ts +1 -1
- package/package.json +8 -6
- package/types/index.d.ts +4 -0
- package/types/node_modules/openai/LICENSE +201 -0
- package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.mts +7 -0
- package/types/node_modules/openai/_vendor/partial-json-parser/parser.d.ts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.mts +32 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/Options.d.ts +32 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.mts +21 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/Refs.d.ts +21 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.mts +12 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/errorMessages.d.ts +12 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.mts +38 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/index.d.ts +38 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.mts +38 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parseDef.d.ts +38 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.mts +3 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/any.d.ts +3 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.mts +13 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/array.d.ts +13 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.mts +15 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/bigint.d.ts +15 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/boolean.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.mts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/branded.d.ts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.mts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/catch.d.ts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.mts +16 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/date.d.ts +16 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.mts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/default.d.ts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/effects.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.mts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/enum.d.ts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.mts +9 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/intersection.d.ts +9 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.mts +10 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/literal.d.ts +10 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.mts +16 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/map.d.ts +16 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.mts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nativeEnum.d.ts +7 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/never.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.mts +6 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/null.d.ts +6 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.mts +11 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/nullable.d.ts +11 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.mts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/number.d.ts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.mts +11 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/object.d.ts +11 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/optional.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.mts +6 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/pipeline.d.ts +6 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/promise.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.mts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/readonly.d.ts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.mts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/record.d.ts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.mts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/set.d.ts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.mts +70 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/string.d.ts +70 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.mts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/tuple.d.ts +14 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.mts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/undefined.d.ts +5 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.mts +24 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/union.d.ts +24 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.mts +3 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/parsers/unknown.d.ts +3 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.mts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/util.d.ts +4 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.mts +11 -0
- package/types/node_modules/openai/_vendor/zod-to-json-schema/zodToJsonSchema.d.ts +11 -0
- package/types/node_modules/openai/api-promise.d.mts +2 -0
- package/types/node_modules/openai/api-promise.d.ts +2 -0
- package/types/node_modules/openai/azure.d.mts +63 -0
- package/types/node_modules/openai/azure.d.ts +63 -0
- package/types/node_modules/openai/beta/realtime/index.d.mts +2 -0
- package/types/node_modules/openai/beta/realtime/index.d.ts +2 -0
- package/types/node_modules/openai/beta/realtime/internal-base.d.mts +45 -0
- package/types/node_modules/openai/beta/realtime/internal-base.d.ts +45 -0
- package/types/node_modules/openai/beta/realtime/websocket.d.mts +36 -0
- package/types/node_modules/openai/beta/realtime/websocket.d.ts +36 -0
- package/types/node_modules/openai/beta/realtime/ws.d.mts +27 -0
- package/types/node_modules/openai/beta/realtime/ws.d.ts +27 -0
- package/types/node_modules/openai/client.d.mts +297 -0
- package/types/node_modules/openai/client.d.ts +297 -0
- package/types/node_modules/openai/core/api-promise.d.mts +49 -0
- package/types/node_modules/openai/core/api-promise.d.ts +49 -0
- package/types/node_modules/openai/core/error.d.mts +59 -0
- package/types/node_modules/openai/core/error.d.ts +59 -0
- package/types/node_modules/openai/core/pagination.d.mts +89 -0
- package/types/node_modules/openai/core/pagination.d.ts +89 -0
- package/types/node_modules/openai/core/resource.d.mts +6 -0
- package/types/node_modules/openai/core/resource.d.ts +6 -0
- package/types/node_modules/openai/core/streaming.d.mts +33 -0
- package/types/node_modules/openai/core/streaming.d.ts +33 -0
- package/types/node_modules/openai/core/uploads.d.mts +3 -0
- package/types/node_modules/openai/core/uploads.d.ts +3 -0
- package/types/node_modules/openai/error.d.mts +2 -0
- package/types/node_modules/openai/error.d.ts +2 -0
- package/types/node_modules/openai/helpers/audio.d.mts +9 -0
- package/types/node_modules/openai/helpers/audio.d.ts +9 -0
- package/types/node_modules/openai/helpers/zod.d.mts +70 -0
- package/types/node_modules/openai/helpers/zod.d.ts +70 -0
- package/types/node_modules/openai/index.d.mts +8 -0
- package/types/node_modules/openai/index.d.ts +8 -0
- package/types/node_modules/openai/internal/builtin-types.d.mts +73 -0
- package/types/node_modules/openai/internal/builtin-types.d.ts +73 -0
- package/types/node_modules/openai/internal/decoders/line.d.mts +17 -0
- package/types/node_modules/openai/internal/decoders/line.d.ts +17 -0
- package/types/node_modules/openai/internal/detect-platform.d.mts +15 -0
- package/types/node_modules/openai/internal/detect-platform.d.ts +15 -0
- package/types/node_modules/openai/internal/errors.d.mts +3 -0
- package/types/node_modules/openai/internal/errors.d.ts +3 -0
- package/types/node_modules/openai/internal/headers.d.mts +20 -0
- package/types/node_modules/openai/internal/headers.d.ts +20 -0
- package/types/node_modules/openai/internal/parse.d.mts +17 -0
- package/types/node_modules/openai/internal/parse.d.ts +17 -0
- package/types/node_modules/openai/internal/qs/formats.d.mts +7 -0
- package/types/node_modules/openai/internal/qs/formats.d.ts +7 -0
- package/types/node_modules/openai/internal/qs/index.d.mts +10 -0
- package/types/node_modules/openai/internal/qs/index.d.ts +10 -0
- package/types/node_modules/openai/internal/qs/stringify.d.mts +3 -0
- package/types/node_modules/openai/internal/qs/stringify.d.ts +3 -0
- package/types/node_modules/openai/internal/qs/types.d.mts +57 -0
- package/types/node_modules/openai/internal/qs/types.d.ts +57 -0
- package/types/node_modules/openai/internal/qs/utils.d.mts +15 -0
- package/types/node_modules/openai/internal/qs/utils.d.ts +15 -0
- package/types/node_modules/openai/internal/request-options.d.mts +78 -0
- package/types/node_modules/openai/internal/request-options.d.ts +78 -0
- package/types/node_modules/openai/internal/shim-types.d.mts +17 -0
- package/types/node_modules/openai/internal/shim-types.d.ts +17 -0
- package/types/node_modules/openai/internal/shims.d.mts +20 -0
- package/types/node_modules/openai/internal/shims.d.ts +20 -0
- package/types/node_modules/openai/internal/stream-utils.d.mts +8 -0
- package/types/node_modules/openai/internal/stream-utils.d.ts +8 -0
- package/types/node_modules/openai/internal/to-file.d.mts +45 -0
- package/types/node_modules/openai/internal/to-file.d.ts +45 -0
- package/types/node_modules/openai/internal/types.d.mts +69 -0
- package/types/node_modules/openai/internal/types.d.ts +69 -0
- package/types/node_modules/openai/internal/uploads.d.mts +42 -0
- package/types/node_modules/openai/internal/uploads.d.ts +42 -0
- package/types/node_modules/openai/internal/utils/base64.d.mts +9 -0
- package/types/node_modules/openai/internal/utils/base64.d.ts +9 -0
- package/types/node_modules/openai/internal/utils/bytes.d.mts +4 -0
- package/types/node_modules/openai/internal/utils/bytes.d.ts +4 -0
- package/types/node_modules/openai/internal/utils/env.d.mts +9 -0
- package/types/node_modules/openai/internal/utils/env.d.ts +9 -0
- package/types/node_modules/openai/internal/utils/log.d.mts +37 -0
- package/types/node_modules/openai/internal/utils/log.d.ts +37 -0
- package/types/node_modules/openai/internal/utils/path.d.mts +15 -0
- package/types/node_modules/openai/internal/utils/path.d.ts +15 -0
- package/types/node_modules/openai/internal/utils/sleep.d.mts +2 -0
- package/types/node_modules/openai/internal/utils/sleep.d.ts +2 -0
- package/types/node_modules/openai/internal/utils/uuid.d.mts +5 -0
- package/types/node_modules/openai/internal/utils/uuid.d.ts +5 -0
- package/types/node_modules/openai/internal/utils/values.d.mts +18 -0
- package/types/node_modules/openai/internal/utils/values.d.ts +18 -0
- package/types/node_modules/openai/internal/utils.d.mts +7 -0
- package/types/node_modules/openai/internal/utils.d.ts +7 -0
- package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.mts +59 -0
- package/types/node_modules/openai/lib/AbstractChatCompletionRunner.d.ts +59 -0
- package/types/node_modules/openai/lib/AssistantStream.d.mts +60 -0
- package/types/node_modules/openai/lib/AssistantStream.d.ts +60 -0
- package/types/node_modules/openai/lib/ChatCompletionRunner.d.mts +16 -0
- package/types/node_modules/openai/lib/ChatCompletionRunner.d.ts +16 -0
- package/types/node_modules/openai/lib/ChatCompletionStream.d.mts +208 -0
- package/types/node_modules/openai/lib/ChatCompletionStream.d.ts +208 -0
- package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.mts +19 -0
- package/types/node_modules/openai/lib/ChatCompletionStreamingRunner.d.ts +19 -0
- package/types/node_modules/openai/lib/EventEmitter.d.mts +45 -0
- package/types/node_modules/openai/lib/EventEmitter.d.ts +45 -0
- package/types/node_modules/openai/lib/EventStream.d.mts +62 -0
- package/types/node_modules/openai/lib/EventStream.d.ts +62 -0
- package/types/node_modules/openai/lib/ResponsesParser.d.mts +36 -0
- package/types/node_modules/openai/lib/ResponsesParser.d.ts +36 -0
- package/types/node_modules/openai/lib/RunnableFunction.d.mts +83 -0
- package/types/node_modules/openai/lib/RunnableFunction.d.ts +83 -0
- package/types/node_modules/openai/lib/Util.d.mts +5 -0
- package/types/node_modules/openai/lib/Util.d.ts +5 -0
- package/types/node_modules/openai/lib/chatCompletionUtils.d.mts +5 -0
- package/types/node_modules/openai/lib/chatCompletionUtils.d.ts +5 -0
- package/types/node_modules/openai/lib/jsonschema.d.mts +106 -0
- package/types/node_modules/openai/lib/jsonschema.d.ts +106 -0
- package/types/node_modules/openai/lib/parser.d.mts +47 -0
- package/types/node_modules/openai/lib/parser.d.ts +47 -0
- package/types/node_modules/openai/lib/responses/EventTypes.d.mts +9 -0
- package/types/node_modules/openai/lib/responses/EventTypes.d.ts +9 -0
- package/types/node_modules/openai/lib/responses/ResponseStream.d.mts +59 -0
- package/types/node_modules/openai/lib/responses/ResponseStream.d.ts +59 -0
- package/types/node_modules/openai/package.json +233 -0
- package/types/node_modules/openai/pagination.d.mts +2 -0
- package/types/node_modules/openai/pagination.d.ts +2 -0
- package/types/node_modules/openai/realtime/index.d.mts +2 -0
- package/types/node_modules/openai/realtime/index.d.ts +2 -0
- package/types/node_modules/openai/realtime/internal-base.d.mts +45 -0
- package/types/node_modules/openai/realtime/internal-base.d.ts +45 -0
- package/types/node_modules/openai/realtime/websocket.d.mts +36 -0
- package/types/node_modules/openai/realtime/websocket.d.ts +36 -0
- package/types/node_modules/openai/realtime/ws.d.mts +27 -0
- package/types/node_modules/openai/realtime/ws.d.ts +27 -0
- package/types/node_modules/openai/resource.d.mts +2 -0
- package/types/node_modules/openai/resource.d.ts +2 -0
- package/types/node_modules/openai/resources/audio/audio.d.mts +26 -0
- package/types/node_modules/openai/resources/audio/audio.d.ts +26 -0
- package/types/node_modules/openai/resources/audio/index.d.mts +5 -0
- package/types/node_modules/openai/resources/audio/index.d.ts +5 -0
- package/types/node_modules/openai/resources/audio/speech.d.mts +64 -0
- package/types/node_modules/openai/resources/audio/speech.d.ts +64 -0
- package/types/node_modules/openai/resources/audio/transcriptions.d.mts +485 -0
- package/types/node_modules/openai/resources/audio/transcriptions.d.ts +485 -0
- package/types/node_modules/openai/resources/audio/translations.d.mts +81 -0
- package/types/node_modules/openai/resources/audio/translations.d.ts +81 -0
- package/types/node_modules/openai/resources/audio.d.mts +2 -0
- package/types/node_modules/openai/resources/audio.d.ts +2 -0
- package/types/node_modules/openai/resources/batches.d.mts +279 -0
- package/types/node_modules/openai/resources/batches.d.ts +279 -0
- package/types/node_modules/openai/resources/beta/assistants.d.mts +1232 -0
- package/types/node_modules/openai/resources/beta/assistants.d.ts +1232 -0
- package/types/node_modules/openai/resources/beta/beta.d.mts +18 -0
- package/types/node_modules/openai/resources/beta/beta.d.ts +18 -0
- package/types/node_modules/openai/resources/beta/index.d.mts +5 -0
- package/types/node_modules/openai/resources/beta/index.d.ts +5 -0
- package/types/node_modules/openai/resources/beta/realtime/index.d.mts +4 -0
- package/types/node_modules/openai/resources/beta/realtime/index.d.ts +4 -0
- package/types/node_modules/openai/resources/beta/realtime/realtime.d.mts +2332 -0
- package/types/node_modules/openai/resources/beta/realtime/realtime.d.ts +2332 -0
- package/types/node_modules/openai/resources/beta/realtime/sessions.d.mts +744 -0
- package/types/node_modules/openai/resources/beta/realtime/sessions.d.ts +744 -0
- package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts +299 -0
- package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts +299 -0
- package/types/node_modules/openai/resources/beta/realtime.d.mts +2 -0
- package/types/node_modules/openai/resources/beta/realtime.d.ts +2 -0
- package/types/node_modules/openai/resources/beta/threads/index.d.mts +4 -0
- package/types/node_modules/openai/resources/beta/threads/index.d.ts +4 -0
- package/types/node_modules/openai/resources/beta/threads/messages.d.mts +594 -0
- package/types/node_modules/openai/resources/beta/threads/messages.d.ts +594 -0
- package/types/node_modules/openai/resources/beta/threads/runs/index.d.mts +3 -0
- package/types/node_modules/openai/resources/beta/threads/runs/index.d.ts +3 -0
- package/types/node_modules/openai/resources/beta/threads/runs/runs.d.mts +733 -0
- package/types/node_modules/openai/resources/beta/threads/runs/runs.d.ts +733 -0
- package/types/node_modules/openai/resources/beta/threads/runs/steps.d.mts +615 -0
- package/types/node_modules/openai/resources/beta/threads/runs/steps.d.ts +615 -0
- package/types/node_modules/openai/resources/beta/threads/runs.d.mts +2 -0
- package/types/node_modules/openai/resources/beta/threads/runs.d.ts +2 -0
- package/types/node_modules/openai/resources/beta/threads/threads.d.mts +1044 -0
- package/types/node_modules/openai/resources/beta/threads/threads.d.ts +1044 -0
- package/types/node_modules/openai/resources/beta/threads.d.mts +2 -0
- package/types/node_modules/openai/resources/beta/threads.d.ts +2 -0
- package/types/node_modules/openai/resources/beta.d.mts +2 -0
- package/types/node_modules/openai/resources/beta.d.ts +2 -0
- package/types/node_modules/openai/resources/chat/chat.d.mts +13 -0
- package/types/node_modules/openai/resources/chat/chat.d.ts +13 -0
- package/types/node_modules/openai/resources/chat/completions/completions.d.mts +1627 -0
- package/types/node_modules/openai/resources/chat/completions/completions.d.ts +1627 -0
- package/types/node_modules/openai/resources/chat/completions/index.d.mts +4 -0
- package/types/node_modules/openai/resources/chat/completions/index.d.ts +4 -0
- package/types/node_modules/openai/resources/chat/completions/messages.d.mts +34 -0
- package/types/node_modules/openai/resources/chat/completions/messages.d.ts +34 -0
- package/types/node_modules/openai/resources/chat/completions.d.mts +2 -0
- package/types/node_modules/openai/resources/chat/completions.d.ts +2 -0
- package/types/node_modules/openai/resources/chat/index.d.mts +3 -0
- package/types/node_modules/openai/resources/chat/index.d.ts +3 -0
- package/types/node_modules/openai/resources/chat.d.mts +2 -0
- package/types/node_modules/openai/resources/chat.d.ts +2 -0
- package/types/node_modules/openai/resources/completions.d.mts +329 -0
- package/types/node_modules/openai/resources/completions.d.ts +329 -0
- package/types/node_modules/openai/resources/containers/containers.d.mts +200 -0
- package/types/node_modules/openai/resources/containers/containers.d.ts +200 -0
- package/types/node_modules/openai/resources/containers/files/content.d.mts +16 -0
- package/types/node_modules/openai/resources/containers/files/content.d.ts +16 -0
- package/types/node_modules/openai/resources/containers/files/files.d.mts +148 -0
- package/types/node_modules/openai/resources/containers/files/files.d.ts +148 -0
- package/types/node_modules/openai/resources/containers/files/index.d.mts +3 -0
- package/types/node_modules/openai/resources/containers/files/index.d.ts +3 -0
- package/types/node_modules/openai/resources/containers/files.d.mts +2 -0
- package/types/node_modules/openai/resources/containers/files.d.ts +2 -0
- package/types/node_modules/openai/resources/containers/index.d.mts +3 -0
- package/types/node_modules/openai/resources/containers/index.d.ts +3 -0
- package/types/node_modules/openai/resources/containers.d.mts +2 -0
- package/types/node_modules/openai/resources/containers.d.ts +2 -0
- package/types/node_modules/openai/resources/conversations/conversations.d.mts +176 -0
- package/types/node_modules/openai/resources/conversations/conversations.d.ts +176 -0
- package/types/node_modules/openai/resources/conversations/index.d.mts +3 -0
- package/types/node_modules/openai/resources/conversations/index.d.ts +3 -0
- package/types/node_modules/openai/resources/conversations/items.d.mts +367 -0
- package/types/node_modules/openai/resources/conversations/items.d.ts +367 -0
- package/types/node_modules/openai/resources/conversations.d.mts +2 -0
- package/types/node_modules/openai/resources/conversations.d.ts +2 -0
- package/types/node_modules/openai/resources/embeddings.d.mts +113 -0
- package/types/node_modules/openai/resources/embeddings.d.ts +113 -0
- package/types/node_modules/openai/resources/evals/evals.d.mts +735 -0
- package/types/node_modules/openai/resources/evals/evals.d.ts +735 -0
- package/types/node_modules/openai/resources/evals/index.d.mts +3 -0
- package/types/node_modules/openai/resources/evals/index.d.ts +3 -0
- package/types/node_modules/openai/resources/evals/runs/index.d.mts +3 -0
- package/types/node_modules/openai/resources/evals/runs/index.d.ts +3 -0
- package/types/node_modules/openai/resources/evals/runs/output-items.d.mts +382 -0
- package/types/node_modules/openai/resources/evals/runs/output-items.d.ts +382 -0
- package/types/node_modules/openai/resources/evals/runs/runs.d.mts +2290 -0
- package/types/node_modules/openai/resources/evals/runs/runs.d.ts +2290 -0
- package/types/node_modules/openai/resources/evals/runs.d.mts +2 -0
- package/types/node_modules/openai/resources/evals/runs.d.ts +2 -0
- package/types/node_modules/openai/resources/evals.d.mts +2 -0
- package/types/node_modules/openai/resources/evals.d.ts +2 -0
- package/types/node_modules/openai/resources/files.d.mts +164 -0
- package/types/node_modules/openai/resources/files.d.ts +164 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.mts +10 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/alpha.d.ts +10 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.mts +119 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/graders.d.ts +119 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.mts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha/index.d.ts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha.d.mts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/alpha.d.ts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.mts +10 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/checkpoints.d.ts +10 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.mts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/index.d.ts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.mts +160 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints/permissions.d.ts +160 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.mts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/checkpoints.d.ts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.mts +22 -0
- package/types/node_modules/openai/resources/fine-tuning/fine-tuning.d.ts +22 -0
- package/types/node_modules/openai/resources/fine-tuning/index.d.mts +6 -0
- package/types/node_modules/openai/resources/fine-tuning/index.d.ts +6 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.mts +74 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/checkpoints.d.ts +74 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.mts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/index.d.ts +3 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.mts +528 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs/jobs.d.ts +528 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs.d.mts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/jobs.d.ts +2 -0
- package/types/node_modules/openai/resources/fine-tuning/methods.d.mts +120 -0
- package/types/node_modules/openai/resources/fine-tuning/methods.d.ts +120 -0
- package/types/node_modules/openai/resources/fine-tuning.d.mts +2 -0
- package/types/node_modules/openai/resources/fine-tuning.d.ts +2 -0
- package/types/node_modules/openai/resources/graders/grader-models.d.mts +304 -0
- package/types/node_modules/openai/resources/graders/grader-models.d.ts +304 -0
- package/types/node_modules/openai/resources/graders/graders.d.mts +10 -0
- package/types/node_modules/openai/resources/graders/graders.d.ts +10 -0
- package/types/node_modules/openai/resources/graders/index.d.mts +3 -0
- package/types/node_modules/openai/resources/graders/index.d.ts +3 -0
- package/types/node_modules/openai/resources/graders.d.mts +2 -0
- package/types/node_modules/openai/resources/graders.d.ts +2 -0
- package/types/node_modules/openai/resources/images.d.mts +653 -0
- package/types/node_modules/openai/resources/images.d.ts +653 -0
- package/types/node_modules/openai/resources/index.d.mts +22 -0
- package/types/node_modules/openai/resources/index.d.ts +22 -0
- package/types/node_modules/openai/resources/models.d.mts +52 -0
- package/types/node_modules/openai/resources/models.d.ts +52 -0
- package/types/node_modules/openai/resources/moderations.d.mts +295 -0
- package/types/node_modules/openai/resources/moderations.d.ts +295 -0
- package/types/node_modules/openai/resources/realtime/client-secrets.d.mts +594 -0
- package/types/node_modules/openai/resources/realtime/client-secrets.d.ts +594 -0
- package/types/node_modules/openai/resources/realtime/index.d.mts +3 -0
- package/types/node_modules/openai/resources/realtime/index.d.ts +3 -0
- package/types/node_modules/openai/resources/realtime/realtime.d.mts +3828 -0
- package/types/node_modules/openai/resources/realtime/realtime.d.ts +3828 -0
- package/types/node_modules/openai/resources/realtime.d.mts +2 -0
- package/types/node_modules/openai/resources/realtime.d.ts +2 -0
- package/types/node_modules/openai/resources/responses/index.d.mts +3 -0
- package/types/node_modules/openai/resources/responses/index.d.ts +3 -0
- package/types/node_modules/openai/resources/responses/input-items.d.mts +65 -0
- package/types/node_modules/openai/resources/responses/input-items.d.ts +65 -0
- package/types/node_modules/openai/resources/responses/responses.d.mts +4705 -0
- package/types/node_modules/openai/resources/responses/responses.d.ts +4705 -0
- package/types/node_modules/openai/resources/responses.d.mts +2 -0
- package/types/node_modules/openai/resources/responses.d.ts +2 -0
- package/types/node_modules/openai/resources/shared.d.mts +265 -0
- package/types/node_modules/openai/resources/shared.d.ts +265 -0
- package/types/node_modules/openai/resources/uploads/index.d.mts +3 -0
- package/types/node_modules/openai/resources/uploads/index.d.ts +3 -0
- package/types/node_modules/openai/resources/uploads/parts.d.mts +51 -0
- package/types/node_modules/openai/resources/uploads/parts.d.ts +51 -0
- package/types/node_modules/openai/resources/uploads/uploads.d.mts +157 -0
- package/types/node_modules/openai/resources/uploads/uploads.d.ts +157 -0
- package/types/node_modules/openai/resources/uploads.d.mts +2 -0
- package/types/node_modules/openai/resources/uploads.d.ts +2 -0
- package/types/node_modules/openai/resources/vector-stores/file-batches.d.mts +172 -0
- package/types/node_modules/openai/resources/vector-stores/file-batches.d.ts +172 -0
- package/types/node_modules/openai/resources/vector-stores/files.d.mts +231 -0
- package/types/node_modules/openai/resources/vector-stores/files.d.ts +231 -0
- package/types/node_modules/openai/resources/vector-stores/index.d.mts +4 -0
- package/types/node_modules/openai/resources/vector-stores/index.d.ts +4 -0
- package/types/node_modules/openai/resources/vector-stores/vector-stores.d.mts +373 -0
- package/types/node_modules/openai/resources/vector-stores/vector-stores.d.ts +373 -0
- package/types/node_modules/openai/resources/vector-stores.d.mts +2 -0
- package/types/node_modules/openai/resources/vector-stores.d.ts +2 -0
- package/types/node_modules/openai/resources/webhooks.d.mts +587 -0
- package/types/node_modules/openai/resources/webhooks.d.ts +587 -0
- package/types/node_modules/openai/resources.d.mts +2 -0
- package/types/node_modules/openai/resources.d.ts +2 -0
- package/types/node_modules/openai/src/_vendor/zod-to-json-schema/LICENSE +15 -0
- package/types/node_modules/openai/streaming.d.mts +2 -0
- package/types/node_modules/openai/streaming.d.ts +2 -0
- package/types/node_modules/openai/uploads.d.mts +2 -0
- package/types/node_modules/openai/uploads.d.ts +2 -0
- package/types/node_modules/openai/version.d.mts +2 -0
- package/types/node_modules/openai/version.d.ts +2 -0
package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts
@@ -0,0 +1,299 @@
+import { APIResource } from "../../../core/resource.mjs";
+import { APIPromise } from "../../../core/api-promise.mjs";
+import { RequestOptions } from "../../../internal/request-options.mjs";
+export declare class TranscriptionSessions extends APIResource {
+    /**
+     * Create an ephemeral API token for use in client-side applications with the
+     * Realtime API specifically for realtime transcriptions. Can be configured with
+     * the same session parameters as the `transcription_session.update` client event.
+     *
+     * It responds with a session object, plus a `client_secret` key which contains a
+     * usable ephemeral API token that can be used to authenticate browser clients for
+     * the Realtime API.
+     *
+     * @example
+     * ```ts
+     * const transcriptionSession =
+     *   await client.beta.realtime.transcriptionSessions.create();
+     * ```
+     */
+    create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
+}
+/**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+export interface TranscriptionSession {
+    /**
+     * Ephemeral key returned by the API. Only present when the session is created on
+     * the server via REST API.
+     */
+    client_secret: TranscriptionSession.ClientSecret;
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+     */
+    input_audio_format?: string;
+    /**
+     * Configuration of the transcription model.
+     */
+    input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+    /**
+     * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+     * means that the model will detect the start and end of speech based on audio
+     * volume and respond at the end of user speech.
+     */
+    turn_detection?: TranscriptionSession.TurnDetection;
+}
+export declare namespace TranscriptionSession {
+    /**
+     * Ephemeral key returned by the API. Only present when the session is created on
+     * the server via REST API.
+     */
+    interface ClientSecret {
+        /**
+         * Timestamp for when the token expires. Currently, all tokens expire after one
+         * minute.
+         */
+        expires_at: number;
+        /**
+         * Ephemeral key usable in client environments to authenticate connections to the
+         * Realtime API. Use this in client-side environments rather than a standard API
+         * token, which should only be used server-side.
+         */
+        value: string;
+    }
+    /**
+     * Configuration of the transcription model.
+     */
+    interface InputAudioTranscription {
+        /**
+         * The language of the input audio. Supplying the input language in
+         * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+         * format will improve accuracy and latency.
+         */
+        language?: string;
+        /**
+         * The model to use for transcription. Can be `gpt-4o-transcribe`,
+         * `gpt-4o-mini-transcribe`, or `whisper-1`.
+         */
+        model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+        /**
+         * An optional text to guide the model's style or continue a previous audio
+         * segment. The
+         * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+         * should match the audio language.
+         */
+        prompt?: string;
+    }
+    /**
+     * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+     * means that the model will detect the start and end of speech based on audio
+     * volume and respond at the end of user speech.
+     */
+    interface TurnDetection {
+        /**
+         * Amount of audio to include before the VAD detected speech (in milliseconds).
+         * Defaults to 300ms.
+         */
+        prefix_padding_ms?: number;
+        /**
+         * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+         * With shorter values the model will respond more quickly, but may jump in on
+         * short pauses from the user.
+         */
+        silence_duration_ms?: number;
+        /**
+         * Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+         * threshold will require louder audio to activate the model, and thus might
+         * perform better in noisy environments.
+         */
+        threshold?: number;
+        /**
+         * Type of turn detection; only `server_vad` is currently supported.
+         */
+        type?: string;
+    }
+}
+export interface TranscriptionSessionCreateParams {
+    /**
+     * Configuration options for the generated client secret.
+     */
+    client_secret?: TranscriptionSessionCreateParams.ClientSecret;
+    /**
+     * The set of items to include in the transcription. Currently available items are:
+     *
+     * - `item.input_audio_transcription.logprobs`
+     */
+    include?: Array<string>;
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+     * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+     * (mono), and little-endian byte order.
+     */
+    input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
+}
+export declare namespace TranscriptionSessionCreateParams {
+    /**
+     * Configuration options for the generated client secret.
+     */
+    interface ClientSecret {
+        /**
+         * Configuration for the ephemeral token expiration.
+         */
+        expires_at?: ClientSecret.ExpiresAt;
+    }
+    namespace ClientSecret {
+        /**
+         * Configuration for the ephemeral token expiration.
+         */
+        interface ExpiresAt {
+            /**
+             * The anchor point for the ephemeral token expiration. Only `created_at` is
+             * currently supported.
+             */
+            anchor?: 'created_at';
+            /**
+             * The number of seconds from the anchor point to the expiration. Select a value
+             * between `10` and `7200`.
+             */
+            seconds?: number;
+        }
+    }
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    interface InputAudioNoiseReduction {
+        /**
+         * Type of noise reduction. `near_field` is for close-talking microphones such as
+         * headphones; `far_field` is for far-field microphones such as laptop or
+         * conference room microphones.
+         */
+        type?: 'near_field' | 'far_field';
+    }
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    interface InputAudioTranscription {
+        /**
+         * The language of the input audio. Supplying the input language in
+         * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+         * format will improve accuracy and latency.
+         */
+        language?: string;
+        /**
+         * The model to use for transcription; current options are `gpt-4o-transcribe`,
+         * `gpt-4o-mini-transcribe`, and `whisper-1`.
+         */
+        model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+        /**
+         * An optional text to guide the model's style or continue a previous audio
+         * segment. For `whisper-1`, the
+         * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+         * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+         * "expect words related to technology".
+         */
+        prompt?: string;
+    }
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    interface TurnDetection {
+        /**
+         * Whether or not to automatically generate a response when a VAD stop event
+         * occurs. Not available for transcription sessions.
+         */
+        create_response?: boolean;
+        /**
+         * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+         * will wait longer for the user to continue speaking, `high` will respond more
+         * quickly. `auto` is the default and is equivalent to `medium`.
+         */
+        eagerness?: 'low' | 'medium' | 'high' | 'auto';
+        /**
+         * Whether or not to automatically interrupt any ongoing response with output to
+         * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+         * occurs. Not available for transcription sessions.
+         */
+        interrupt_response?: boolean;
+        /**
+         * Used only for `server_vad` mode. Amount of audio to include before the VAD
+         * detected speech (in milliseconds). Defaults to 300ms.
+         */
+        prefix_padding_ms?: number;
+        /**
+         * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+         * milliseconds). Defaults to 500ms. With shorter values the model will respond
+         * more quickly, but may jump in on short pauses from the user.
+         */
+        silence_duration_ms?: number;
+        /**
+         * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0); this
+         * defaults to 0.5. A higher threshold will require louder audio to activate the
+         * model, and thus might perform better in noisy environments.
+         */
+        threshold?: number;
+        /**
+         * Type of turn detection.
+         */
+        type?: 'server_vad' | 'semantic_vad';
+    }
+}
+export declare namespace TranscriptionSessions {
+    export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
+}
+//# sourceMappingURL=transcription-sessions.d.mts.map
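The declarations above describe a two-step flow: a server holding a real API key calls `create()`, then hands only the returned `client_secret.value` (a short-lived token) to the browser. A minimal server-side sketch against these types, assuming the upstream `openai` client is constructed as usual; the parameter values are illustrative, not defaults shipped by this package:

```ts
import OpenAI from 'openai';

// Server-side only: the standard API key must never reach the browser.
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function mintTranscriptionToken(): Promise<string> {
  // The body shape follows TranscriptionSessionCreateParams as declared above.
  const session = await client.beta.realtime.transcriptionSessions.create({
    input_audio_format: 'pcm16', // 16-bit PCM, 24kHz, mono, little-endian
    input_audio_transcription: {
      model: 'gpt-4o-mini-transcribe',
      language: 'en', // ISO-639-1; improves accuracy and latency
    },
    turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
    // Illustrative expiry: any value from 10 to 7200 seconds after creation.
    client_secret: { expires_at: { anchor: 'created_at', seconds: 600 } },
  });
  // Only the ephemeral token leaves the server.
  return session.client_secret.value;
}
```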
package/types/node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts
@@ -0,0 +1,299 @@
+import { APIResource } from "../../../core/resource.js";
+import { APIPromise } from "../../../core/api-promise.js";
+import { RequestOptions } from "../../../internal/request-options.js";
+export declare class TranscriptionSessions extends APIResource {
+    /**
+     * Create an ephemeral API token for use in client-side applications with the
+     * Realtime API specifically for realtime transcriptions. Can be configured with
+     * the same session parameters as the `transcription_session.update` client event.
+     *
+     * It responds with a session object, plus a `client_secret` key which contains a
+     * usable ephemeral API token that can be used to authenticate browser clients for
+     * the Realtime API.
+     *
+     * @example
+     * ```ts
+     * const transcriptionSession =
+     *   await client.beta.realtime.transcriptionSessions.create();
+     * ```
+     */
+    create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
+}
+/**
+ * A new Realtime transcription session configuration.
+ *
+ * When a session is created on the server via REST API, the session object also
+ * contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
+ * not present when a session is updated via the WebSocket API.
+ */
+export interface TranscriptionSession {
+    /**
+     * Ephemeral key returned by the API. Only present when the session is created on
+     * the server via REST API.
+     */
+    client_secret: TranscriptionSession.ClientSecret;
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
+     */
+    input_audio_format?: string;
+    /**
+     * Configuration of the transcription model.
+     */
+    input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+    /**
+     * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+     * means that the model will detect the start and end of speech based on audio
+     * volume and respond at the end of user speech.
+     */
+    turn_detection?: TranscriptionSession.TurnDetection;
+}
+export declare namespace TranscriptionSession {
+    /**
+     * Ephemeral key returned by the API. Only present when the session is created on
+     * the server via REST API.
+     */
+    interface ClientSecret {
+        /**
+         * Timestamp for when the token expires. Currently, all tokens expire after one
+         * minute.
+         */
+        expires_at: number;
+        /**
+         * Ephemeral key usable in client environments to authenticate connections to the
+         * Realtime API. Use this in client-side environments rather than a standard API
+         * token, which should only be used server-side.
+         */
+        value: string;
+    }
+    /**
+     * Configuration of the transcription model.
+     */
+    interface InputAudioTranscription {
+        /**
+         * The language of the input audio. Supplying the input language in
+         * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+         * format will improve accuracy and latency.
+         */
+        language?: string;
+        /**
+         * The model to use for transcription. Can be `gpt-4o-transcribe`,
+         * `gpt-4o-mini-transcribe`, or `whisper-1`.
+         */
+        model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+        /**
+         * An optional text to guide the model's style or continue a previous audio
+         * segment. The
+         * [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+         * should match the audio language.
+         */
+        prompt?: string;
+    }
+    /**
+     * Configuration for turn detection. Can be set to `null` to turn off. Server VAD
+     * means that the model will detect the start and end of speech based on audio
+     * volume and respond at the end of user speech.
+     */
+    interface TurnDetection {
+        /**
+         * Amount of audio to include before the VAD detected speech (in milliseconds).
+         * Defaults to 300ms.
+         */
+        prefix_padding_ms?: number;
+        /**
+         * Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
+         * With shorter values the model will respond more quickly, but may jump in on
+         * short pauses from the user.
+         */
+        silence_duration_ms?: number;
+        /**
+         * Activation threshold for VAD (0.0 to 1.0); this defaults to 0.5. A higher
+         * threshold will require louder audio to activate the model, and thus might
+         * perform better in noisy environments.
+         */
+        threshold?: number;
+        /**
+         * Type of turn detection; only `server_vad` is currently supported.
+         */
+        type?: string;
+    }
+}
+export interface TranscriptionSessionCreateParams {
+    /**
+     * Configuration options for the generated client secret.
+     */
+    client_secret?: TranscriptionSessionCreateParams.ClientSecret;
+    /**
+     * The set of items to include in the transcription. Currently available items are:
+     *
+     * - `item.input_audio_transcription.logprobs`
+     */
+    include?: Array<string>;
+    /**
+     * The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
+     * `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
+     * (mono), and little-endian byte order.
+     */
+    input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
+    /**
+     * The set of modalities the model can respond with. To disable audio, set this to
+     * ["text"].
+     */
+    modalities?: Array<'text' | 'audio'>;
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
+}
+export declare namespace TranscriptionSessionCreateParams {
+    /**
+     * Configuration options for the generated client secret.
+     */
+    interface ClientSecret {
+        /**
+         * Configuration for the ephemeral token expiration.
+         */
+        expires_at?: ClientSecret.ExpiresAt;
+    }
+    namespace ClientSecret {
+        /**
+         * Configuration for the ephemeral token expiration.
+         */
+        interface ExpiresAt {
+            /**
+             * The anchor point for the ephemeral token expiration. Only `created_at` is
+             * currently supported.
+             */
+            anchor?: 'created_at';
+            /**
+             * The number of seconds from the anchor point to the expiration. Select a value
+             * between `10` and `7200`.
+             */
+            seconds?: number;
+        }
+    }
+    /**
+     * Configuration for input audio noise reduction. This can be set to `null` to turn
+     * off. Noise reduction filters audio added to the input audio buffer before it is
+     * sent to VAD and the model. Filtering the audio can improve VAD and turn
+     * detection accuracy (reducing false positives) and model performance by improving
+     * perception of the input audio.
+     */
+    interface InputAudioNoiseReduction {
+        /**
+         * Type of noise reduction. `near_field` is for close-talking microphones such as
+         * headphones; `far_field` is for far-field microphones such as laptop or
+         * conference room microphones.
+         */
+        type?: 'near_field' | 'far_field';
+    }
+    /**
+     * Configuration for input audio transcription. The client can optionally set the
+     * language and prompt for transcription; these offer additional guidance to the
+     * transcription service.
+     */
+    interface InputAudioTranscription {
+        /**
+         * The language of the input audio. Supplying the input language in
+         * [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+         * format will improve accuracy and latency.
+         */
+        language?: string;
+        /**
+         * The model to use for transcription; current options are `gpt-4o-transcribe`,
+         * `gpt-4o-mini-transcribe`, and `whisper-1`.
+         */
+        model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
+        /**
+         * An optional text to guide the model's style or continue a previous audio
+         * segment. For `whisper-1`, the
+         * [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
+         * For `gpt-4o-transcribe` models, the prompt is a free text string, for example
+         * "expect words related to technology".
+         */
+        prompt?: string;
+    }
+    /**
+     * Configuration for turn detection, either Server VAD or Semantic VAD. This can be
+     * set to `null` to turn off, in which case the client must manually trigger model
+     * response. Server VAD means that the model will detect the start and end of
+     * speech based on audio volume and respond at the end of user speech. Semantic VAD
+     * is more advanced and uses a turn detection model (in conjunction with VAD) to
+     * semantically estimate whether the user has finished speaking, then dynamically
+     * sets a timeout based on this probability. For example, if user audio trails off
+     * with "uhhm", the model will score a low probability of turn end and wait longer
+     * for the user to continue speaking. This can be useful for more natural
+     * conversations, but may have a higher latency.
+     */
+    interface TurnDetection {
+        /**
+         * Whether or not to automatically generate a response when a VAD stop event
+         * occurs. Not available for transcription sessions.
+         */
+        create_response?: boolean;
+        /**
+         * Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
+         * will wait longer for the user to continue speaking, `high` will respond more
+         * quickly. `auto` is the default and is equivalent to `medium`.
+         */
+        eagerness?: 'low' | 'medium' | 'high' | 'auto';
+        /**
+         * Whether or not to automatically interrupt any ongoing response with output to
+         * the default conversation (i.e. `conversation` of `auto`) when a VAD start event
+         * occurs. Not available for transcription sessions.
+         */
+        interrupt_response?: boolean;
+        /**
+         * Used only for `server_vad` mode. Amount of audio to include before the VAD
+         * detected speech (in milliseconds). Defaults to 300ms.
+         */
+        prefix_padding_ms?: number;
+        /**
+         * Used only for `server_vad` mode. Duration of silence to detect speech stop (in
+         * milliseconds). Defaults to 500ms. With shorter values the model will respond
+         * more quickly, but may jump in on short pauses from the user.
+         */
+        silence_duration_ms?: number;
+        /**
+         * Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0); this
+         * defaults to 0.5. A higher threshold will require louder audio to activate the
+         * model, and thus might perform better in noisy environments.
+         */
+        threshold?: number;
+        /**
+         * Type of turn detection.
+         */
+        type?: 'server_vad' | 'semantic_vad';
+    }
+}
+export declare namespace TranscriptionSessions {
+    export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
+}
+//# sourceMappingURL=transcription-sessions.d.ts.map
|
|
298
|
+
}
|
|
299
|
+
//# sourceMappingURL=transcription-sessions.d.ts.map
|
|
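The `TurnDetection` doc comments above distinguish two modes: `server_vad` is tuned with numeric knobs, while `semantic_vad` is tuned with `eagerness`. A minimal sketch of each configuration, assuming the declared type is importable from the installed `openai` package at the path suggested by the vendored file layout:

// Assumed import path, inferred from the vendored layout above; adjust to
// wherever your installed openai package exposes this type.
import type { TranscriptionSessionCreateParams } from 'openai/resources/beta/realtime/transcription-sessions';

// Server VAD: turn boundaries come from audio volume alone.
const serverVad: TranscriptionSessionCreateParams.TurnDetection = {
  type: 'server_vad',
  threshold: 0.6,           // 0.0-1.0; higher needs louder audio (default 0.5)
  prefix_padding_ms: 300,   // audio kept before detected speech (default 300 ms)
  silence_duration_ms: 500, // silence that ends the turn (default 500 ms)
};

// Semantic VAD: a turn-detection model estimates whether the user is done
// speaking; `eagerness` trades response latency against premature cut-ins.
const semanticVad: TranscriptionSessionCreateParams.TurnDetection = {
  type: 'semantic_vad',
  eagerness: 'low', // wait longer when speech trails off ("uhhm", etc.)
};

// Per the doc comments above, `create_response` and `interrupt_response` are
// not available for transcription sessions, so they are omitted here.

Note that the shorter the `silence_duration_ms`, the faster the model responds, at the cost of jumping in on short pauses, exactly the trade-off the doc comment describes.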
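Putting the namespaced helpers together, here is a hedged sketch of a complete create-params object. Only `modalities` and `turn_detection` appear in this excerpt; the `client_secret`, `input_audio_noise_reduction`, and `input_audio_transcription` property names are assumptions inferred from the namespace members declared above.

import type { TranscriptionSessionCreateParams } from 'openai/resources/beta/realtime/transcription-sessions';

const params: TranscriptionSessionCreateParams = {
  modalities: ['text'],
  // Assumed property for ClientSecret: an ephemeral token valid for 10
  // minutes from creation (`seconds` must be between 10 and 7200).
  client_secret: { expires_at: { anchor: 'created_at', seconds: 600 } },
  // Assumed property for InputAudioNoiseReduction: `near_field` suits
  // close-talking microphones such as headsets.
  input_audio_noise_reduction: { type: 'near_field' },
  // Assumed property for InputAudioTranscription: ISO-639-1 language code
  // plus a free-text prompt, as the gpt-4o-transcribe models accept.
  input_audio_transcription: {
    model: 'gpt-4o-transcribe',
    language: 'en',
    prompt: 'expect words related to technology',
  },
  turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
};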
@@ -0,0 +1,4 @@
+export { Messages, type Annotation, type AnnotationDelta, type FileCitationAnnotation, type FileCitationDeltaAnnotation, type FilePathAnnotation, type FilePathDeltaAnnotation, type ImageFile, type ImageFileContentBlock, type ImageFileDelta, type ImageFileDeltaBlock, type ImageURL, type ImageURLContentBlock, type ImageURLDelta, type ImageURLDeltaBlock, type Message, type MessageContent, type MessageContentDelta, type MessageContentPartParam, type MessageDeleted, type MessageDelta, type MessageDeltaEvent, type RefusalContentBlock, type RefusalDeltaBlock, type Text, type TextContentBlock, type TextContentBlockParam, type TextDelta, type TextDeltaBlock, type MessageCreateParams, type MessageRetrieveParams, type MessageUpdateParams, type MessageListParams, type MessageDeleteParams, type MessagesPage, } from "./messages.mjs";
+export { Runs, type RequiredActionFunctionToolCall, type Run, type RunStatus, type RunCreateParams, type RunCreateParamsNonStreaming, type RunCreateParamsStreaming, type RunRetrieveParams, type RunUpdateParams, type RunListParams, type RunCancelParams, type RunSubmitToolOutputsParams, type RunSubmitToolOutputsParamsNonStreaming, type RunSubmitToolOutputsParamsStreaming, type RunsPage, type RunCreateAndPollParams, type RunCreateAndStreamParams, type RunStreamParams, type RunSubmitToolOutputsAndPollParams, type RunSubmitToolOutputsStreamParams, } from "./runs/index.mjs";
+export { Threads, type AssistantResponseFormatOption, type AssistantToolChoice, type AssistantToolChoiceFunction, type AssistantToolChoiceOption, type Thread, type ThreadDeleted, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, type ThreadCreateAndRunParamsNonStreaming, type ThreadCreateAndRunParamsStreaming, type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from "./threads.mjs";
+//# sourceMappingURL=index.d.mts.map
@@ -0,0 +1,4 @@
+export { Messages, type Annotation, type AnnotationDelta, type FileCitationAnnotation, type FileCitationDeltaAnnotation, type FilePathAnnotation, type FilePathDeltaAnnotation, type ImageFile, type ImageFileContentBlock, type ImageFileDelta, type ImageFileDeltaBlock, type ImageURL, type ImageURLContentBlock, type ImageURLDelta, type ImageURLDeltaBlock, type Message, type MessageContent, type MessageContentDelta, type MessageContentPartParam, type MessageDeleted, type MessageDelta, type MessageDeltaEvent, type RefusalContentBlock, type RefusalDeltaBlock, type Text, type TextContentBlock, type TextContentBlockParam, type TextDelta, type TextDeltaBlock, type MessageCreateParams, type MessageRetrieveParams, type MessageUpdateParams, type MessageListParams, type MessageDeleteParams, type MessagesPage, } from "./messages.js";
+export { Runs, type RequiredActionFunctionToolCall, type Run, type RunStatus, type RunCreateParams, type RunCreateParamsNonStreaming, type RunCreateParamsStreaming, type RunRetrieveParams, type RunUpdateParams, type RunListParams, type RunCancelParams, type RunSubmitToolOutputsParams, type RunSubmitToolOutputsParamsNonStreaming, type RunSubmitToolOutputsParamsStreaming, type RunsPage, type RunCreateAndPollParams, type RunCreateAndStreamParams, type RunStreamParams, type RunSubmitToolOutputsAndPollParams, type RunSubmitToolOutputsStreamParams, } from "./runs/index.js";
+export { Threads, type AssistantResponseFormatOption, type AssistantToolChoice, type AssistantToolChoiceFunction, type AssistantToolChoiceOption, type Thread, type ThreadDeleted, type ThreadCreateParams, type ThreadUpdateParams, type ThreadCreateAndRunParams, type ThreadCreateAndRunParamsNonStreaming, type ThreadCreateAndRunParamsStreaming, type ThreadCreateAndRunPollParams, type ThreadCreateAndRunStreamParams, } from "./threads.js";
+//# sourceMappingURL=index.d.ts.map
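The two hunks above flatten the messages, runs, and threads modules into a single index, in ESM (`.mjs`) and CJS (`.js`) variants, so consumers can pull the whole Threads surface from one entry point. A brief type-only consumption sketch; the deep import specifier is an assumption inferred from the vendored layout, not something this diff confirms.

// Assumed deep-import path based on the re-export layout above.
import type { Run, RunStatus } from 'openai/resources/beta/threads/index';

// A run is finished once it reaches one of the terminal statuses.
const isTerminal = (status: RunStatus): boolean =>
  status === 'completed' ||
  status === 'failed' ||
  status === 'cancelled' ||
  status === 'expired' ||
  status === 'incomplete';

const describe = (run: Run): string =>
  `run ${run.id}: ${run.status}${isTerminal(run.status) ? ' (terminal)' : ''}`;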