openai 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
@@ -23,22 +23,24 @@ module OpenAI
|
|
23
23
|
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
|
24
24
|
# your own data as input for the model's response.
|
25
25
|
#
|
26
|
-
# @overload create(
|
27
|
-
#
|
28
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
29
|
-
#
|
30
|
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
26
|
+
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
31
27
|
#
|
32
28
|
# @param background [Boolean, nil] Whether to run the model response in the background.
|
33
29
|
#
|
34
30
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
35
31
|
#
|
32
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
33
|
+
#
|
36
34
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
37
35
|
#
|
38
36
|
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
|
39
37
|
#
|
38
|
+
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
|
39
|
+
#
|
40
40
|
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
|
41
41
|
#
|
42
|
+
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
43
|
+
#
|
42
44
|
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
|
43
45
|
#
|
44
46
|
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
|
@@ -47,7 +49,7 @@ module OpenAI
|
|
47
49
|
#
|
48
50
|
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
|
49
51
|
#
|
50
|
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the
|
52
|
+
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
|
51
53
|
#
|
52
54
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
53
55
|
#
|
@@ -55,10 +57,12 @@ module OpenAI
|
|
55
57
|
#
|
56
58
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
57
59
|
#
|
58
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
|
60
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
59
61
|
#
|
60
62
|
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
61
63
|
#
|
64
|
+
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
65
|
+
#
|
62
66
|
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
|
63
67
|
#
|
64
68
|
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
|
@@ -70,7 +74,7 @@ module OpenAI
|
|
70
74
|
# @return [OpenAI::Models::Responses::Response]
|
71
75
|
#
|
72
76
|
# @see OpenAI::Models::Responses::ResponseCreateParams
|
73
|
-
def create(params)
|
77
|
+
def create(params = {})
|
74
78
|
parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
|
75
79
|
if parsed[:stream]
|
76
80
|
message = "Please use `#stream_raw` for the streaming use case."
|
@@ -183,22 +187,24 @@ module OpenAI
|
|
183
187
|
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
|
184
188
|
# your own data as input for the model's response.
|
185
189
|
#
|
186
|
-
# @overload stream_raw(
|
187
|
-
#
|
188
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
189
|
-
#
|
190
|
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
190
|
+
# @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
191
191
|
#
|
192
192
|
# @param background [Boolean, nil] Whether to run the model response in the background.
|
193
193
|
#
|
194
194
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
195
195
|
#
|
196
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
197
|
+
#
|
196
198
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
197
199
|
#
|
198
200
|
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
|
199
201
|
#
|
202
|
+
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
|
203
|
+
#
|
200
204
|
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
|
201
205
|
#
|
206
|
+
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
207
|
+
#
|
202
208
|
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
|
203
209
|
#
|
204
210
|
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
|
@@ -207,7 +213,7 @@ module OpenAI
|
|
207
213
|
#
|
208
214
|
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
|
209
215
|
#
|
210
|
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the
|
216
|
+
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
|
211
217
|
#
|
212
218
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
213
219
|
#
|
@@ -215,10 +221,12 @@ module OpenAI
|
|
215
221
|
#
|
216
222
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
217
223
|
#
|
218
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
|
224
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
219
225
|
#
|
220
226
|
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
221
227
|
#
|
228
|
+
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
229
|
+
#
|
222
230
|
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
|
223
231
|
#
|
224
232
|
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
|
@@ -230,7 +238,7 @@ module OpenAI
|
|
230
238
|
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningDeltaEvent, OpenAI::Models::Responses::ResponseReasoningDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
|
231
239
|
#
|
232
240
|
# @see OpenAI::Models::Responses::ResponseCreateParams
|
233
|
-
def stream_raw(params)
|
241
|
+
def stream_raw(params = {})
|
234
242
|
parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
|
235
243
|
unless parsed.fetch(:stream, true)
|
236
244
|
message = "Please use `#create` for the non-streaming use case."
|
@@ -0,0 +1,124 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require "openssl"
|
4
|
+
require "base64"
|
5
|
+
|
6
|
+
module OpenAI
|
7
|
+
module Resources
|
8
|
+
class Webhooks
|
9
|
+
# Validates that the given payload was sent by OpenAI and parses the payload.
|
10
|
+
#
|
11
|
+
# @param payload [String] The raw webhook payload as a string
|
12
|
+
# @param headers [Hash] The webhook headers
|
13
|
+
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
|
14
|
+
#
|
15
|
+
# @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseCreatedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent]
|
16
|
+
#
|
17
|
+
# @raise [ArgumentError] if signature verification fails
|
18
|
+
def unwrap(
|
19
|
+
payload,
|
20
|
+
headers = {},
|
21
|
+
webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"]
|
22
|
+
)
|
23
|
+
verify_signature(payload, headers, webhook_secret)
|
24
|
+
|
25
|
+
parsed = JSON.parse(payload, symbolize_names: true)
|
26
|
+
OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::Webhooks::UnwrapWebhookEvent, parsed)
|
27
|
+
end
|
28
|
+
|
29
|
+
# Validates whether or not the webhook payload was sent by OpenAI.
#
# Verification steps, in order:
#   1. a webhook secret must be available,
#   2. the `webhook-signature`, `webhook-timestamp` and `webhook-id` headers
#      must be present,
#   3. the timestamp must be within `tolerance` seconds of now (replay
#      protection, in both directions),
#   4. at least one `v1,<base64>` signature must match the HMAC-SHA256 of
#      "{webhook_id}.{timestamp}.{payload}" (timing-safe comparison).
#
# @param payload [String] The webhook payload as a string
# @param headers [Hash] The webhook headers
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
# @param tolerance [Integer] Maximum age of the webhook in seconds (default: 300 = 5 minutes)
#
# @raise [ArgumentError] if the secret or a required header is missing, or the timestamp is malformed
# @raise [OpenAI::Errors::InvalidWebhookSignatureError] if the timestamp is out of tolerance or no signature matches
def verify_signature(
  payload,
  headers,
  webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"],
  tolerance = 300
)
  if webhook_secret.nil?
    raise ArgumentError,
          "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, " \
          "or passed to this function"
  end

  # Extract required headers (accept both dashed string keys and symbol keys).
  signature_header = headers["webhook-signature"] || headers[:webhook_signature]
  timestamp_header = headers["webhook-timestamp"] || headers[:webhook_timestamp]
  webhook_id = headers["webhook-id"] || headers[:webhook_id]

  if signature_header.nil?
    raise ArgumentError, "Missing required webhook-signature header"
  end

  if timestamp_header.nil?
    raise ArgumentError, "Missing required webhook-timestamp header"
  end

  if webhook_id.nil?
    raise ArgumentError, "Missing required webhook-id header"
  end

  # Validate timestamp to prevent replay attacks.
  # BUGFIX: `String#to_i` never raises — it silently coerces garbage to 0 —
  # so the rescue below was unreachable and a malformed timestamp surfaced as
  # a misleading "too old" signature error. `Integer(..., 10)` raises
  # ArgumentError on malformed input, making the format error reachable.
  begin
    timestamp_seconds = Integer(timestamp_header, 10)
  rescue ArgumentError, TypeError
    raise ArgumentError, "Invalid webhook timestamp format"
  end

  now = Time.now.to_i

  if now - timestamp_seconds > tolerance
    raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too old"
  end

  if timestamp_seconds > now + tolerance
    raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too new"
  end

  # Extract signatures from v1,<base64> format
  # The signature header can have multiple values, separated by spaces.
  # Each value is in the format v1,<base64>. We should accept if any match.
  signatures = signature_header.split.map do |part|
    if part.start_with?("v1,")
      part[3..]
    else
      part
    end
  end

  # Decode the secret if it starts with whsec_
  decoded_secret = if webhook_secret.start_with?("whsec_")
    Base64.decode64(webhook_secret[6..])
  else
    webhook_secret
  end

  # Create the signed payload: {webhook_id}.{timestamp}.{payload}
  signed_payload = "#{webhook_id}.#{timestamp_header}.#{payload}"

  # Compute HMAC-SHA256 signature
  expected_signature = Base64.encode64(
    OpenSSL::HMAC.digest("sha256", decoded_secret, signed_payload)
  ).strip

  # Accept if any signature matches using timing-safe comparison
  return if signatures.any? { |signature| OpenSSL.secure_compare(expected_signature, signature) }

  raise OpenAI::Errors::InvalidWebhookSignatureError,
        "The given webhook signature does not match the expected signature"
end
|
115
|
+
|
116
|
+
# @api private
#
# @param client [OpenAI::Client]
def initialize(client:)
  # Retained so instance methods can default to the client's webhook secret.
  @client = client
end
|
122
|
+
end
|
123
|
+
end
|
124
|
+
end
|
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -441,6 +441,7 @@ require_relative "openai/models/responses/response_web_search_call_in_progress_e
|
|
441
441
|
require_relative "openai/models/responses/response_web_search_call_searching_event"
|
442
442
|
require_relative "openai/models/responses/tool"
|
443
443
|
require_relative "openai/models/responses/tool_choice_function"
|
444
|
+
require_relative "openai/models/responses/tool_choice_mcp"
|
444
445
|
require_relative "openai/models/responses/tool_choice_options"
|
445
446
|
require_relative "openai/models/responses/tool_choice_types"
|
446
447
|
require_relative "openai/models/responses/web_search_tool"
|
@@ -477,6 +478,22 @@ require_relative "openai/models/vector_stores/vector_store_file_deleted"
|
|
477
478
|
require_relative "openai/models/vector_store_search_params"
|
478
479
|
require_relative "openai/models/vector_store_search_response"
|
479
480
|
require_relative "openai/models/vector_store_update_params"
|
481
|
+
require_relative "openai/models/webhooks/batch_cancelled_webhook_event"
|
482
|
+
require_relative "openai/models/webhooks/batch_completed_webhook_event"
|
483
|
+
require_relative "openai/models/webhooks/batch_expired_webhook_event"
|
484
|
+
require_relative "openai/models/webhooks/batch_failed_webhook_event"
|
485
|
+
require_relative "openai/models/webhooks/eval_run_canceled_webhook_event"
|
486
|
+
require_relative "openai/models/webhooks/eval_run_failed_webhook_event"
|
487
|
+
require_relative "openai/models/webhooks/eval_run_succeeded_webhook_event"
|
488
|
+
require_relative "openai/models/webhooks/fine_tuning_job_cancelled_webhook_event"
|
489
|
+
require_relative "openai/models/webhooks/fine_tuning_job_failed_webhook_event"
|
490
|
+
require_relative "openai/models/webhooks/fine_tuning_job_succeeded_webhook_event"
|
491
|
+
require_relative "openai/models/webhooks/response_cancelled_webhook_event"
|
492
|
+
require_relative "openai/models/webhooks/response_completed_webhook_event"
|
493
|
+
require_relative "openai/models/webhooks/response_failed_webhook_event"
|
494
|
+
require_relative "openai/models/webhooks/response_incomplete_webhook_event"
|
495
|
+
require_relative "openai/models/webhooks/unwrap_webhook_event"
|
496
|
+
require_relative "openai/models/webhooks/webhook_unwrap_params"
|
480
497
|
require_relative "openai/models"
|
481
498
|
require_relative "openai/resources/audio"
|
482
499
|
require_relative "openai/resources/audio/speech"
|
@@ -521,3 +538,4 @@ require_relative "openai/resources/uploads/parts"
|
|
521
538
|
require_relative "openai/resources/vector_stores"
|
522
539
|
require_relative "openai/resources/vector_stores/file_batches"
|
523
540
|
require_relative "openai/resources/vector_stores/files"
|
541
|
+
require_relative "openai/resources/webhooks"
|
data/rbi/openai/client.rbi
CHANGED
data/rbi/openai/errors.rbi
CHANGED
@@ -8,6 +8,22 @@ module OpenAI
|
|
8
8
|
end
|
9
9
|
|
10
10
|
class ConversionError < OpenAI::Errors::Error
|
11
|
+
sig { returns(T.nilable(StandardError)) }
|
12
|
+
def cause
|
13
|
+
end
|
14
|
+
|
15
|
+
# @api private
|
16
|
+
sig do
|
17
|
+
params(
|
18
|
+
on: T::Class[StandardError],
|
19
|
+
method: Symbol,
|
20
|
+
target: T.anything,
|
21
|
+
value: T.anything,
|
22
|
+
cause: T.nilable(StandardError)
|
23
|
+
).returns(T.attached_class)
|
24
|
+
end
|
25
|
+
def self.new(on:, method:, target:, value:, cause: nil)
|
26
|
+
end
|
11
27
|
end
|
12
28
|
|
13
29
|
class APIError < OpenAI::Errors::Error
|
@@ -15,12 +15,14 @@ module OpenAI
|
|
15
15
|
CoerceState =
|
16
16
|
T.type_alias do
|
17
17
|
{
|
18
|
-
|
18
|
+
translate_names: T::Boolean,
|
19
|
+
strictness: T::Boolean,
|
19
20
|
exactness: {
|
20
21
|
yes: Integer,
|
21
22
|
no: Integer,
|
22
23
|
maybe: Integer
|
23
24
|
},
|
25
|
+
error: T::Class[StandardError],
|
24
26
|
branched: Integer
|
25
27
|
}
|
26
28
|
end
|
@@ -84,6 +86,15 @@ module OpenAI
|
|
84
86
|
def self.type_info(spec)
|
85
87
|
end
|
86
88
|
|
89
|
+
# @api private
|
90
|
+
sig do
|
91
|
+
params(translate_names: T::Boolean).returns(
|
92
|
+
OpenAI::Internal::Type::Converter::CoerceState
|
93
|
+
)
|
94
|
+
end
|
95
|
+
def self.new_coerce_state(translate_names: true)
|
96
|
+
end
|
97
|
+
|
87
98
|
# @api private
|
88
99
|
#
|
89
100
|
# Based on `target`, transform `value` into `target`, to the extent possible:
|
@@ -105,14 +116,11 @@ module OpenAI
|
|
105
116
|
def self.coerce(
|
106
117
|
target,
|
107
118
|
value,
|
108
|
-
# The `strictness` is one of `true`, `false
|
109
|
-
#
|
110
|
-
# targets:
|
119
|
+
# The `strictness` is one of `true`, `false`. This informs the coercion strategy
|
120
|
+
# when we have to decide between multiple possible conversion targets:
|
111
121
|
#
|
112
122
|
# - `true`: the conversion must be exact, with minimum coercion.
|
113
123
|
# - `false`: the conversion can be approximate, with some coercion.
|
114
|
-
# - `:strong`: the conversion must be exact, with no coercion, and raise an error
|
115
|
-
# if not possible.
|
116
124
|
#
|
117
125
|
# The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For
|
118
126
|
# any given conversion attempt, the exactness will be updated based on how closely
|
@@ -124,15 +132,7 @@ module OpenAI
|
|
124
132
|
# - `no`: the value cannot be converted to the target type.
|
125
133
|
#
|
126
134
|
# See implementation below for more details.
|
127
|
-
state:
|
128
|
-
strictness: true,
|
129
|
-
exactness: {
|
130
|
-
yes: 0,
|
131
|
-
no: 0,
|
132
|
-
maybe: 0
|
133
|
-
},
|
134
|
-
branched: 0
|
135
|
-
}
|
135
|
+
state: OpenAI::Internal::Type::Converter.new_coerce_state
|
136
136
|
)
|
137
137
|
end
|
138
138
|
|
@@ -78,6 +78,11 @@ module OpenAI
|
|
78
78
|
end
|
79
79
|
|
80
80
|
# @api private
|
81
|
+
#
|
82
|
+
# Tries to efficiently coerce the given value to one of the known variants.
|
83
|
+
#
|
84
|
+
# If the value cannot match any of the known variants, the coercion is considered
|
85
|
+
# non-viable and returns the original value.
|
81
86
|
sig do
|
82
87
|
override
|
83
88
|
.params(
|
@@ -35,6 +35,26 @@ module OpenAI
|
|
35
35
|
:"o3-pro-2025-06-10",
|
36
36
|
OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
|
37
37
|
)
|
38
|
+
O3_DEEP_RESEARCH =
|
39
|
+
T.let(
|
40
|
+
:"o3-deep-research",
|
41
|
+
OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
|
42
|
+
)
|
43
|
+
O3_DEEP_RESEARCH_2025_06_26 =
|
44
|
+
T.let(
|
45
|
+
:"o3-deep-research-2025-06-26",
|
46
|
+
OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
|
47
|
+
)
|
48
|
+
O4_MINI_DEEP_RESEARCH =
|
49
|
+
T.let(
|
50
|
+
:"o4-mini-deep-research",
|
51
|
+
OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
|
52
|
+
)
|
53
|
+
O4_MINI_DEEP_RESEARCH_2025_06_26 =
|
54
|
+
T.let(
|
55
|
+
:"o4-mini-deep-research-2025-06-26",
|
56
|
+
OpenAI::AllModels::ResponsesOnlyModel::TaggedSymbol
|
57
|
+
)
|
38
58
|
COMPUTER_USE_PREVIEW =
|
39
59
|
T.let(
|
40
60
|
:"computer-use-preview",
|
@@ -60,13 +60,30 @@ module OpenAI
|
|
60
60
|
attr_writer :response_format
|
61
61
|
|
62
62
|
# The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
|
63
|
-
# the default.
|
63
|
+
# the default.
|
64
64
|
sig { returns(T.nilable(Float)) }
|
65
65
|
attr_reader :speed
|
66
66
|
|
67
67
|
sig { params(speed: Float).void }
|
68
68
|
attr_writer :speed
|
69
69
|
|
70
|
+
# The format to stream the audio in. Supported formats are `sse` and `audio`.
|
71
|
+
# `sse` is not supported for `tts-1` or `tts-1-hd`.
|
72
|
+
sig do
|
73
|
+
returns(
|
74
|
+
T.nilable(OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol)
|
75
|
+
)
|
76
|
+
end
|
77
|
+
attr_reader :stream_format
|
78
|
+
|
79
|
+
sig do
|
80
|
+
params(
|
81
|
+
stream_format:
|
82
|
+
OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol
|
83
|
+
).void
|
84
|
+
end
|
85
|
+
attr_writer :stream_format
|
86
|
+
|
70
87
|
sig do
|
71
88
|
params(
|
72
89
|
input: String,
|
@@ -77,6 +94,8 @@ module OpenAI
|
|
77
94
|
response_format:
|
78
95
|
OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
|
79
96
|
speed: Float,
|
97
|
+
stream_format:
|
98
|
+
OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
|
80
99
|
request_options: OpenAI::RequestOptions::OrHash
|
81
100
|
).returns(T.attached_class)
|
82
101
|
end
|
@@ -98,8 +117,11 @@ module OpenAI
|
|
98
117
|
# `wav`, and `pcm`.
|
99
118
|
response_format: nil,
|
100
119
|
# The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
|
101
|
-
# the default.
|
120
|
+
# the default.
|
102
121
|
speed: nil,
|
122
|
+
# The format to stream the audio in. Supported formats are `sse` and `audio`.
|
123
|
+
# `sse` is not supported for `tts-1` or `tts-1-hd`.
|
124
|
+
stream_format: nil,
|
103
125
|
request_options: {}
|
104
126
|
)
|
105
127
|
end
|
@@ -118,6 +140,8 @@ module OpenAI
|
|
118
140
|
response_format:
|
119
141
|
OpenAI::Audio::SpeechCreateParams::ResponseFormat::OrSymbol,
|
120
142
|
speed: Float,
|
143
|
+
stream_format:
|
144
|
+
OpenAI::Audio::SpeechCreateParams::StreamFormat::OrSymbol,
|
121
145
|
request_options: OpenAI::RequestOptions
|
122
146
|
}
|
123
147
|
)
|
@@ -267,6 +291,39 @@ module OpenAI
|
|
267
291
|
def self.values
|
268
292
|
end
|
269
293
|
end
|
294
|
+
|
295
|
+
# The format to stream the audio in. Supported formats are `sse` and `audio`.
# `sse` is not supported for `tts-1` or `tts-1-hd`.
#
# NOTE(review): generated Sorbet RBI stub — method bodies are intentionally
# empty; runtime behavior lives in the corresponding lib/ file.
module StreamFormat
  extend OpenAI::Internal::Type::Enum

  TaggedSymbol =
    T.type_alias do
      T.all(Symbol, OpenAI::Audio::SpeechCreateParams::StreamFormat)
    end
  OrSymbol = T.type_alias { T.any(Symbol, String) }

  SSE =
    T.let(
      :sse,
      OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
    )
  AUDIO =
    T.let(
      :audio,
      OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
    )

  sig do
    override.returns(
      T::Array[
        OpenAI::Audio::SpeechCreateParams::StreamFormat::TaggedSymbol
      ]
    )
  end
  # All declared members of this enum.
  def self.values
  end
end
|
270
327
|
end
|
271
328
|
end
|
272
329
|
end
|