openai 0.16.0 → 0.17.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +33 -0
- data/README.md +14 -20
- data/lib/openai/internal/transport/base_client.rb +1 -1
- data/lib/openai/internal/type/array_of.rb +1 -0
- data/lib/openai/internal/type/base_model.rb +3 -1
- data/lib/openai/internal/type/converter.rb +27 -0
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/internal/type/hash_of.rb +1 -0
- data/lib/openai/internal/type/union.rb +22 -24
- data/lib/openai/models/beta/assistant_create_params.rb +4 -5
- data/lib/openai/models/beta/assistant_update_params.rb +22 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
- data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
- data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
- data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
- data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
- data/lib/openai/models/chat/chat_completion_message.rb +3 -5
- data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
- data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
- data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
- data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
- data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
- data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
- data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
- data/lib/openai/models/chat/completion_create_params.rb +35 -12
- data/lib/openai/models/chat_model.rb +7 -0
- data/lib/openai/models/custom_tool_input_format.rb +76 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
- data/lib/openai/models/evals/run_cancel_response.rb +2 -2
- data/lib/openai/models/evals/run_create_params.rb +2 -2
- data/lib/openai/models/evals/run_create_response.rb +2 -2
- data/lib/openai/models/evals/run_list_response.rb +2 -2
- data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
- data/lib/openai/models/reasoning.rb +4 -5
- data/lib/openai/models/reasoning_effort.rb +4 -4
- data/lib/openai/models/response_format_text_grammar.rb +27 -0
- data/lib/openai/models/response_format_text_python.rb +20 -0
- data/lib/openai/models/responses/custom_tool.rb +48 -0
- data/lib/openai/models/responses/response.rb +20 -12
- data/lib/openai/models/responses/response_create_params.rb +48 -10
- data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
- data/lib/openai/models/responses/response_input_item.rb +7 -1
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
- data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
- data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
- data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
- data/lib/openai/models/responses/response_stream_event.rb +13 -11
- data/lib/openai/models/responses/response_text_config.rb +27 -1
- data/lib/openai/models/responses/tool.rb +5 -1
- data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
- data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
- data/lib/openai/models/vector_store_search_params.rb +6 -1
- data/lib/openai/models.rb +6 -0
- data/lib/openai/resources/beta/assistants.rb +2 -2
- data/lib/openai/resources/beta/threads/runs.rb +2 -2
- data/lib/openai/resources/chat/completions.rb +16 -10
- data/lib/openai/resources/responses.rb +38 -22
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -2
- data/rbi/openai/internal/transport/base_client.rbi +1 -1
- data/rbi/openai/internal/type/converter.rbi +46 -0
- data/rbi/openai/internal/type/union.rbi +7 -2
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
- data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
- data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
- data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
- data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
- data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
- data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
- data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
- data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
- data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
- data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
- data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
- data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
- data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
- data/rbi/openai/models/chat/completion_create_params.rbi +106 -25
- data/rbi/openai/models/chat_model.rbi +11 -0
- data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
- data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
- data/rbi/openai/models/evals/run_create_params.rbi +4 -0
- data/rbi/openai/models/evals/run_create_response.rbi +2 -0
- data/rbi/openai/models/evals/run_list_response.rbi +2 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
- data/rbi/openai/models/reasoning.rbi +6 -8
- data/rbi/openai/models/reasoning_effort.rbi +4 -4
- data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
- data/rbi/openai/models/response_format_text_python.rbi +30 -0
- data/rbi/openai/models/responses/custom_tool.rbi +96 -0
- data/rbi/openai/models/responses/response.rbi +15 -5
- data/rbi/openai/models/responses/response_create_params.rbi +94 -7
- data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
- data/rbi/openai/models/responses/response_input_item.rbi +2 -0
- data/rbi/openai/models/responses/response_output_item.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
- data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
- data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
- data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
- data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
- data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
- data/rbi/openai/models/responses/response_text_config.rbi +64 -1
- data/rbi/openai/models/responses/tool.rbi +1 -0
- data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
- data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
- data/rbi/openai/models/vector_store_search_params.rbi +12 -1
- data/rbi/openai/models.rbi +6 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -8
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
- data/rbi/openai/resources/chat/completions.rbi +44 -19
- data/rbi/openai/resources/responses.rbi +215 -41
- data/sig/openai/internal/transport/base_client.rbs +1 -1
- data/sig/openai/internal/type/converter.rbs +17 -0
- data/sig/openai/internal/type/union.rbs +2 -2
- data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
- data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
- data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
- data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
- data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
- data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
- data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
- data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
- data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
- data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +23 -6
- data/sig/openai/models/chat_model.rbs +15 -1
- data/sig/openai/models/custom_tool_input_format.rbs +61 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/response_format_text_grammar.rbs +15 -0
- data/sig/openai/models/response_format_text_python.rbs +13 -0
- data/sig/openai/models/responses/custom_tool.rbs +43 -0
- data/sig/openai/models/responses/response.rbs +2 -0
- data/sig/openai/models/responses/response_create_params.rbs +19 -0
- data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
- data/sig/openai/models/responses/response_input_item.rbs +2 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
- data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
- data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
- data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
- data/sig/openai/models/responses/response_stream_event.rbs +4 -2
- data/sig/openai/models/responses/response_text_config.rbs +22 -3
- data/sig/openai/models/responses/tool.rbs +1 -0
- data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
- data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
- data/sig/openai/models/vector_store_search_params.rbs +2 -1
- data/sig/openai/models.rbs +6 -0
- data/sig/openai/resources/chat/completions.rbs +4 -2
- data/sig/openai/resources/responses.rbs +32 -0
- metadata +59 -8
- data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
- data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
@@ -23,13 +23,13 @@ module OpenAI
|
|
23
23
|
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
|
24
24
|
# your own data as input for the model's response.
|
25
25
|
#
|
26
|
-
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
26
|
+
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
27
27
|
#
|
28
28
|
# @param background [Boolean, nil] Whether to run the model response in the background.
|
29
29
|
#
|
30
30
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
31
31
|
#
|
32
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
32
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
33
33
|
#
|
34
34
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
35
35
|
#
|
@@ -57,13 +57,15 @@ module OpenAI
|
|
57
57
|
#
|
58
58
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
59
59
|
#
|
60
|
+
# @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`.
|
61
|
+
#
|
60
62
|
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
|
61
63
|
#
|
62
64
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
63
65
|
#
|
64
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
66
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
|
65
67
|
#
|
66
|
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
68
|
+
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
67
69
|
#
|
68
70
|
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
69
71
|
#
|
@@ -118,31 +120,37 @@ module OpenAI
|
|
118
120
|
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
|
119
121
|
# your own data as input for the model's response.
|
120
122
|
#
|
121
|
-
# @overload
|
122
|
-
#
|
123
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
124
|
-
#
|
125
|
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
123
|
+
# @overload stream(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
126
124
|
#
|
127
125
|
# @param background [Boolean, nil] Whether to run the model response in the background.
|
128
126
|
#
|
129
127
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
130
128
|
#
|
129
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
130
|
+
#
|
131
131
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
132
132
|
#
|
133
133
|
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
|
134
134
|
#
|
135
|
+
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
|
136
|
+
#
|
135
137
|
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
|
136
138
|
#
|
139
|
+
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
140
|
+
#
|
137
141
|
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
|
138
142
|
#
|
139
|
-
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
|
143
|
+
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
|
140
144
|
#
|
141
145
|
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
|
142
146
|
#
|
147
|
+
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
|
148
|
+
#
|
143
149
|
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
|
144
150
|
#
|
145
|
-
# @param
|
151
|
+
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
|
152
|
+
#
|
153
|
+
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
|
146
154
|
#
|
147
155
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
148
156
|
#
|
@@ -150,19 +158,21 @@ module OpenAI
|
|
150
158
|
#
|
151
159
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
152
160
|
#
|
153
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
|
161
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
162
|
+
#
|
163
|
+
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::Tool::Custom, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
154
164
|
#
|
155
|
-
# @param
|
165
|
+
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
156
166
|
#
|
157
167
|
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
|
158
168
|
#
|
159
169
|
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
|
160
170
|
#
|
161
|
-
# @param user [String]
|
171
|
+
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
|
162
172
|
#
|
163
173
|
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
|
164
174
|
#
|
165
|
-
# @return [OpenAI::
|
175
|
+
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent>]
|
166
176
|
#
|
167
177
|
# @see OpenAI::Models::Responses::ResponseCreateParams
|
168
178
|
def stream(params)
|
@@ -234,7 +244,7 @@ module OpenAI
|
|
234
244
|
#
|
235
245
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
236
246
|
#
|
237
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
247
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
238
248
|
#
|
239
249
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
240
250
|
#
|
@@ -262,13 +272,15 @@ module OpenAI
|
|
262
272
|
#
|
263
273
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
264
274
|
#
|
275
|
+
# @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`.
|
276
|
+
#
|
265
277
|
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
|
266
278
|
#
|
267
279
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
268
280
|
#
|
269
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
281
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
|
270
282
|
#
|
271
|
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
283
|
+
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
272
284
|
#
|
273
285
|
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
274
286
|
#
|
@@ -280,7 +292,7 @@ module OpenAI
|
|
280
292
|
#
|
281
293
|
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
|
282
294
|
#
|
283
|
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::
|
295
|
+
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, 
OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent>]
|
284
296
|
#
|
285
297
|
# @see OpenAI::Models::Responses::ResponseCreateParams
|
286
298
|
def stream_raw(params = {})
|
@@ -309,12 +321,14 @@ module OpenAI
|
|
309
321
|
#
|
310
322
|
# Retrieves a model response with the given ID.
|
311
323
|
#
|
312
|
-
# @overload retrieve(response_id, include: nil, starting_after: nil, request_options: {})
|
324
|
+
# @overload retrieve(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {})
|
313
325
|
#
|
314
326
|
# @param response_id [String] The ID of the response to retrieve.
|
315
327
|
#
|
316
328
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
|
317
329
|
#
|
330
|
+
# @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds
|
331
|
+
#
|
318
332
|
# @param starting_after [Integer] The sequence number of the event after which to start streaming.
|
319
333
|
#
|
320
334
|
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
|
@@ -344,17 +358,19 @@ module OpenAI
|
|
344
358
|
#
|
345
359
|
# Retrieves a model response with the given ID.
|
346
360
|
#
|
347
|
-
# @overload retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {})
|
361
|
+
# @overload retrieve_streaming(response_id, include: nil, include_obfuscation: nil, starting_after: nil, request_options: {})
|
348
362
|
#
|
349
363
|
# @param response_id [String] The ID of the response to retrieve.
|
350
364
|
#
|
351
365
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
|
352
366
|
#
|
367
|
+
# @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds
|
368
|
+
#
|
353
369
|
# @param starting_after [Integer] The sequence number of the event after which to start streaming.
|
354
370
|
#
|
355
371
|
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
|
356
372
|
#
|
357
|
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::
|
373
|
+
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseReasoningTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, 
OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDeltaEvent, OpenAI::Models::Responses::ResponseCustomToolCallInputDoneEvent>]
|
358
374
|
#
|
359
375
|
# @see OpenAI::Models::Responses::ResponseRetrieveParams
|
360
376
|
def retrieve_streaming(response_id, params = {})
|
data/lib/openai/version.rb
CHANGED
data/lib/openai.rb
CHANGED
@@ -183,6 +183,8 @@ require_relative "openai/models/beta/threads/text_delta_block"
|
|
183
183
|
require_relative "openai/models/beta/thread_stream_event"
|
184
184
|
require_relative "openai/models/beta/thread_update_params"
|
185
185
|
require_relative "openai/models/chat/chat_completion"
|
186
|
+
require_relative "openai/models/chat/chat_completion_allowed_tool_choice"
|
187
|
+
require_relative "openai/models/chat/chat_completion_allowed_tools"
|
186
188
|
require_relative "openai/models/chat/chat_completion_assistant_message_param"
|
187
189
|
require_relative "openai/models/chat/chat_completion_audio"
|
188
190
|
require_relative "openai/models/chat/chat_completion_audio_param"
|
@@ -192,14 +194,19 @@ require_relative "openai/models/chat/chat_completion_content_part_image"
|
|
192
194
|
require_relative "openai/models/chat/chat_completion_content_part_input_audio"
|
193
195
|
require_relative "openai/models/chat/chat_completion_content_part_refusal"
|
194
196
|
require_relative "openai/models/chat/chat_completion_content_part_text"
|
197
|
+
require_relative "openai/models/chat/chat_completion_custom_tool"
|
195
198
|
require_relative "openai/models/chat/chat_completion_deleted"
|
196
199
|
require_relative "openai/models/chat/chat_completion_developer_message_param"
|
197
200
|
require_relative "openai/models/chat/chat_completion_function_call_option"
|
198
201
|
require_relative "openai/models/chat/chat_completion_function_message_param"
|
202
|
+
require_relative "openai/models/chat/chat_completion_function_tool"
|
203
|
+
require_relative "openai/models/chat/chat_completion_message_custom_tool_call"
|
204
|
+
require_relative "openai/models/chat/chat_completion_message_function_tool_call"
|
199
205
|
require_relative "openai/models/chat/chat_completion_message_param"
|
200
206
|
require_relative "openai/models/chat/chat_completion_message_tool_call"
|
201
207
|
require_relative "openai/models/chat/chat_completion_modality"
|
202
208
|
require_relative "openai/models/chat/chat_completion_named_tool_choice"
|
209
|
+
require_relative "openai/models/chat/chat_completion_named_tool_choice_custom"
|
203
210
|
require_relative "openai/models/chat/chat_completion_prediction_content"
|
204
211
|
require_relative "openai/models/chat/chat_completion_reasoning_effort"
|
205
212
|
require_relative "openai/models/chat/chat_completion_role"
|
@@ -240,6 +247,7 @@ require_relative "openai/models/containers/file_retrieve_params"
|
|
240
247
|
require_relative "openai/models/containers/file_retrieve_response"
|
241
248
|
require_relative "openai/models/containers/files/content_retrieve_params"
|
242
249
|
require_relative "openai/models/create_embedding_response"
|
250
|
+
require_relative "openai/models/custom_tool_input_format"
|
243
251
|
require_relative "openai/models/embedding"
|
244
252
|
require_relative "openai/models/embedding_create_params"
|
245
253
|
require_relative "openai/models/embedding_model"
|
@@ -348,7 +356,10 @@ require_relative "openai/models/reasoning"
|
|
348
356
|
require_relative "openai/models/response_format_json_object"
|
349
357
|
require_relative "openai/models/response_format_json_schema"
|
350
358
|
require_relative "openai/models/response_format_text"
|
359
|
+
require_relative "openai/models/response_format_text_grammar"
|
360
|
+
require_relative "openai/models/response_format_text_python"
|
351
361
|
require_relative "openai/models/responses/computer_tool"
|
362
|
+
require_relative "openai/models/responses/custom_tool"
|
352
363
|
require_relative "openai/models/responses/easy_input_message"
|
353
364
|
require_relative "openai/models/responses/file_search_tool"
|
354
365
|
require_relative "openai/models/responses/function_tool"
|
@@ -374,6 +385,10 @@ require_relative "openai/models/responses/response_content_part_added_event"
|
|
374
385
|
require_relative "openai/models/responses/response_content_part_done_event"
|
375
386
|
require_relative "openai/models/responses/response_created_event"
|
376
387
|
require_relative "openai/models/responses/response_create_params"
|
388
|
+
require_relative "openai/models/responses/response_custom_tool_call"
|
389
|
+
require_relative "openai/models/responses/response_custom_tool_call_input_delta_event"
|
390
|
+
require_relative "openai/models/responses/response_custom_tool_call_input_done_event"
|
391
|
+
require_relative "openai/models/responses/response_custom_tool_call_output"
|
377
392
|
require_relative "openai/models/responses/response_delete_params"
|
378
393
|
require_relative "openai/models/responses/response_error"
|
379
394
|
require_relative "openai/models/responses/response_error_event"
|
@@ -426,12 +441,12 @@ require_relative "openai/models/responses/response_output_text_annotation_added_
|
|
426
441
|
require_relative "openai/models/responses/response_prompt"
|
427
442
|
require_relative "openai/models/responses/response_queued_event"
|
428
443
|
require_relative "openai/models/responses/response_reasoning_item"
|
429
|
-
require_relative "openai/models/responses/response_reasoning_summary_delta_event"
|
430
|
-
require_relative "openai/models/responses/response_reasoning_summary_done_event"
|
431
444
|
require_relative "openai/models/responses/response_reasoning_summary_part_added_event"
|
432
445
|
require_relative "openai/models/responses/response_reasoning_summary_part_done_event"
|
433
446
|
require_relative "openai/models/responses/response_reasoning_summary_text_delta_event"
|
434
447
|
require_relative "openai/models/responses/response_reasoning_summary_text_done_event"
|
448
|
+
require_relative "openai/models/responses/response_reasoning_text_delta_event"
|
449
|
+
require_relative "openai/models/responses/response_reasoning_text_done_event"
|
435
450
|
require_relative "openai/models/responses/response_refusal_delta_event"
|
436
451
|
require_relative "openai/models/responses/response_refusal_done_event"
|
437
452
|
require_relative "openai/models/responses/response_retrieve_params"
|
@@ -445,6 +460,8 @@ require_relative "openai/models/responses/response_web_search_call_completed_eve
|
|
445
460
|
require_relative "openai/models/responses/response_web_search_call_in_progress_event"
|
446
461
|
require_relative "openai/models/responses/response_web_search_call_searching_event"
|
447
462
|
require_relative "openai/models/responses/tool"
|
463
|
+
require_relative "openai/models/responses/tool_choice_allowed"
|
464
|
+
require_relative "openai/models/responses/tool_choice_custom"
|
448
465
|
require_relative "openai/models/responses/tool_choice_function"
|
449
466
|
require_relative "openai/models/responses/tool_choice_mcp"
|
450
467
|
require_relative "openai/models/responses/tool_choice_options"
|
@@ -86,6 +86,52 @@ module OpenAI
|
|
86
86
|
def self.type_info(spec)
|
87
87
|
end
|
88
88
|
|
89
|
+
# @api private
|
90
|
+
sig do
|
91
|
+
params(
|
92
|
+
type_info:
|
93
|
+
T.any(
|
94
|
+
{
|
95
|
+
const:
|
96
|
+
T.nilable(
|
97
|
+
T.any(NilClass, T::Boolean, Integer, Float, Symbol)
|
98
|
+
),
|
99
|
+
enum:
|
100
|
+
T.nilable(
|
101
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input)
|
102
|
+
),
|
103
|
+
union:
|
104
|
+
T.nilable(
|
105
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input)
|
106
|
+
)
|
107
|
+
},
|
108
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input),
|
109
|
+
OpenAI::Internal::Type::Converter::Input
|
110
|
+
),
|
111
|
+
spec:
|
112
|
+
T.any(
|
113
|
+
{
|
114
|
+
const:
|
115
|
+
T.nilable(
|
116
|
+
T.any(NilClass, T::Boolean, Integer, Float, Symbol)
|
117
|
+
),
|
118
|
+
enum:
|
119
|
+
T.nilable(
|
120
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input)
|
121
|
+
),
|
122
|
+
union:
|
123
|
+
T.nilable(
|
124
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input)
|
125
|
+
)
|
126
|
+
},
|
127
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input),
|
128
|
+
OpenAI::Internal::Type::Converter::Input
|
129
|
+
)
|
130
|
+
).returns(OpenAI::Internal::AnyHash)
|
131
|
+
end
|
132
|
+
def self.meta_info(type_info, spec)
|
133
|
+
end
|
134
|
+
|
89
135
|
# @api private
|
90
136
|
sig do
|
91
137
|
params(translate_names: T::Boolean).returns(
|
@@ -16,7 +16,8 @@ module OpenAI
|
|
16
16
|
T::Array[
|
17
17
|
[
|
18
18
|
T.nilable(Symbol),
|
19
|
-
T.proc.returns(OpenAI::Internal::Type::Converter::Input)
|
19
|
+
T.proc.returns(OpenAI::Internal::Type::Converter::Input),
|
20
|
+
OpenAI::Internal::AnyHash
|
20
21
|
]
|
21
22
|
]
|
22
23
|
)
|
@@ -25,7 +26,11 @@ module OpenAI
|
|
25
26
|
end
|
26
27
|
|
27
28
|
# @api private
|
28
|
-
sig
|
29
|
+
sig do
|
30
|
+
returns(
|
31
|
+
T::Array[[T.nilable(Symbol), T.anything, OpenAI::Internal::AnyHash]]
|
32
|
+
)
|
33
|
+
end
|
29
34
|
protected def derefed_variants
|
30
35
|
end
|
31
36
|
|
@@ -45,12 +45,11 @@ module OpenAI
|
|
45
45
|
sig { returns(T.nilable(String)) }
|
46
46
|
attr_accessor :name
|
47
47
|
|
48
|
-
# **o-series models only**
|
49
|
-
#
|
50
48
|
# Constrains effort on reasoning for
|
51
49
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
52
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
53
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
50
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
51
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
52
|
+
# response.
|
54
53
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
55
54
|
attr_accessor :reasoning_effort
|
56
55
|
|
@@ -208,12 +207,11 @@ module OpenAI
|
|
208
207
|
metadata: nil,
|
209
208
|
# The name of the assistant. The maximum length is 256 characters.
|
210
209
|
name: nil,
|
211
|
-
# **o-series models only**
|
212
|
-
#
|
213
210
|
# Constrains effort on reasoning for
|
214
211
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
215
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
216
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
212
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
213
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
214
|
+
# response.
|
217
215
|
reasoning_effort: nil,
|
218
216
|
# Specifies the format that the model must output. Compatible with
|
219
217
|
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
@@ -65,12 +65,11 @@ module OpenAI
|
|
65
65
|
sig { returns(T.nilable(String)) }
|
66
66
|
attr_accessor :name
|
67
67
|
|
68
|
-
# **o-series models only**
|
69
|
-
#
|
70
68
|
# Constrains effort on reasoning for
|
71
69
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
72
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
73
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
70
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
71
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
72
|
+
# response.
|
74
73
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
75
74
|
attr_accessor :reasoning_effort
|
76
75
|
|
@@ -232,12 +231,11 @@ module OpenAI
|
|
232
231
|
model: nil,
|
233
232
|
# The name of the assistant. The maximum length is 256 characters.
|
234
233
|
name: nil,
|
235
|
-
# **o-series models only**
|
236
|
-
#
|
237
234
|
# Constrains effort on reasoning for
|
238
235
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
239
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
240
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
236
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
237
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
238
|
+
# response.
|
241
239
|
reasoning_effort: nil,
|
242
240
|
# Specifies the format that the model must output. Compatible with
|
243
241
|
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
@@ -354,6 +352,36 @@ module OpenAI
|
|
354
352
|
end
|
355
353
|
OrSymbol = T.type_alias { T.any(Symbol, String) }
|
356
354
|
|
355
|
+
GPT_5 =
|
356
|
+
T.let(
|
357
|
+
:"gpt-5",
|
358
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
359
|
+
)
|
360
|
+
GPT_5_MINI =
|
361
|
+
T.let(
|
362
|
+
:"gpt-5-mini",
|
363
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
364
|
+
)
|
365
|
+
GPT_5_NANO =
|
366
|
+
T.let(
|
367
|
+
:"gpt-5-nano",
|
368
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
369
|
+
)
|
370
|
+
GPT_5_2025_08_07 =
|
371
|
+
T.let(
|
372
|
+
:"gpt-5-2025-08-07",
|
373
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
374
|
+
)
|
375
|
+
GPT_5_MINI_2025_08_07 =
|
376
|
+
T.let(
|
377
|
+
:"gpt-5-mini-2025-08-07",
|
378
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
379
|
+
)
|
380
|
+
GPT_5_NANO_2025_08_07 =
|
381
|
+
T.let(
|
382
|
+
:"gpt-5-nano-2025-08-07",
|
383
|
+
OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol
|
384
|
+
)
|
357
385
|
GPT_4_1 =
|
358
386
|
T.let(
|
359
387
|
:"gpt-4.1",
|
@@ -111,12 +111,11 @@ module OpenAI
|
|
111
111
|
sig { params(parallel_tool_calls: T::Boolean).void }
|
112
112
|
attr_writer :parallel_tool_calls
|
113
113
|
|
114
|
-
# **o-series models only**
|
115
|
-
#
|
116
114
|
# Constrains effort on reasoning for
|
117
115
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
118
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
119
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
116
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
117
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
118
|
+
# response.
|
120
119
|
sig { returns(T.nilable(OpenAI::ReasoningEffort::OrSymbol)) }
|
121
120
|
attr_accessor :reasoning_effort
|
122
121
|
|
@@ -330,12 +329,11 @@ module OpenAI
|
|
330
329
|
# [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
|
331
330
|
# during tool use.
|
332
331
|
parallel_tool_calls: nil,
|
333
|
-
# **o-series models only**
|
334
|
-
#
|
335
332
|
# Constrains effort on reasoning for
|
336
333
|
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
|
337
|
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning
|
338
|
-
# result in faster responses and fewer tokens used on reasoning in a
|
334
|
+
# supported values are `minimal`, `low`, `medium`, and `high`. Reducing reasoning
|
335
|
+
# effort can result in faster responses and fewer tokens used on reasoning in a
|
336
|
+
# response.
|
339
337
|
reasoning_effort: nil,
|
340
338
|
# Specifies the format that the model must output. Compatible with
|
341
339
|
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
|
@@ -0,0 +1,60 @@
|
|
1
|
+
# typed: strong
|
2
|
+
|
3
|
+
module OpenAI
|
4
|
+
module Models
|
5
|
+
ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
|
6
|
+
|
7
|
+
module Chat
|
8
|
+
class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
|
9
|
+
OrHash =
|
10
|
+
T.type_alias do
|
11
|
+
T.any(
|
12
|
+
OpenAI::Chat::ChatCompletionAllowedToolChoice,
|
13
|
+
OpenAI::Internal::AnyHash
|
14
|
+
)
|
15
|
+
end
|
16
|
+
|
17
|
+
# Constrains the tools available to the model to a pre-defined set.
|
18
|
+
sig { returns(OpenAI::Chat::ChatCompletionAllowedTools) }
|
19
|
+
attr_reader :allowed_tools
|
20
|
+
|
21
|
+
sig do
|
22
|
+
params(
|
23
|
+
allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash
|
24
|
+
).void
|
25
|
+
end
|
26
|
+
attr_writer :allowed_tools
|
27
|
+
|
28
|
+
# Allowed tool configuration type. Always `allowed_tools`.
|
29
|
+
sig { returns(Symbol) }
|
30
|
+
attr_accessor :type
|
31
|
+
|
32
|
+
# Constrains the tools available to the model to a pre-defined set.
|
33
|
+
sig do
|
34
|
+
params(
|
35
|
+
allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools::OrHash,
|
36
|
+
type: Symbol
|
37
|
+
).returns(T.attached_class)
|
38
|
+
end
|
39
|
+
def self.new(
|
40
|
+
# Constrains the tools available to the model to a pre-defined set.
|
41
|
+
allowed_tools:,
|
42
|
+
# Allowed tool configuration type. Always `allowed_tools`.
|
43
|
+
type: :allowed_tools
|
44
|
+
)
|
45
|
+
end
|
46
|
+
|
47
|
+
sig do
|
48
|
+
override.returns(
|
49
|
+
{
|
50
|
+
allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
|
51
|
+
type: Symbol
|
52
|
+
}
|
53
|
+
)
|
54
|
+
end
|
55
|
+
def to_hash
|
56
|
+
end
|
57
|
+
end
|
58
|
+
end
|
59
|
+
end
|
60
|
+
end
|