openai 0.35.2 → 0.36.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +21 -15
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/models/batch_create_params.rb +9 -6
- data/lib/openai/models/beta/assistant_create_params.rb +9 -5
- data/lib/openai/models/beta/assistant_update_params.rb +9 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
- data/lib/openai/models/chat/completion_create_params.rb +37 -6
- data/lib/openai/models/chat_model.rb +5 -0
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/conversation_item.rb +13 -1
- data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
- data/lib/openai/models/evals/run_cancel_response.rb +20 -12
- data/lib/openai/models/evals/run_create_params.rb +20 -12
- data/lib/openai/models/evals/run_create_response.rb +20 -12
- data/lib/openai/models/evals/run_list_response.rb +20 -12
- data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
- data/lib/openai/models/graders/score_model_grader.rb +9 -5
- data/lib/openai/models/reasoning.rb +10 -6
- data/lib/openai/models/reasoning_effort.rb +10 -5
- data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
- data/lib/openai/models/responses/function_shell_tool.rb +20 -0
- data/lib/openai/models/responses/input_token_count_params.rb +14 -8
- data/lib/openai/models/responses/response.rb +46 -11
- data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
- data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
- data/lib/openai/models/responses/response_create_params.rb +42 -9
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
- data/lib/openai/models/responses/response_input_item.rb +395 -1
- data/lib/openai/models/responses/response_item.rb +13 -1
- data/lib/openai/models/responses/response_item_list.rb +2 -2
- data/lib/openai/models/responses/response_output_item.rb +13 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +7 -1
- data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
- data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
- data/lib/openai/resources/chat/completions.rb +6 -2
- data/lib/openai/resources/conversations/items.rb +3 -3
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_items.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +3 -3
- data/lib/openai/resources/responses.rb +12 -8
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +9 -0
- data/rbi/openai/models/batch_create_params.rbi +17 -9
- data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
- data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
- data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
- data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
- data/rbi/openai/models/chat_model.rbi +7 -0
- data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
- data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
- data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
- data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
- data/rbi/openai/models/evals/run_create_params.rbi +44 -20
- data/rbi/openai/models/evals/run_create_response.rbi +40 -20
- data/rbi/openai/models/evals/run_list_response.rbi +40 -20
- data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
- data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
- data/rbi/openai/models/reasoning.rbi +18 -10
- data/rbi/openai/models/reasoning_effort.rbi +10 -5
- data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
- data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
- data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
- data/rbi/openai/models/responses/response.rbi +73 -2
- data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
- data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
- data/rbi/openai/models/responses/response_create_params.rbi +87 -5
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
- data/rbi/openai/models/responses/response_input_item.rbi +675 -0
- data/rbi/openai/models/responses/response_item.rbi +4 -0
- data/rbi/openai/models/responses/response_item_list.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
- data/rbi/openai/models/responses/tool.rbi +2 -0
- data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
- data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
- data/rbi/openai/resources/batches.rbi +4 -3
- data/rbi/openai/resources/beta/assistants.rbi +18 -10
- data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
- data/rbi/openai/resources/chat/completions.rbi +38 -12
- data/rbi/openai/resources/conversations/items.rbi +4 -0
- data/rbi/openai/resources/conversations.rbi +4 -0
- data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
- data/rbi/openai/resources/responses.rbi +28 -2
- data/sig/openai/models/batch_create_params.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +16 -0
- data/sig/openai/models/chat_model.rbs +11 -1
- data/sig/openai/models/conversations/conversation_item.rbs +4 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
- data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
- data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
- data/sig/openai/models/responses/response.rbs +18 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
- data/sig/openai/models/responses/response_create_params.rbs +18 -0
- data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
- data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
- data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
- data/sig/openai/models/responses/response_input_item.rbs +276 -0
- data/sig/openai/models/responses/response_item.rbs +4 -0
- data/sig/openai/models/responses/response_output_item.rbs +4 -0
- data/sig/openai/models/responses/tool.rbs +2 -0
- data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
- data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
- data/sig/openai/resources/chat/completions.rbs +2 -0
- data/sig/openai/resources/responses.rbs +2 -0
- metadata +29 -2
data/lib/openai/models/responses/response.rb:

@@ -38,7 +38,7 @@ module OpenAI
 # response will not be carried over to the next response. This makes it simple to
 # swap out system (or developer) messages in new responses.
 #
-# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
 required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true

 # @!attribute metadata

@@ -77,7 +77,7 @@ module OpenAI
 # an `assistant` message with the content generated by the model, you might
 # consider using the `output_text` property where supported in SDKs.
 #
-# @return [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>]
+# @return [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>]
 required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] }

 # @!attribute parallel_tool_calls

@@ -100,7 +100,7 @@ module OpenAI
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
 #
-# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom]
+# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell]
 required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }

 # @!attribute tools

@@ -124,7 +124,7 @@ module OpenAI
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
 # You can also use custom tools to call your own code.
 #
-# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>]
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>]
 required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

 # @!attribute top_p

@@ -192,6 +192,17 @@ module OpenAI
 # @return [String, nil]
 optional :prompt_cache_key, String

+# @!attribute prompt_cache_retention
+# The retention policy for the prompt cache. Set to `24h` to enable extended
+# prompt caching, which keeps cached prefixes active for longer, up to a maximum
+# of 24 hours.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+#
+# @return [Symbol, OpenAI::Models::Responses::Response::PromptCacheRetention, nil]
+optional :prompt_cache_retention,
+enum: -> { OpenAI::Responses::Response::PromptCacheRetention },
+nil?: true
+
 # @!attribute reasoning
 # **gpt-5 and o-series models only**
 #
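The new `prompt_cache_retention` attribute also surfaces on responses read back from the API. A minimal sketch of inspecting it, assuming a client configured via `OPENAI_API_KEY`; the response ID is a placeholder:

```ruby
# Illustrative only: read the new prompt_cache_retention field from a retrieved response.
require "openai"

client = OpenAI::Client.new # picks up OPENAI_API_KEY from the environment

response = client.responses.retrieve("resp_123") # placeholder response ID
# One of :"in-memory" or :"24h" (see the PromptCacheRetention enum later in this file), or nil.
puts response.prompt_cache_retention
```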
data/lib/openai/models/responses/response.rb (continued):

@@ -307,7 +318,7 @@ module OpenAI
 texts.join
 end

-# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
+# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, conversation: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::Response} for more details.
 #

@@ -319,21 +330,21 @@ module OpenAI
 #
 # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
 #
-# @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
+# @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
 #
 # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
 #
 # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
 #
-# @param output [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>] An array of content items generated by the model.
+# @param output [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseFunctionShellToolCall, OpenAI::Models::Responses::ResponseFunctionShellToolCallOutput, OpenAI::Models::Responses::ResponseApplyPatchToolCall, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>] An array of content items generated by the model.
 #
 # @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel.
 #
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
-# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
+# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating
 #
-# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
+# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
 #
 # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
 #

@@ -351,6 +362,8 @@ module OpenAI
 #
 # @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
 #
+# @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::Response::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
+#
 # @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
 #
 # @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi

@@ -417,7 +430,7 @@ module OpenAI
 variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }

 # @!method self.variants
-# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
+# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]

 # @type [OpenAI::Internal::Type::Converter]
 ResponseInputItemArray =

@@ -458,8 +471,14 @@ module OpenAI
 # Use this option to force the model to call a specific custom tool.
 variant -> { OpenAI::Responses::ToolChoiceCustom }

+# Forces the model to call the apply_patch tool when executing a tool call.
+variant -> { OpenAI::Responses::ToolChoiceApplyPatch }
+
+# Forces the model to call the function shell tool when a tool call is required.
+variant -> { OpenAI::Responses::ToolChoiceShell }
+
 # @!method self.variants
-# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
+# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)]
 end

 # @see OpenAI::Models::Responses::Response#conversation
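With the two new tool-choice variants, a request can force one of the built-in tools. A hedged sketch using plain request hashes; the `{type: "apply_patch"}` shapes are inferred from the type constants added in this release rather than taken from the diff, and the model name is a placeholder:

```ruby
# Illustrative only: force the apply_patch tool on a request.
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-5.1",                                              # placeholder model name
  input: "Rename the helper in lib/util.rb and update its callers.",
  tools: [{type: "apply_patch"}],                                # assumed wire shape for ApplyPatchTool
  tool_choice: {type: "apply_patch"}                             # assumed wire shape for ToolChoiceApplyPatch
)
```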
data/lib/openai/models/responses/response.rb (continued):

@@ -477,6 +496,22 @@ module OpenAI
 # @param id [String] The unique ID of the conversation.
 end

+# The retention policy for the prompt cache. Set to `24h` to enable extended
+# prompt caching, which keeps cached prefixes active for longer, up to a maximum
+# of 24 hours.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+#
+# @see OpenAI::Models::Responses::Response#prompt_cache_retention
+module PromptCacheRetention
+extend OpenAI::Internal::Type::Enum
+
+IN_MEMORY = :"in-memory"
+PROMPT_CACHE_RETENTION_24H = :"24h"
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
 # Specifies the processing type used for serving the request.
 #
 # - If set to 'auto', then the request will be processed with the service tier

data/lib/openai/models/responses/response_apply_patch_tool_call.rb (new file):

@@ -0,0 +1,179 @@
+# frozen_string_literal: true
+
+module OpenAI
+module Models
+module Responses
+class ResponseApplyPatchToolCall < OpenAI::Internal::Type::BaseModel
+# @!attribute id
+# The unique ID of the apply patch tool call. Populated when this item is returned
+# via API.
+#
+# @return [String]
+required :id, String
+
+# @!attribute call_id
+# The unique ID of the apply patch tool call generated by the model.
+#
+# @return [String]
+required :call_id, String
+
+# @!attribute status
+# The status of the apply patch tool call. One of `in_progress` or `completed`.
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Status]
+required :status, enum: -> { OpenAI::Responses::ResponseApplyPatchToolCall::Status }
+
+# @!attribute type
+# The type of the item. Always `apply_patch_call`.
+#
+# @return [Symbol, :apply_patch_call]
+required :type, const: :apply_patch_call
+
+# @!attribute created_by
+# The ID of the entity that created this tool call.
+#
+# @return [String, nil]
+optional :created_by, String
+
+# @!attribute operation
+# One of the create_file, delete_file, or update_file operations applied via
+# apply_patch.
+#
+# @return [OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile, nil]
+optional :operation, union: -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation }
+
+# @!method initialize(id:, call_id:, status:, created_by: nil, operation: nil, type: :apply_patch_call)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Responses::ResponseApplyPatchToolCall} for more details.
+#
+# A tool call that applies file diffs by creating, deleting, or updating files.
+#
+# @param id [String] The unique ID of the apply patch tool call. Populated when this item is returned
+#
+# @param call_id [String] The unique ID of the apply patch tool call generated by the model.
+#
+# @param status [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Status] The status of the apply patch tool call. One of `in_progress` or `completed`.
+#
+# @param created_by [String] The ID of the entity that created this tool call.
+#
+# @param operation [OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile] One of the create_file, delete_file, or update_file operations applied via apply
+#
+# @param type [Symbol, :apply_patch_call] The type of the item. Always `apply_patch_call`.
+
+# The status of the apply patch tool call. One of `in_progress` or `completed`.
+#
+# @see OpenAI::Models::Responses::ResponseApplyPatchToolCall#status
+module Status
+extend OpenAI::Internal::Type::Enum
+
+IN_PROGRESS = :in_progress
+COMPLETED = :completed
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+
+# One of the create_file, delete_file, or update_file operations applied via
+# apply_patch.
+#
+# @see OpenAI::Models::Responses::ResponseApplyPatchToolCall#operation
+module Operation
+extend OpenAI::Internal::Type::Union
+
+discriminator :type
+
+# Instruction describing how to create a file via the apply_patch tool.
+variant :create_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::CreateFile }
+
+# Instruction describing how to delete a file via the apply_patch tool.
+variant :delete_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile }
+
+# Instruction describing how to update a file via the apply_patch tool.
+variant :update_file, -> { OpenAI::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile }
+
+class CreateFile < OpenAI::Internal::Type::BaseModel
+# @!attribute diff
+# Diff to apply.
+#
+# @return [String]
+required :diff, String
+
+# @!attribute path
+# Path of the file to create.
+#
+# @return [String]
+required :path, String
+
+# @!attribute type
+# Create a new file with the provided diff.
+#
+# @return [Symbol, :create_file]
+required :type, const: :create_file
+
+# @!method initialize(diff:, path:, type: :create_file)
+# Instruction describing how to create a file via the apply_patch tool.
+#
+# @param diff [String] Diff to apply.
+#
+# @param path [String] Path of the file to create.
+#
+# @param type [Symbol, :create_file] Create a new file with the provided diff.
+end
+
+class DeleteFile < OpenAI::Internal::Type::BaseModel
+# @!attribute path
+# Path of the file to delete.
+#
+# @return [String]
+required :path, String
+
+# @!attribute type
+# Delete the specified file.
+#
+# @return [Symbol, :delete_file]
+required :type, const: :delete_file
+
+# @!method initialize(path:, type: :delete_file)
+# Instruction describing how to delete a file via the apply_patch tool.
+#
+# @param path [String] Path of the file to delete.
+#
+# @param type [Symbol, :delete_file] Delete the specified file.
+end
+
+class UpdateFile < OpenAI::Internal::Type::BaseModel
+# @!attribute diff
+# Diff to apply.
+#
+# @return [String]
+required :diff, String
+
+# @!attribute path
+# Path of the file to update.
+#
+# @return [String]
+required :path, String
+
+# @!attribute type
+# Update an existing file with the provided diff.
+#
+# @return [Symbol, :update_file]
+required :type, const: :update_file
+
+# @!method initialize(diff:, path:, type: :update_file)
+# Instruction describing how to update a file via the apply_patch tool.
+#
+# @param diff [String] Diff to apply.
+#
+# @param path [String] Path of the file to update.
+#
+# @param type [Symbol, :update_file] Update an existing file with the provided diff.
+end
+
+# @!method self.variants
+# @return [Array(OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile, OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile)]
+end
+end
+end
+end
+end
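One way a caller might consume these items: scan `response.output` for apply-patch calls and dispatch on the operation subtype. A sketch only, assuming `response` came from a request that offered the apply_patch tool and that `apply_unified_diff` is your own patch-applying helper:

```ruby
# Illustrative only: act on the operations carried by apply_patch_call output items.
# `response` is assumed to be an OpenAI::Models::Responses::Response obtained earlier.
patch_calls = response.output.grep(OpenAI::Models::Responses::ResponseApplyPatchToolCall)

patch_calls.each do |call|
  case (op = call.operation)
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::DeleteFile
    File.delete(op.path)
  when OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::CreateFile,
       OpenAI::Models::Responses::ResponseApplyPatchToolCall::Operation::UpdateFile
    apply_unified_diff(op.path, op.diff) # hypothetical helper that applies the diff to disk
  end
end
```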
data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb (new file):

@@ -0,0 +1,77 @@
+# frozen_string_literal: true
+
+module OpenAI
+module Models
+module Responses
+class ResponseApplyPatchToolCallOutput < OpenAI::Internal::Type::BaseModel
+# @!attribute id
+# The unique ID of the apply patch tool call output. Populated when this item is
+# returned via API.
+#
+# @return [String]
+required :id, String
+
+# @!attribute call_id
+# The unique ID of the apply patch tool call generated by the model.
+#
+# @return [String]
+required :call_id, String
+
+# @!attribute output
+# Optional textual output returned by the apply patch tool.
+#
+# @return [String, nil]
+required :output, String, nil?: true
+
+# @!attribute status
+# The status of the apply patch tool call output. One of `completed` or `failed`.
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::Status]
+required :status, enum: -> { OpenAI::Responses::ResponseApplyPatchToolCallOutput::Status }
+
+# @!attribute type
+# The type of the item. Always `apply_patch_call_output`.
+#
+# @return [Symbol, :apply_patch_call_output]
+required :type, const: :apply_patch_call_output
+
+# @!attribute created_by
+# The ID of the entity that created this tool call output.
+#
+# @return [String, nil]
+optional :created_by, String
+
+# @!method initialize(id:, call_id:, output:, status:, created_by: nil, type: :apply_patch_call_output)
+# Some parameter documentations has been truncated, see
+# {OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput} for more details.
+#
+# The output emitted by an apply patch tool call.
+#
+# @param id [String] The unique ID of the apply patch tool call output. Populated when this item is r
+#
+# @param call_id [String] The unique ID of the apply patch tool call generated by the model.
+#
+# @param output [String, nil] Optional textual output returned by the apply patch tool.
+#
+# @param status [Symbol, OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput::Status] The status of the apply patch tool call output. One of `completed` or `failed`.
+#
+# @param created_by [String] The ID of the entity that created this tool call output.
+#
+# @param type [Symbol, :apply_patch_call_output] The type of the item. Always `apply_patch_call_output`.
+
+# The status of the apply patch tool call output. One of `completed` or `failed`.
+#
+# @see OpenAI::Models::Responses::ResponseApplyPatchToolCallOutput#status
+module Status
+extend OpenAI::Internal::Type::Enum
+
+COMPLETED = :completed
+FAILED = :failed
+
+# @!method self.values
+# @return [Array<Symbol>]
+end
+end
+end
+end
+end
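After executing an operation locally, the result can be reported back to the model as one of the new `apply_patch_call_output` input items on a follow-up request. A sketch only; the hash keys simply mirror the attributes above, and the IDs and model name are placeholders:

```ruby
# Illustrative only: report a locally applied patch back on the next turn.
require "openai"

client = OpenAI::Client.new

followup = client.responses.create(
  model: "gpt-5.1",                    # placeholder model name
  previous_response_id: "resp_123",    # placeholder: the response that issued the apply_patch_call
  input: [
    {
      type: "apply_patch_call_output",
      call_id: "call_abc123",          # placeholder: call_id from the apply_patch_call item
      status: "completed",             # or "failed"
      output: "Patch applied cleanly."
    }
  ]
)
```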
data/lib/openai/models/responses/response_create_params.rb:

@@ -66,7 +66,7 @@ module OpenAI
 # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
 # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
 #
-# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
 optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input }

 # @!attribute instructions

@@ -147,6 +147,17 @@ module OpenAI
 # @return [String, nil]
 optional :prompt_cache_key, String

+# @!attribute prompt_cache_retention
+# The retention policy for the prompt cache. Set to `24h` to enable extended
+# prompt caching, which keeps cached prefixes active for longer, up to a maximum
+# of 24 hours.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+#
+# @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil]
+optional :prompt_cache_retention,
+enum: -> { OpenAI::Responses::ResponseCreateParams::PromptCacheRetention },
+nil?: true
+
 # @!attribute reasoning
 # **gpt-5 and o-series models only**
 #
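The same parameter is accepted at request time. A short sketch of opting in to extended prompt caching; the model name is a placeholder, and `:"24h"` is one of the two enum values added in this release (`:"in-memory"` is the other):

```ruby
# Illustrative only: opt a request in to 24-hour prompt cache retention.
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-5.1",                      # placeholder model name
  input: "Summarize the attached design document.",
  prompt_cache_key: "design-doc-review", # existing param: groups similar requests for caching
  prompt_cache_retention: :"24h"         # new in 0.36.0; the other enum value is :"in-memory"
)
```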
data/lib/openai/models/responses/response_create_params.rb (continued):

@@ -229,7 +240,7 @@ module OpenAI
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
 #
-# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil]
+# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell, nil]
 optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice }

 # @!attribute tools

@@ -253,7 +264,7 @@ module OpenAI
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
 # You can also use custom tools to call your own code.
 #
-# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>, nil]
 optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

 # @!attribute top_logprobs
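Both new built-in tools can be offered together via `tools`. A sketch using the new model classes; the assumption that their constructors take no required arguments follows from the 20-line, type-only tool definitions added in this release, and the model name is again a placeholder:

```ruby
# Illustrative only: offer the new shell and apply_patch tools on a request.
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-5.1",                            # placeholder model name
  input: "Run the test suite and patch whatever fails.",
  tools: [
    OpenAI::Responses::FunctionShellTool.new,  # assumed no-arg constructor (type-only tool)
    OpenAI::Responses::ApplyPatchTool.new      # assumed no-arg constructor (type-only tool)
  ]
)
```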
@@ -297,7 +308,7 @@ module OpenAI
|
|
|
297
308
|
# @return [String, nil]
|
|
298
309
|
optional :user, String
|
|
299
310
|
|
|
300
|
-
# @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
|
311
|
+
# @!method initialize(background: nil, conversation: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, prompt_cache_retention: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
|
301
312
|
# Some parameter documentations has been truncated, see
|
|
302
313
|
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
|
|
303
314
|
#
|
|
@@ -307,7 +318,7 @@ module OpenAI
|
|
|
307
318
|
#
|
|
308
319
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently suppo
|
|
309
320
|
#
|
|
310
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
|
321
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
|
311
322
|
#
|
|
312
323
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
|
313
324
|
#
|
|
@@ -327,6 +338,8 @@ module OpenAI
|
|
|
327
338
|
#
|
|
328
339
|
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
|
|
329
340
|
#
|
|
341
|
+
# @param prompt_cache_retention [Symbol, OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention, nil] The retention policy for the prompt cache. Set to `24h` to enable extended promp
|
|
342
|
+
#
|
|
330
343
|
# @param reasoning [OpenAI::Models::Reasoning, nil] **gpt-5 and o-series models only**
|
|
331
344
|
#
|
|
332
345
|
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
|
|
@@ -341,9 +354,9 @@ module OpenAI
         #
         #   @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
         #
-        #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
+        #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell] How the model should select which tool (or tools) to use when generating
         #
-        #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
+        #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::FunctionShellTool, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::ApplyPatchTool, OpenAI::Models::Responses::WebSearchTool, OpenAI::Models::Responses::WebSearchPreviewTool>] An array of tools the model may call while generating a response. You
         #
         #   @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
         #
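The `tools` array documented above now also accepts `FunctionShellTool` and `ApplyPatchTool`. A hedged sketch of attaching both; it assumes the two tool models can be constructed with no arguments, which this hunk does not show:

# Hedged sketch: no-argument construction of the new tool models is an assumption.
require "openai"
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-5",                                  # illustrative
  input: "Run the test suite and patch any failing file.",
  tools: [
    OpenAI::Responses::FunctionShellTool.new,      # shell command execution tool
    OpenAI::Responses::ApplyPatchTool.new          # file patching tool
  ]
)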
@@ -393,7 +406,21 @@ module OpenAI
           variant -> { OpenAI::Responses::ResponseInput }

           # @!method self.variants
-          #   @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
+          #   @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ShellCall, OpenAI::Models::Responses::ResponseInputItem::ShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCall, OpenAI::Models::Responses::ResponseInputItem::ApplyPatchCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
+        end
+
+        # The retention policy for the prompt cache. Set to `24h` to enable extended
+        # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+        # of 24 hours.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+        module PromptCacheRetention
+          extend OpenAI::Internal::Type::Enum
+
+          IN_MEMORY = :"in-memory"
+          PROMPT_CACHE_RETENTION_24H = :"24h"
+
+          # @!method self.values
+          #   @return [Array<Symbol>]
         end

         # Specifies the processing type used for serving the request.
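`PromptCacheRetention` follows the SDK's generated-enum pattern, so the accepted symbols can be read back at runtime. A short sketch grounded in the constants above; the return order of `values` matching declaration order is an assumption:

retention = OpenAI::Models::Responses::ResponseCreateParams::PromptCacheRetention

p retention.values                       # expected: [:"in-memory", :"24h"]
p retention::PROMPT_CACHE_RETENTION_24H  # => :"24h", usable in place of the raw symbol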
@@ -479,8 +506,14 @@ module OpenAI
           # Use this option to force the model to call a specific custom tool.
           variant -> { OpenAI::Responses::ToolChoiceCustom }

+          # Forces the model to call the apply_patch tool when executing a tool call.
+          variant -> { OpenAI::Responses::ToolChoiceApplyPatch }
+
+          # Forces the model to call the function shell tool when a tool call is required.
+          variant -> { OpenAI::Responses::ToolChoiceShell }
+
           # @!method self.variants
-          #   @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
+          #   @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, OpenAI::Models::Responses::ToolChoiceApplyPatch, OpenAI::Models::Responses::ToolChoiceShell)]
         end

         # The truncation strategy to use for the model response.
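With the union extended, the new tool choices can be passed directly as `tool_choice`. A hedged sketch; constructing `ToolChoiceShell` and `FunctionShellTool` with no arguments is an assumption, since only their membership in the unions is shown in these hunks:

# Hedged sketch: .new with no arguments is assumed for both models below.
require "openai"
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: "gpt-5",                                       # illustrative
  input: "Run `bundle exec rake test` and report the exit code.",
  tools: [OpenAI::Responses::FunctionShellTool.new],
  tool_choice: OpenAI::Responses::ToolChoiceShell.new   # force a shell tool call
)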
@@ -0,0 +1,88 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseFunctionShellCallOutputContent < OpenAI::Internal::Type::BaseModel
+        # @!attribute outcome
+        #   The exit or timeout outcome associated with this chunk.
+        #
+        #   @return [OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit]
+        required :outcome, union: -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome }
+
+        # @!attribute stderr
+        #   Captured stderr output for this chunk of the shell call.
+        #
+        #   @return [String]
+        required :stderr, String
+
+        # @!attribute stdout
+        #   Captured stdout output for this chunk of the shell call.
+        #
+        #   @return [String]
+        required :stdout, String
+
+        # @!method initialize(outcome:, stderr:, stdout:)
+        #   Captured stdout and stderr for a portion of a function shell tool call output.
+        #
+        #   @param outcome [OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit] The exit or timeout outcome associated with this chunk.
+        #
+        #   @param stderr [String] Captured stderr output for this chunk of the shell call.
+        #
+        #   @param stdout [String] Captured stdout output for this chunk of the shell call.
+
+        # The exit or timeout outcome associated with this chunk.
+        #
+        # @see OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent#outcome
+        module Outcome
+          extend OpenAI::Internal::Type::Union
+
+          discriminator :type
+
+          # Indicates that the function shell call exceeded its configured time limit.
+          variant :timeout, -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout }
+
+          # Indicates that the shell commands finished and returned an exit code.
+          variant :exit, -> { OpenAI::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit }
+
+          class Timeout < OpenAI::Internal::Type::BaseModel
+            # @!attribute type
+            #   The outcome type. Always `timeout`.
+            #
+            #   @return [Symbol, :timeout]
+            required :type, const: :timeout
+
+            # @!method initialize(type: :timeout)
+            #   Indicates that the function shell call exceeded its configured time limit.
+            #
+            #   @param type [Symbol, :timeout] The outcome type. Always `timeout`.
+          end
+
+          class Exit < OpenAI::Internal::Type::BaseModel
+            # @!attribute exit_code
+            #   The exit code returned by the shell process.
+            #
+            #   @return [Integer]
+            required :exit_code, Integer
+
+            # @!attribute type
+            #   The outcome type. Always `exit`.
+            #
+            #   @return [Symbol, :exit]
+            required :type, const: :exit
+
+            # @!method initialize(exit_code:, type: :exit)
+            #   Indicates that the shell commands finished and returned an exit code.
+            #
+            #   @param exit_code [Integer] The exit code returned by the shell process.
+            #
+            #   @param type [Symbol, :exit] The outcome type. Always `exit`.
+          end
+
+          # @!method self.variants
+          #   @return [Array(OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout, OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit)]
+        end
+      end
+    end
+  end
+end
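The documented initializers in this new file make the model straightforward to build by hand, for example when echoing a shell call's captured output back to the API. A small construction sketch; only members defined above are used, and the captured text is made up:

# Build one output chunk for a shell call that exited normally.
content = OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent.new(
  outcome: OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Exit.new(exit_code: 0),
  stdout: "42 examples, 0 failures\n",   # sample captured output
  stderr: ""
)

# A timed-out chunk carries only the timeout marker, no exit code.
timeout = OpenAI::Models::Responses::ResponseFunctionShellCallOutputContent::Outcome::Timeout.new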