openai 0.15.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +47 -0
- data/README.md +14 -20
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
- data/lib/openai/helpers/structured_output/union_of.rb +11 -1
- data/lib/openai/internal/transport/base_client.rb +1 -1
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/internal/type/union.rb +13 -17
- data/lib/openai/models/beta/assistant_create_params.rb +4 -5
- data/lib/openai/models/beta/assistant_update_params.rb +22 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
- data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
- data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
- data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
- data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
- data/lib/openai/models/chat/chat_completion_message.rb +3 -5
- data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
- data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
- data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
- data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
- data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
- data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
- data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
- data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
- data/lib/openai/models/chat/completion_create_params.rb +65 -16
- data/lib/openai/models/chat_model.rb +7 -0
- data/lib/openai/models/custom_tool_input_format.rb +76 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
- data/lib/openai/models/evals/run_cancel_response.rb +2 -2
- data/lib/openai/models/evals/run_create_params.rb +2 -2
- data/lib/openai/models/evals/run_create_response.rb +2 -2
- data/lib/openai/models/evals/run_list_response.rb +2 -2
- data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
- data/lib/openai/models/reasoning.rb +4 -5
- data/lib/openai/models/reasoning_effort.rb +4 -4
- data/lib/openai/models/response_format_text_grammar.rb +27 -0
- data/lib/openai/models/response_format_text_python.rb +20 -0
- data/lib/openai/models/responses/custom_tool.rb +48 -0
- data/lib/openai/models/responses/response.rb +70 -16
- data/lib/openai/models/responses/response_create_params.rb +78 -14
- data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
- data/lib/openai/models/responses/response_input_item.rb +7 -1
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
- data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
- data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
- data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
- data/lib/openai/models/responses/response_stream_event.rb +13 -11
- data/lib/openai/models/responses/response_text_config.rb +27 -1
- data/lib/openai/models/responses/tool.rb +5 -1
- data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
- data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
- data/lib/openai/models/vector_store_search_params.rb +6 -1
- data/lib/openai/models.rb +6 -0
- data/lib/openai/resources/beta/assistants.rb +2 -2
- data/lib/openai/resources/beta/threads/runs.rb +2 -2
- data/lib/openai/resources/chat/completions.rb +26 -12
- data/lib/openai/resources/responses.rb +77 -36
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -2
- data/rbi/openai/internal/transport/base_client.rbi +1 -1
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
- data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
- data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
- data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
- data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
- data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
- data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
- data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
- data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
- data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
- data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
- data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
- data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
- data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
- data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
- data/rbi/openai/models/chat/completion_create_params.rbi +150 -31
- data/rbi/openai/models/chat_model.rbi +11 -0
- data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
- data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
- data/rbi/openai/models/evals/run_create_params.rbi +4 -0
- data/rbi/openai/models/evals/run_create_response.rbi +2 -0
- data/rbi/openai/models/evals/run_list_response.rbi +2 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
- data/rbi/openai/models/reasoning.rbi +6 -8
- data/rbi/openai/models/reasoning_effort.rbi +4 -4
- data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
- data/rbi/openai/models/response_format_text_python.rbi +30 -0
- data/rbi/openai/models/responses/custom_tool.rbi +96 -0
- data/rbi/openai/models/responses/response.rbi +59 -11
- data/rbi/openai/models/responses/response_create_params.rbi +138 -13
- data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
- data/rbi/openai/models/responses/response_input_item.rbi +2 -0
- data/rbi/openai/models/responses/response_output_item.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
- data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
- data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
- data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
- data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
- data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
- data/rbi/openai/models/responses/response_text_config.rbi +64 -1
- data/rbi/openai/models/responses/tool.rbi +1 -0
- data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
- data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
- data/rbi/openai/models/vector_store_search_params.rbi +12 -1
- data/rbi/openai/models.rbi +6 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -8
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
- data/rbi/openai/resources/chat/completions.rbi +78 -25
- data/rbi/openai/resources/responses.rbi +249 -47
- data/sig/openai/internal/transport/base_client.rbs +1 -1
- data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
- data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
- data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
- data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
- data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
- data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
- data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
- data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
- data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
- data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
- data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +37 -6
- data/sig/openai/models/chat_model.rbs +15 -1
- data/sig/openai/models/custom_tool_input_format.rbs +61 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/response_format_text_grammar.rbs +15 -0
- data/sig/openai/models/response_format_text_python.rbs +13 -0
- data/sig/openai/models/responses/custom_tool.rbs +43 -0
- data/sig/openai/models/responses/response.rbs +16 -0
- data/sig/openai/models/responses/response_create_params.rbs +33 -0
- data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
- data/sig/openai/models/responses/response_input_item.rbs +2 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
- data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
- data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
- data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
- data/sig/openai/models/responses/response_stream_event.rbs +4 -2
- data/sig/openai/models/responses/response_text_config.rbs +22 -3
- data/sig/openai/models/responses/tool.rbs +1 -0
- data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
- data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
- data/sig/openai/models/vector_store_search_params.rbs +2 -1
- data/sig/openai/models.rbs +6 -0
- data/sig/openai/resources/chat/completions.rbs +8 -2
- data/sig/openai/resources/responses.rbs +36 -0
- metadata +59 -8
- data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
- data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
data/lib/openai/models/responses/response.rb

```diff
@@ -38,7 +38,7 @@ module OpenAI
 # response will not be carried over to the next response. This makes it simple to
 # swap out system (or developer) messages in new responses.
 #
-# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
 required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true
 
 # @!attribute metadata
@@ -77,7 +77,7 @@ module OpenAI
 # an `assistant` message with the content generated by the model, you might
 # consider using the `output_text` property where supported in SDKs.
 #
-# @return [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest>]
+# @return [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>]
 required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] }
 
 # @!attribute parallel_tool_calls
@@ -100,7 +100,7 @@ module OpenAI
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
 #
-# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp]
+# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom]
 required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }
 
 # @!attribute tools
@@ -116,10 +116,12 @@ module OpenAI
 # Learn more about
 # [built-in tools](https://platform.openai.com/docs/guides/tools).
 # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-# the model to call your own code
+# the model to call your own code with strongly typed arguments and outputs.
+# Learn more about
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
+# You can also use custom tools to call your own code.
 #
-# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>]
+# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>]
 required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
 
 # @!attribute top_p
@@ -171,6 +173,14 @@ module OpenAI
 # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
 optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
 
+# @!attribute prompt_cache_key
+# Used by OpenAI to cache responses for similar requests to optimize your cache
+# hit rates. Replaces the `user` field.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+#
+# @return [String, nil]
+optional :prompt_cache_key, String
+
 # @!attribute reasoning
 # **o-series models only**
 #
@@ -180,6 +190,16 @@ module OpenAI
 # @return [OpenAI::Models::Reasoning, nil]
 optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
 
+# @!attribute safety_identifier
+# A stable identifier used to help detect users of your application that may be
+# violating OpenAI's usage policies. The IDs should be a string that uniquely
+# identifies each user. We recommend hashing their username or email address, in
+# order to avoid sending us any identifying information.
+# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+#
+# @return [String, nil]
+optional :safety_identifier, String
+
 # @!attribute service_tier
 # Specifies the processing type used for serving the request.
 #
@@ -246,14 +266,38 @@ module OpenAI
 optional :usage, -> { OpenAI::Responses::ResponseUsage }
 
 # @!attribute user
-#
-#
-#
+# @deprecated
+#
+# This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+# `prompt_cache_key` instead to maintain caching optimizations. A stable
+# identifier for your end-users. Used to boost cache hit rates by better bucketing
+# similar requests and to help OpenAI detect and prevent abuse.
+# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 #
 # @return [String, nil]
 optional :user, String
 
-#
+# Convenience property that aggregates all `output_text` items from the `output` list.
+#
+# If no `output_text` content blocks exist, then an empty string is returned.
+#
+# @return [String]
+def output_text
+  texts = []
+
+  output.each do |item|
+    next unless item.type == :message
+    item.content.each do |content|
+      if content.type == :output_text
+        texts << content.text
+      end
+    end
+  end
+
+  texts.join
+end
+
+# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::Response} for more details.
 #
```
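The new `output_text` helper shown above simply concatenates every `output_text` content block across `message` items in `response.output`. A minimal usage sketch (assuming `OPENAI_API_KEY` is set in the environment; the model name is illustrative):

```ruby
require "openai"

client = OpenAI::Client.new  # reads OPENAI_API_KEY from the environment

response = client.responses.create(
  model: "gpt-4.1",  # illustrative model name
  input: "Say hello in one short sentence."
)

# Aggregates all `output_text` blocks from `response.output`; returns "" if none exist.
puts response.output_text
```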
data/lib/openai/models/responses/response.rb (continued)

```diff
@@ -265,21 +309,21 @@ module OpenAI
 #
 # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
 #
-# @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
+# @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
 #
 # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
 #
 # @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
 #
-# @param output [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest>] An array of content items generated by the model.
+# @param output [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseCustomToolCall>] An array of content items generated by the model.
 #
 # @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel.
 #
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
-# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
+# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
 #
-# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
+# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
 #
 # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
 #
@@ -293,8 +337,12 @@ module OpenAI
 #
 # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
 #
+# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+#
 # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
 #
+# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+#
 # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
 #
 # @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
@@ -307,7 +355,7 @@ module OpenAI
 #
 # @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
 #
-# @param user [String]
+# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
 #
 # @param object [Symbol, :response] The object type of this resource - always set to `response`.
 
@@ -357,7 +405,7 @@ module OpenAI
 variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }
 
 # @!method self.variants
-# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
+# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
 
 # @type [OpenAI::Internal::Type::Converter]
 ResponseInputItemArray =
@@ -382,6 +430,9 @@ module OpenAI
 # `required` means the model must call one or more tools.
 variant enum: -> { OpenAI::Responses::ToolChoiceOptions }
 
+# Constrains the tools available to the model to a pre-defined set.
+variant -> { OpenAI::Responses::ToolChoiceAllowed }
+
 # Indicates that the model should use a built-in tool to generate a response.
 # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
 variant -> { OpenAI::Responses::ToolChoiceTypes }
@@ -392,8 +443,11 @@ module OpenAI
 # Use this option to force the model to call a specific tool on a remote MCP server.
 variant -> { OpenAI::Responses::ToolChoiceMcp }
 
+# Use this option to force the model to call a specific custom tool.
+variant -> { OpenAI::Responses::ToolChoiceCustom }
+
 # @!method self.variants
-# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
+# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
 end
 
 # Specifies the processing type used for serving the request.
```
data/lib/openai/models/responses/response_create_params.rb

```diff
@@ -51,7 +51,7 @@ module OpenAI
 # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
 # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
 #
-# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
+# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
 optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input }
 
 # @!attribute instructions
@@ -123,6 +123,14 @@ module OpenAI
 # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
 optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
 
+# @!attribute prompt_cache_key
+# Used by OpenAI to cache responses for similar requests to optimize your cache
+# hit rates. Replaces the `user` field.
+# [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+#
+# @return [String, nil]
+optional :prompt_cache_key, String
+
 # @!attribute reasoning
 # **o-series models only**
 #
@@ -132,6 +140,16 @@ module OpenAI
 # @return [OpenAI::Models::Reasoning, nil]
 optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
 
+# @!attribute safety_identifier
+# A stable identifier used to help detect users of your application that may be
+# violating OpenAI's usage policies. The IDs should be a string that uniquely
+# identifies each user. We recommend hashing their username or email address, in
+# order to avoid sending us any identifying information.
+# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+#
+# @return [String, nil]
+optional :safety_identifier, String
+
 # @!attribute service_tier
 # Specifies the processing type used for serving the request.
 #
```
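Both new request fields are plain strings meant to replace the deprecated `user` parameter: `prompt_cache_key` buckets similar requests for caching, while `safety_identifier` gives OpenAI a stable, non-identifying handle per end user for abuse detection. A minimal request sketch with illustrative values (hashing the identifier follows the recommendation in the field docs):

```ruby
require "digest"
require "openai"

client = OpenAI::Client.new

# Hash the account identifier so no raw PII is sent.
hashed_user = Digest::SHA256.hexdigest("user@example.com")

response = client.responses.create(
  model: "gpt-4.1",                      # illustrative model name
  input: "Summarize our refund policy.",
  prompt_cache_key: "refund-policy-v1",  # buckets similar requests for cache hits
  safety_identifier: hashed_user         # stable per-user ID for abuse detection
)
```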
data/lib/openai/models/responses/response_create_params.rb (continued)

```diff
@@ -160,6 +178,12 @@ module OpenAI
 # @return [Boolean, nil]
 optional :store, OpenAI::Internal::Type::Boolean, nil?: true
 
+# @!attribute stream_options
+# Options for streaming responses. Only set this when you set `stream: true`.
+#
+# @return [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil]
+optional :stream_options, -> { OpenAI::Responses::ResponseCreateParams::StreamOptions }, nil?: true
+
 # @!attribute temperature
 # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
 # make the output more random, while lower values like 0.2 will make it more
@@ -190,7 +214,7 @@ module OpenAI
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
 #
-# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil]
+# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom, nil]
 optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice }
 
 # @!attribute tools
@@ -206,10 +230,12 @@ module OpenAI
 # Learn more about
 # [built-in tools](https://platform.openai.com/docs/guides/tools).
 # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-# the model to call your own code
+# the model to call your own code with strongly typed arguments and outputs.
+# Learn more about
 # [function calling](https://platform.openai.com/docs/guides/function-calling).
+# You can also use custom tools to call your own code.
 #
-# @return [Array<OpenAI::
+# @return [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>, nil]
 optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
 
 # @!attribute top_logprobs
@@ -242,14 +268,18 @@ module OpenAI
 optional :truncation, enum: -> { OpenAI::Responses::ResponseCreateParams::Truncation }, nil?: true
 
 # @!attribute user
-#
-#
-#
+# @deprecated
+#
+# This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+# `prompt_cache_key` instead to maintain caching optimizations. A stable
+# identifier for your end-users. Used to boost cache hit rates by better bucketing
+# similar requests and to help OpenAI detect and prevent abuse.
+# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
 #
 # @return [String, nil]
 optional :user, String
 
-# @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
+# @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, stream_options: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
 # Some parameter documentations has been truncated, see
 # {OpenAI::Models::Responses::ResponseCreateParams} for more details.
 #
@@ -257,7 +287,7 @@ module OpenAI
 #
 # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
 #
-# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
+# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
 #
 # @param instructions [String, nil] A system (or developer) message inserted into the model's context.
 #
@@ -275,19 +305,25 @@ module OpenAI
 #
 # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
 #
+# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+#
 # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
 #
+# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+#
 # @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
 #
 # @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
 #
+# @param stream_options [OpenAI::Models::Responses::ResponseCreateParams::StreamOptions, nil] Options for streaming responses. Only set this when you set `stream: true`.
+#
 # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
 #
 # @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
 #
-# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
+# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom] How the model should select which tool (or tools) to use when generating
 #
-# @param tools [Array<OpenAI::
+# @param tools [Array<OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::CustomTool, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
 #
 # @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
 #
@@ -295,7 +331,7 @@ module OpenAI
 #
 # @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
 #
-# @param user [String]
+# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
 #
 # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
 
@@ -320,7 +356,7 @@ module OpenAI
 variant -> { OpenAI::Responses::ResponseInput }
 
 # @!method self.variants
-# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
+# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseCustomToolCallOutput, OpenAI::Models::Responses::ResponseCustomToolCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
 end
 
 # Specifies the processing type used for serving the request.
@@ -353,6 +389,28 @@ module OpenAI
 # @return [Array<Symbol>]
 end
 
+class StreamOptions < OpenAI::Internal::Type::BaseModel
+  # @!attribute include_obfuscation
+  #   When true, stream obfuscation will be enabled. Stream obfuscation adds random
+  #   characters to an `obfuscation` field on streaming delta events to normalize
+  #   payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+  #   fields are included by default, but add a small amount of overhead to the data
+  #   stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+  #   you trust the network links between your application and the OpenAI API.
+  #
+  #   @return [Boolean, nil]
+  optional :include_obfuscation, OpenAI::Internal::Type::Boolean
+
+  # @!method initialize(include_obfuscation: nil)
+  #   Some parameter documentations has been truncated, see
+  #   {OpenAI::Models::Responses::ResponseCreateParams::StreamOptions} for more
+  #   details.
+  #
+  #   Options for streaming responses. Only set this when you set `stream: true`.
+  #
+  #   @param include_obfuscation [Boolean] When true, stream obfuscation will be enabled. Stream obfuscation adds
+end
+
 # How the model should select which tool (or tools) to use when generating a
 # response. See the `tools` parameter to see how to specify which tools the model
 # can call.
```
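`StreamOptions` currently carries a single flag, `include_obfuscation`, which only matters on streaming requests: setting it to `false` drops the padding added to delta events, saving a little bandwidth when you trust the network path. A hedged sketch of passing it through; the `stream_raw` entry point and model name are assumptions, so adjust to whichever streaming helper you actually use:

```ruby
require "openai"

client = OpenAI::Client.new

# Assumes the gem's raw streaming entry point for the Responses API.
stream = client.responses.stream_raw(
  model: "gpt-4.1",  # illustrative model name
  input: "Stream a haiku about caching.",
  stream_options: { include_obfuscation: false }  # skip obfuscation padding
)

stream.each do |event|
  # Only some event types carry a text delta, so guard before printing.
  print event.delta if event.respond_to?(:delta)
end
```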
data/lib/openai/models/responses/response_create_params.rb (continued)

```diff
@@ -369,6 +427,9 @@ module OpenAI
 # `required` means the model must call one or more tools.
 variant enum: -> { OpenAI::Responses::ToolChoiceOptions }
 
+# Constrains the tools available to the model to a pre-defined set.
+variant -> { OpenAI::Responses::ToolChoiceAllowed }
+
 # Indicates that the model should use a built-in tool to generate a response.
 # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
 variant -> { OpenAI::Responses::ToolChoiceTypes }
@@ -379,8 +440,11 @@ module OpenAI
 # Use this option to force the model to call a specific tool on a remote MCP server.
 variant -> { OpenAI::Responses::ToolChoiceMcp }
 
+# Use this option to force the model to call a specific custom tool.
+variant -> { OpenAI::Responses::ToolChoiceCustom }
+
 # @!method self.variants
-# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
+# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceAllowed, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, OpenAI::Models::Responses::ToolChoiceCustom)]
 end
 
 # The truncation strategy to use for the model response.
```
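Together with the `CustomTool` entry in the `tools` union, the two new `tool_choice` variants let a request both declare a free-form custom tool and force the model to call it. A hedged request sketch; the hash shapes mirror the new `CustomTool` and `ToolChoiceCustom` models, while the tool name, model, and prompt are illustrative:

```ruby
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-5",  # illustrative; custom tools need a model that supports them
  input: "Count signups per day for the last week.",
  tools: [
    {
      type: :custom,
      name: "run_sql",                                 # hypothetical tool name
      description: "Executes a read-only SQL query."
    }
  ],
  # ToolChoiceCustom: force the model to call the named custom tool.
  tool_choice: { type: :custom, name: "run_sql" }
)
```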
data/lib/openai/models/responses/response_custom_tool_call.rb (new file)

```diff
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel
+        # @!attribute call_id
+        #   An identifier used to map this custom tool call to a tool call output.
+        #
+        #   @return [String]
+        required :call_id, String
+
+        # @!attribute input
+        #   The input for the custom tool call generated by the model.
+        #
+        #   @return [String]
+        required :input, String
+
+        # @!attribute name
+        #   The name of the custom tool being called.
+        #
+        #   @return [String]
+        required :name, String
+
+        # @!attribute type
+        #   The type of the custom tool call. Always `custom_tool_call`.
+        #
+        #   @return [Symbol, :custom_tool_call]
+        required :type, const: :custom_tool_call
+
+        # @!attribute id
+        #   The unique ID of the custom tool call in the OpenAI platform.
+        #
+        #   @return [String, nil]
+        optional :id, String
+
+        # @!method initialize(call_id:, input:, name:, id: nil, type: :custom_tool_call)
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCustomToolCall} for more details.
+        #
+        #   A call to a custom tool created by the model.
+        #
+        #   @param call_id [String] An identifier used to map this custom tool call to a tool call output.
+        #
+        #   @param input [String] The input for the custom tool call generated by the model.
+        #
+        #   @param name [String] The name of the custom tool being called.
+        #
+        #   @param id [String] The unique ID of the custom tool call in the OpenAI platform.
+        #
+        #   @param type [Symbol, :custom_tool_call] The type of the custom tool call. Always `custom_tool_call`.
+      end
+    end
+  end
+end
```
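When the model decides to call a custom tool, an item of this class appears in `response.output` with `type: :custom_tool_call`, a free-form `input` string, and a `call_id` to echo back later. A minimal handling sketch, continuing the request example above:

```ruby
# Continuing the earlier sketch: find the first custom tool call in the output.
call = response.output.find { |item| item.type == :custom_tool_call }

if call
  puts "Model wants #{call.name} with raw input: #{call.input}"
  # call.call_id is echoed back on the follow-up request (see the output model below).
end
```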
data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb (new file)

```diff
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute delta
+        #   The incremental input data (delta) for the custom tool call.
+        #
+        #   @return [String]
+        required :delta, String
+
+        # @!attribute item_id
+        #   Unique identifier for the API item associated with this event.
+        #
+        #   @return [String]
+        required :item_id, String
+
+        # @!attribute output_index
+        #   The index of the output this delta applies to.
+        #
+        #   @return [Integer]
+        required :output_index, Integer
+
+        # @!attribute sequence_number
+        #   The sequence number of this event.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
+        # @!attribute type
+        #   The event type identifier.
+        #
+        #   @return [Symbol, :"response.custom_tool_call_input.delta"]
+        required :type, const: :"response.custom_tool_call_input.delta"
+
+        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.delta")
+        #   Event representing a delta (partial update) to the input of a custom tool call.
+        #
+        #   @param delta [String] The incremental input data (delta) for the custom tool call.
+        #
+        #   @param item_id [String] Unique identifier for the API item associated with this event.
+        #
+        #   @param output_index [Integer] The index of the output this delta applies to.
+        #
+        #   @param sequence_number [Integer] The sequence number of this event.
+        #
+        #   @param type [Symbol, :"response.custom_tool_call_input.delta"] The event type identifier.
+      end
+    end
+  end
+end
```
data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb (new file)

```diff
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel
+        # @!attribute input
+        #   The complete input data for the custom tool call.
+        #
+        #   @return [String]
+        required :input, String
+
+        # @!attribute item_id
+        #   Unique identifier for the API item associated with this event.
+        #
+        #   @return [String]
+        required :item_id, String
+
+        # @!attribute output_index
+        #   The index of the output this event applies to.
+        #
+        #   @return [Integer]
+        required :output_index, Integer
+
+        # @!attribute sequence_number
+        #   The sequence number of this event.
+        #
+        #   @return [Integer]
+        required :sequence_number, Integer
+
+        # @!attribute type
+        #   The event type identifier.
+        #
+        #   @return [Symbol, :"response.custom_tool_call_input.done"]
+        required :type, const: :"response.custom_tool_call_input.done"
+
+        # @!method initialize(input:, item_id:, output_index:, sequence_number:, type: :"response.custom_tool_call_input.done")
+        #   Event indicating that input for a custom tool call is complete.
+        #
+        #   @param input [String] The complete input data for the custom tool call.
+        #
+        #   @param item_id [String] Unique identifier for the API item associated with this event.
+        #
+        #   @param output_index [Integer] The index of the output this event applies to.
+        #
+        #   @param sequence_number [Integer] The sequence number of this event.
+        #
+        #   @param type [Symbol, :"response.custom_tool_call_input.done"] The event type identifier.
+      end
+    end
+  end
+end
```
data/lib/openai/models/responses/response_custom_tool_call_output.rb (new file)

```diff
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Responses
+      class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel
+        # @!attribute call_id
+        #   The call ID, used to map this custom tool call output to a custom tool call.
+        #
+        #   @return [String]
+        required :call_id, String
+
+        # @!attribute output
+        #   The output from the custom tool call generated by your code.
+        #
+        #   @return [String]
+        required :output, String
+
+        # @!attribute type
+        #   The type of the custom tool call output. Always `custom_tool_call_output`.
+        #
+        #   @return [Symbol, :custom_tool_call_output]
+        required :type, const: :custom_tool_call_output
+
+        # @!attribute id
+        #   The unique ID of the custom tool call output in the OpenAI platform.
+        #
+        #   @return [String, nil]
+        optional :id, String
+
+        # @!method initialize(call_id:, output:, id: nil, type: :custom_tool_call_output)
+        #   Some parameter documentations has been truncated, see
+        #   {OpenAI::Models::Responses::ResponseCustomToolCallOutput} for more details.
+        #
+        #   The output of a custom tool call from your code, being sent back to the model.
+        #
+        #   @param call_id [String] The call ID, used to map this custom tool call output to a custom tool call.
+        #
+        #   @param output [String] The output from the custom tool call generated by your code.
+        #
+        #   @param id [String] The unique ID of the custom tool call output in the OpenAI platform.
+        #
+        #   @param type [Symbol, :custom_tool_call_output] The type of the custom tool call output. Always `custom_tool_call_output`.
+      end
+    end
+  end
+end
```
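`ResponseCustomToolCallOutput` closes the loop: after running the tool locally, its result goes back to the model as an input item carrying the same `call_id`, typically alongside `previous_response_id`. A hedged follow-up sketch, continuing the earlier example (the SQL result value is illustrative):

```ruby
# Continuing the sketch: `call` is the ResponseCustomToolCall found in `response.output`.
sql_result = "day,count\n2025-01-01,42"  # whatever your own code produced

followup = client.responses.create(
  model: "gpt-5",                      # illustrative model name
  previous_response_id: response.id,   # keep the conversation state
  input: [
    {
      type: :custom_tool_call_output,  # maps to ResponseCustomToolCallOutput
      call_id: call.call_id,           # same id as the originating tool call
      output: sql_result
    }
  ]
)

puts followup.output_text
```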