openai 0.9.0 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
@@ -10,30 +10,6 @@ module OpenAI
|
|
10
10
|
extend OpenAI::Internal::Type::RequestParameters::Converter
|
11
11
|
include OpenAI::Internal::Type::RequestParameters
|
12
12
|
|
13
|
-
# @!attribute input
|
14
|
-
# Text, image, or file inputs to the model, used to generate a response.
|
15
|
-
#
|
16
|
-
# Learn more:
|
17
|
-
#
|
18
|
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
|
19
|
-
# - [Image inputs](https://platform.openai.com/docs/guides/images)
|
20
|
-
# - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
|
21
|
-
# - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
|
22
|
-
# - [Function calling](https://platform.openai.com/docs/guides/function-calling)
|
23
|
-
#
|
24
|
-
# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>]
|
25
|
-
required :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input }
|
26
|
-
|
27
|
-
# @!attribute model
|
28
|
-
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
29
|
-
# wide range of models with different capabilities, performance characteristics,
|
30
|
-
# and price points. Refer to the
|
31
|
-
# [model guide](https://platform.openai.com/docs/models) to browse and compare
|
32
|
-
# available models.
|
33
|
-
#
|
34
|
-
# @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel]
|
35
|
-
required :model, union: -> { OpenAI::ResponsesModel }
|
36
|
-
|
37
13
|
# @!attribute background
|
38
14
|
# Whether to run the model response in the background.
|
39
15
|
# [Learn more](https://platform.openai.com/docs/guides/background).
|
@@ -45,24 +21,39 @@ module OpenAI
|
|
45
21
|
# Specify additional output data to include in the model response. Currently
|
46
22
|
# supported values are:
|
47
23
|
#
|
24
|
+
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
|
25
|
+
# in code interpreter tool call items.
|
26
|
+
# - `computer_call_output.output.image_url`: Include image urls from the computer
|
27
|
+
# call output.
|
48
28
|
# - `file_search_call.results`: Include the search results of the file search tool
|
49
29
|
# call.
|
50
30
|
# - `message.input_image.image_url`: Include image urls from the input message.
|
51
|
-
# - `
|
52
|
-
# call output.
|
31
|
+
# - `message.output_text.logprobs`: Include logprobs with assistant messages.
|
53
32
|
# - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
|
54
33
|
# tokens in reasoning item outputs. This enables reasoning items to be used in
|
55
34
|
# multi-turn conversations when using the Responses API statelessly (like when
|
56
35
|
# the `store` parameter is set to `false`, or when an organization is enrolled
|
57
36
|
# in the zero data retention program).
|
58
|
-
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
|
59
|
-
# in code interpreter tool call items.
|
60
37
|
#
|
61
38
|
# @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
|
62
39
|
optional :include,
|
63
40
|
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] },
|
64
41
|
nil?: true
|
65
42
|
|
43
|
+
# @!attribute input
|
44
|
+
# Text, image, or file inputs to the model, used to generate a response.
|
45
|
+
#
|
46
|
+
# Learn more:
|
47
|
+
#
|
48
|
+
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
|
49
|
+
# - [Image inputs](https://platform.openai.com/docs/guides/images)
|
50
|
+
# - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
|
51
|
+
# - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
|
52
|
+
# - [Function calling](https://platform.openai.com/docs/guides/function-calling)
|
53
|
+
#
|
54
|
+
# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
|
55
|
+
optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input }
|
56
|
+
|
66
57
|
# @!attribute instructions
|
67
58
|
# A system (or developer) message inserted into the model's context.
|
68
59
|
#
|
@@ -81,6 +72,15 @@ module OpenAI
|
|
81
72
|
# @return [Integer, nil]
|
82
73
|
optional :max_output_tokens, Integer, nil?: true
|
83
74
|
|
75
|
+
# @!attribute max_tool_calls
|
76
|
+
# The maximum number of total calls to built-in tools that can be processed in a
|
77
|
+
# response. This maximum number applies across all built-in tool calls, not per
|
78
|
+
# individual tool. Any further attempts to call a tool by the model will be
|
79
|
+
# ignored.
|
80
|
+
#
|
81
|
+
# @return [Integer, nil]
|
82
|
+
optional :max_tool_calls, Integer, nil?: true
|
83
|
+
|
84
84
|
# @!attribute metadata
|
85
85
|
# Set of 16 key-value pairs that can be attached to an object. This can be useful
|
86
86
|
# for storing additional information about the object in a structured format, and
|
@@ -92,6 +92,16 @@ module OpenAI
|
|
92
92
|
# @return [Hash{Symbol=>String}, nil]
|
93
93
|
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
|
94
94
|
|
95
|
+
# @!attribute model
|
96
|
+
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
|
97
|
+
# wide range of models with different capabilities, performance characteristics,
|
98
|
+
# and price points. Refer to the
|
99
|
+
# [model guide](https://platform.openai.com/docs/models) to browse and compare
|
100
|
+
# available models.
|
101
|
+
#
|
102
|
+
# @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel, nil]
|
103
|
+
optional :model, union: -> { OpenAI::ResponsesModel }
|
104
|
+
|
95
105
|
# @!attribute parallel_tool_calls
|
96
106
|
# Whether to allow the model to run tool calls in parallel.
|
97
107
|
#
|
@@ -123,23 +133,23 @@ module OpenAI
|
|
123
133
|
optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
|
124
134
|
|
125
135
|
# @!attribute service_tier
|
126
|
-
# Specifies the
|
127
|
-
#
|
128
|
-
#
|
129
|
-
#
|
130
|
-
#
|
131
|
-
# - If set to '
|
132
|
-
#
|
133
|
-
#
|
134
|
-
#
|
135
|
-
# tier
|
136
|
-
#
|
137
|
-
# service tier.
|
138
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
136
|
+
# Specifies the processing type used for serving the request.
|
137
|
+
#
|
138
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
139
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
140
|
+
# will use 'default'.
|
141
|
+
# - If set to 'default', then the requset will be processed with the standard
|
142
|
+
# pricing and performance for the selected model.
|
143
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
144
|
+
# 'priority', then the request will be processed with the corresponding service
|
145
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
146
|
+
# Priority processing.
|
139
147
|
# - When not set, the default behavior is 'auto'.
|
140
148
|
#
|
141
|
-
# When
|
142
|
-
#
|
149
|
+
# When the `service_tier` parameter is set, the response body will include the
|
150
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
151
|
+
# request. This response value may be different from the value set in the
|
152
|
+
# parameter.
|
143
153
|
#
|
144
154
|
# @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil]
|
145
155
|
optional :service_tier, enum: -> { OpenAI::Responses::ResponseCreateParams::ServiceTier }, nil?: true
|
@@ -180,7 +190,7 @@ module OpenAI
|
|
180
190
|
# response. See the `tools` parameter to see how to specify which tools the model
|
181
191
|
# can call.
|
182
192
|
#
|
183
|
-
# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, nil]
|
193
|
+
# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil]
|
184
194
|
optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice }
|
185
195
|
|
186
196
|
# @!attribute tools
|
@@ -202,6 +212,13 @@ module OpenAI
|
|
202
212
|
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
|
203
213
|
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
|
204
214
|
|
215
|
+
# @!attribute top_logprobs
|
216
|
+
# An integer between 0 and 20 specifying the number of most likely tokens to
|
217
|
+
# return at each token position, each with an associated log probability.
|
218
|
+
#
|
219
|
+
# @return [Integer, nil]
|
220
|
+
optional :top_logprobs, Integer, nil?: true
|
221
|
+
|
205
222
|
# @!attribute top_p
|
206
223
|
# An alternative to sampling with temperature, called nucleus sampling, where the
|
207
224
|
# model considers the results of the tokens with top_p probability mass. So 0.1
|
@@ -232,24 +249,26 @@ module OpenAI
|
|
232
249
|
# @return [String, nil]
|
233
250
|
optional :user, String
|
234
251
|
|
235
|
-
# @!method initialize(
|
252
|
+
# @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
|
236
253
|
# Some parameter documentations has been truncated, see
|
237
254
|
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
|
238
255
|
#
|
239
|
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
240
|
-
#
|
241
|
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
242
|
-
#
|
243
256
|
# @param background [Boolean, nil] Whether to run the model response in the background.
|
244
257
|
#
|
245
258
|
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
|
246
259
|
#
|
260
|
+
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
|
261
|
+
#
|
247
262
|
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
|
248
263
|
#
|
249
264
|
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
|
250
265
|
#
|
266
|
+
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
|
267
|
+
#
|
251
268
|
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
|
252
269
|
#
|
270
|
+
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
|
271
|
+
#
|
253
272
|
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
|
254
273
|
#
|
255
274
|
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
|
@@ -258,7 +277,7 @@ module OpenAI
|
|
258
277
|
#
|
259
278
|
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
|
260
279
|
#
|
261
|
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the
|
280
|
+
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
|
262
281
|
#
|
263
282
|
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
|
264
283
|
#
|
@@ -266,10 +285,12 @@ module OpenAI
|
|
266
285
|
#
|
267
286
|
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
|
268
287
|
#
|
269
|
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
|
288
|
+
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
|
270
289
|
#
|
271
290
|
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
|
272
291
|
#
|
292
|
+
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
|
293
|
+
#
|
273
294
|
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
|
274
295
|
#
|
275
296
|
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
|
@@ -302,23 +323,23 @@ module OpenAI
|
|
302
323
|
# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
|
303
324
|
end
|
304
325
|
|
305
|
-
# Specifies the
|
306
|
-
#
|
307
|
-
#
|
308
|
-
#
|
309
|
-
#
|
310
|
-
# - If set to '
|
311
|
-
#
|
312
|
-
#
|
313
|
-
#
|
314
|
-
# tier
|
315
|
-
#
|
316
|
-
# service tier.
|
317
|
-
# [Learn more](https://platform.openai.com/docs/guides/flex-processing).
|
326
|
+
# Specifies the processing type used for serving the request.
|
327
|
+
#
|
328
|
+
# - If set to 'auto', then the request will be processed with the service tier
|
329
|
+
# configured in the Project settings. Unless otherwise configured, the Project
|
330
|
+
# will use 'default'.
|
331
|
+
# - If set to 'default', then the requset will be processed with the standard
|
332
|
+
# pricing and performance for the selected model.
|
333
|
+
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
|
334
|
+
# 'priority', then the request will be processed with the corresponding service
|
335
|
+
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
|
336
|
+
# Priority processing.
|
318
337
|
# - When not set, the default behavior is 'auto'.
|
319
338
|
#
|
320
|
-
# When
|
321
|
-
#
|
339
|
+
# When the `service_tier` parameter is set, the response body will include the
|
340
|
+
# `service_tier` value based on the processing mode actually used to serve the
|
341
|
+
# request. This response value may be different from the value set in the
|
342
|
+
# parameter.
|
322
343
|
module ServiceTier
|
323
344
|
extend OpenAI::Internal::Type::Enum
|
324
345
|
|
@@ -326,6 +347,7 @@ module OpenAI
|
|
326
347
|
DEFAULT = :default
|
327
348
|
FLEX = :flex
|
328
349
|
SCALE = :scale
|
350
|
+
PRIORITY = :priority
|
329
351
|
|
330
352
|
# @!method self.values
|
331
353
|
# @return [Array<Symbol>]
|
@@ -354,8 +376,11 @@ module OpenAI
|
|
354
376
|
# Use this option to force the model to call a specific function.
|
355
377
|
variant -> { OpenAI::Responses::ToolChoiceFunction }
|
356
378
|
|
379
|
+
# Use this option to force the model to call a specific tool on a remote MCP server.
|
380
|
+
variant -> { OpenAI::Responses::ToolChoiceMcp }
|
381
|
+
|
357
382
|
# @!method self.variants
|
358
|
-
# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction)]
|
383
|
+
# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
|
359
384
|
end
|
360
385
|
|
361
386
|
# The truncation strategy to use for the model response.
|
@@ -10,6 +10,13 @@ module OpenAI
|
|
10
10
|
# @return [String]
|
11
11
|
required :id, String
|
12
12
|
|
13
|
+
# @!attribute action
|
14
|
+
# An object describing the specific action taken in this web search call. Includes
|
15
|
+
# details on how the model used the web (search, open_page, find).
|
16
|
+
#
|
17
|
+
# @return [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find]
|
18
|
+
required :action, union: -> { OpenAI::Responses::ResponseFunctionWebSearch::Action }
|
19
|
+
|
13
20
|
# @!attribute status
|
14
21
|
# The status of the web search tool call.
|
15
22
|
#
|
@@ -22,7 +29,7 @@ module OpenAI
|
|
22
29
|
# @return [Symbol, :web_search_call]
|
23
30
|
required :type, const: :web_search_call
|
24
31
|
|
25
|
-
# @!method initialize(id:, status:, type: :web_search_call)
|
32
|
+
# @!method initialize(id:, action:, status:, type: :web_search_call)
|
26
33
|
# Some parameter documentation has been truncated, see
|
27
34
|
# {OpenAI::Models::Responses::ResponseFunctionWebSearch} for more details.
|
28
35
|
#
|
@@ -32,10 +39,117 @@ module OpenAI
|
|
32
39
|
#
|
33
40
|
# @param id [String] The unique ID of the web search tool call.
|
34
41
|
#
|
42
|
+
# @param action [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find] An object describing the specific action taken in this web search call.
|
43
|
+
#
|
35
44
|
# @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] The status of the web search tool call.
|
36
45
|
#
|
37
46
|
# @param type [Symbol, :web_search_call] The type of the web search tool call. Always `web_search_call`.
|
38
47
|
|
48
|
+
# An object describing the specific action taken in this web search call. Includes
|
49
|
+
# details on how the model used the web (search, open_page, find).
|
50
|
+
#
|
51
|
+
# @see OpenAI::Models::Responses::ResponseFunctionWebSearch#action
|
52
|
+
module Action
|
53
|
+
extend OpenAI::Internal::Type::Union
|
54
|
+
|
55
|
+
discriminator :type
|
56
|
+
|
57
|
+
# Action type "search" - Performs a web search query.
|
58
|
+
variant :search, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Search }
|
59
|
+
|
60
|
+
# Action type "open_page" - Opens a specific URL from search results.
|
61
|
+
variant :open_page, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage }
|
62
|
+
|
63
|
+
# Action type "find": Searches for a pattern within a loaded page.
|
64
|
+
variant :find, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Find }
|
65
|
+
|
66
|
+
class Search < OpenAI::Internal::Type::BaseModel
|
67
|
+
# @!attribute query
|
68
|
+
# The search query.
|
69
|
+
#
|
70
|
+
# @return [String]
|
71
|
+
required :query, String
|
72
|
+
|
73
|
+
# @!attribute type
|
74
|
+
# The action type.
|
75
|
+
#
|
76
|
+
# @return [Symbol, :search]
|
77
|
+
required :type, const: :search
|
78
|
+
|
79
|
+
# @!method initialize(query:, type: :search)
|
80
|
+
# Some parameter documentation has been truncated, see
|
81
|
+
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search} for more
|
82
|
+
# details.
|
83
|
+
#
|
84
|
+
# Action type "search" - Performs a web search query.
|
85
|
+
#
|
86
|
+
# @param query [String] The search query.
|
87
|
+
#
|
88
|
+
# @param type [Symbol, :search] The action type.
|
89
|
+
end
|
90
|
+
|
91
|
+
class OpenPage < OpenAI::Internal::Type::BaseModel
|
92
|
+
# @!attribute type
|
93
|
+
# The action type.
|
94
|
+
#
|
95
|
+
# @return [Symbol, :open_page]
|
96
|
+
required :type, const: :open_page
|
97
|
+
|
98
|
+
# @!attribute url
|
99
|
+
# The URL opened by the model.
|
100
|
+
#
|
101
|
+
# @return [String]
|
102
|
+
required :url, String
|
103
|
+
|
104
|
+
# @!method initialize(url:, type: :open_page)
|
105
|
+
# Some parameter documentation has been truncated, see
|
106
|
+
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage} for
|
107
|
+
# more details.
|
108
|
+
#
|
109
|
+
# Action type "open_page" - Opens a specific URL from search results.
|
110
|
+
#
|
111
|
+
# @param url [String] The URL opened by the model.
|
112
|
+
#
|
113
|
+
# @param type [Symbol, :open_page] The action type.
|
114
|
+
end
|
115
|
+
|
116
|
+
class Find < OpenAI::Internal::Type::BaseModel
|
117
|
+
# @!attribute pattern
|
118
|
+
# The pattern or text to search for within the page.
|
119
|
+
#
|
120
|
+
# @return [String]
|
121
|
+
required :pattern, String
|
122
|
+
|
123
|
+
# @!attribute type
|
124
|
+
# The action type.
|
125
|
+
#
|
126
|
+
# @return [Symbol, :find]
|
127
|
+
required :type, const: :find
|
128
|
+
|
129
|
+
# @!attribute url
|
130
|
+
# The URL of the page searched for the pattern.
|
131
|
+
#
|
132
|
+
# @return [String]
|
133
|
+
required :url, String
|
134
|
+
|
135
|
+
# @!method initialize(pattern:, url:, type: :find)
|
136
|
+
# Some parameter documentation has been truncated, see
|
137
|
+
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find} for more
|
138
|
+
# details.
|
139
|
+
#
|
140
|
+
# Action type "find": Searches for a pattern within a loaded page.
|
141
|
+
#
|
142
|
+
# @param pattern [String] The pattern or text to search for within the page.
|
143
|
+
#
|
144
|
+
# @param url [String] The URL of the page searched for the pattern.
|
145
|
+
#
|
146
|
+
# @param type [Symbol, :find] The action type.
|
147
|
+
end
|
148
|
+
|
149
|
+
# @!method self.variants
|
150
|
+
# @return [Array(OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find)]
|
151
|
+
end
|
152
|
+
|
39
153
|
# The status of the web search tool call.
|
40
154
|
#
|
41
155
|
# @see OpenAI::Models::Responses::ResponseFunctionWebSearch#status
|
@@ -6,26 +6,28 @@ module OpenAI
|
|
6
6
|
# Specify additional output data to include in the model response. Currently
|
7
7
|
# supported values are:
|
8
8
|
#
|
9
|
+
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
|
10
|
+
# in code interpreter tool call items.
|
11
|
+
# - `computer_call_output.output.image_url`: Include image urls from the computer
|
12
|
+
# call output.
|
9
13
|
# - `file_search_call.results`: Include the search results of the file search tool
|
10
14
|
# call.
|
11
15
|
# - `message.input_image.image_url`: Include image urls from the input message.
|
12
|
-
# - `
|
13
|
-
# call output.
|
16
|
+
# - `message.output_text.logprobs`: Include logprobs with assistant messages.
|
14
17
|
# - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
|
15
18
|
# tokens in reasoning item outputs. This enables reasoning items to be used in
|
16
19
|
# multi-turn conversations when using the Responses API statelessly (like when
|
17
20
|
# the `store` parameter is set to `false`, or when an organization is enrolled
|
18
21
|
# in the zero data retention program).
|
19
|
-
# - `code_interpreter_call.outputs`: Includes the outputs of python code execution
|
20
|
-
# in code interpreter tool call items.
|
21
22
|
module ResponseIncludable
|
22
23
|
extend OpenAI::Internal::Type::Enum
|
23
24
|
|
25
|
+
CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
|
26
|
+
COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
|
24
27
|
FILE_SEARCH_CALL_RESULTS = :"file_search_call.results"
|
25
28
|
MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url"
|
26
|
-
|
29
|
+
MESSAGE_OUTPUT_TEXT_LOGPROBS = :"message.output_text.logprobs"
|
27
30
|
REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content"
|
28
|
-
CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
|
29
31
|
|
30
32
|
# @!method self.values
|
31
33
|
# @return [Array<Symbol>]
|
@@ -76,6 +76,12 @@ module OpenAI
|
|
76
76
|
# @return [String]
|
77
77
|
required :file_id, String
|
78
78
|
|
79
|
+
# @!attribute filename
|
80
|
+
# The filename of the file cited.
|
81
|
+
#
|
82
|
+
# @return [String]
|
83
|
+
required :filename, String
|
84
|
+
|
79
85
|
# @!attribute index
|
80
86
|
# The index of the file in the list of files.
|
81
87
|
#
|
@@ -88,11 +94,13 @@ module OpenAI
|
|
88
94
|
# @return [Symbol, :file_citation]
|
89
95
|
required :type, const: :file_citation
|
90
96
|
|
91
|
-
# @!method initialize(file_id:, index:, type: :file_citation)
|
97
|
+
# @!method initialize(file_id:, filename:, index:, type: :file_citation)
|
92
98
|
# A citation to a file.
|
93
99
|
#
|
94
100
|
# @param file_id [String] The ID of the file.
|
95
101
|
#
|
102
|
+
# @param filename [String] The filename of the file cited.
|
103
|
+
#
|
96
104
|
# @param index [Integer] The index of the file in the list of files.
|
97
105
|
#
|
98
106
|
# @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`.
|
@@ -162,6 +170,12 @@ module OpenAI
|
|
162
170
|
# @return [String]
|
163
171
|
required :file_id, String
|
164
172
|
|
173
|
+
# @!attribute filename
|
174
|
+
# The filename of the container file cited.
|
175
|
+
#
|
176
|
+
# @return [String]
|
177
|
+
required :filename, String
|
178
|
+
|
165
179
|
# @!attribute start_index
|
166
180
|
# The index of the first character of the container file citation in the message.
|
167
181
|
#
|
@@ -174,7 +188,7 @@ module OpenAI
|
|
174
188
|
# @return [Symbol, :container_file_citation]
|
175
189
|
required :type, const: :container_file_citation
|
176
190
|
|
177
|
-
# @!method initialize(container_id:, end_index:, file_id:, start_index:, type: :container_file_citation)
|
191
|
+
# @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation)
|
178
192
|
# A citation for a container file used to generate a model response.
|
179
193
|
#
|
180
194
|
# @param container_id [String] The ID of the container file.
|
@@ -183,6 +197,8 @@ module OpenAI
|
|
183
197
|
#
|
184
198
|
# @param file_id [String] The ID of the file.
|
185
199
|
#
|
200
|
+
# @param filename [String] The filename of the container file cited.
|
201
|
+
#
|
186
202
|
# @param start_index [Integer] The index of the first character of the container file citation in the message.
|
187
203
|
#
|
188
204
|
# @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`.
|
@@ -24,11 +24,11 @@ module OpenAI
|
|
24
24
|
# Emitted when the full audio transcript is completed.
|
25
25
|
variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }
|
26
26
|
|
27
|
-
# Emitted when a partial code snippet is
|
27
|
+
# Emitted when a partial code snippet is streamed by the code interpreter.
|
28
28
|
variant :"response.code_interpreter_call_code.delta",
|
29
29
|
-> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent }
|
30
30
|
|
31
|
-
# Emitted when code snippet
|
31
|
+
# Emitted when the code snippet is finalized by the code interpreter.
|
32
32
|
variant :"response.code_interpreter_call_code.done",
|
33
33
|
-> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent }
|
34
34
|
|
@@ -0,0 +1,40 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
module OpenAI
|
4
|
+
module Models
|
5
|
+
module Responses
|
6
|
+
class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel
|
7
|
+
# @!attribute server_label
|
8
|
+
# The label of the MCP server to use.
|
9
|
+
#
|
10
|
+
# @return [String]
|
11
|
+
required :server_label, String
|
12
|
+
|
13
|
+
# @!attribute type
|
14
|
+
# For MCP tools, the type is always `mcp`.
|
15
|
+
#
|
16
|
+
# @return [Symbol, :mcp]
|
17
|
+
required :type, const: :mcp
|
18
|
+
|
19
|
+
# @!attribute name
|
20
|
+
# The name of the tool to call on the server.
|
21
|
+
#
|
22
|
+
# @return [String, nil]
|
23
|
+
optional :name, String, nil?: true
|
24
|
+
|
25
|
+
# @!method initialize(server_label:, name: nil, type: :mcp)
|
26
|
+
# Some parameter documentation has been truncated, see
|
27
|
+
# {OpenAI::Models::Responses::ToolChoiceMcp} for more details.
|
28
|
+
#
|
29
|
+
# Use this option to force the model to call a specific tool on a remote MCP
|
30
|
+
# server.
|
31
|
+
#
|
32
|
+
# @param server_label [String] The label of the MCP server to use.
|
33
|
+
#
|
34
|
+
# @param name [String, nil] The name of the tool to call on the server.
|
35
|
+
#
|
36
|
+
# @param type [Symbol, :mcp] For MCP tools, the type is always `mcp`.
|
37
|
+
end
|
38
|
+
end
|
39
|
+
end
|
40
|
+
end
|
@@ -14,7 +14,6 @@ module OpenAI
|
|
14
14
|
# - `web_search_preview`
|
15
15
|
# - `computer_use_preview`
|
16
16
|
# - `code_interpreter`
|
17
|
-
# - `mcp`
|
18
17
|
# - `image_generation`
|
19
18
|
#
|
20
19
|
# @return [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type]
|
@@ -38,7 +37,6 @@ module OpenAI
|
|
38
37
|
# - `web_search_preview`
|
39
38
|
# - `computer_use_preview`
|
40
39
|
# - `code_interpreter`
|
41
|
-
# - `mcp`
|
42
40
|
# - `image_generation`
|
43
41
|
#
|
44
42
|
# @see OpenAI::Models::Responses::ToolChoiceTypes#type
|
@@ -51,7 +49,6 @@ module OpenAI
|
|
51
49
|
WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11
|
52
50
|
IMAGE_GENERATION = :image_generation
|
53
51
|
CODE_INTERPRETER = :code_interpreter
|
54
|
-
MCP = :mcp
|
55
52
|
|
56
53
|
# @!method self.values
|
57
54
|
# @return [Array<Symbol>]
|
@@ -18,6 +18,10 @@ module OpenAI
|
|
18
18
|
O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
|
19
19
|
O3_PRO = :"o3-pro"
|
20
20
|
O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
|
21
|
+
O3_DEEP_RESEARCH = :"o3-deep-research"
|
22
|
+
O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
|
23
|
+
O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
|
24
|
+
O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
|
21
25
|
COMPUTER_USE_PREVIEW = :"computer-use-preview"
|
22
26
|
COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
|
23
27
|
|