openai 0.15.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +47 -0
- data/README.md +14 -20
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
- data/lib/openai/helpers/structured_output/union_of.rb +11 -1
- data/lib/openai/internal/transport/base_client.rb +1 -1
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/internal/type/union.rb +13 -17
- data/lib/openai/models/beta/assistant_create_params.rb +4 -5
- data/lib/openai/models/beta/assistant_update_params.rb +22 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
- data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
- data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
- data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
- data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
- data/lib/openai/models/chat/chat_completion_message.rb +3 -5
- data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
- data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
- data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
- data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
- data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
- data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
- data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
- data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
- data/lib/openai/models/chat/completion_create_params.rb +65 -16
- data/lib/openai/models/chat_model.rb +7 -0
- data/lib/openai/models/custom_tool_input_format.rb +76 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
- data/lib/openai/models/evals/run_cancel_response.rb +2 -2
- data/lib/openai/models/evals/run_create_params.rb +2 -2
- data/lib/openai/models/evals/run_create_response.rb +2 -2
- data/lib/openai/models/evals/run_list_response.rb +2 -2
- data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
- data/lib/openai/models/reasoning.rb +4 -5
- data/lib/openai/models/reasoning_effort.rb +4 -4
- data/lib/openai/models/response_format_text_grammar.rb +27 -0
- data/lib/openai/models/response_format_text_python.rb +20 -0
- data/lib/openai/models/responses/custom_tool.rb +48 -0
- data/lib/openai/models/responses/response.rb +70 -16
- data/lib/openai/models/responses/response_create_params.rb +78 -14
- data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
- data/lib/openai/models/responses/response_input_item.rb +7 -1
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
- data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
- data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
- data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
- data/lib/openai/models/responses/response_stream_event.rb +13 -11
- data/lib/openai/models/responses/response_text_config.rb +27 -1
- data/lib/openai/models/responses/tool.rb +5 -1
- data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
- data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
- data/lib/openai/models/vector_store_search_params.rb +6 -1
- data/lib/openai/models.rb +6 -0
- data/lib/openai/resources/beta/assistants.rb +2 -2
- data/lib/openai/resources/beta/threads/runs.rb +2 -2
- data/lib/openai/resources/chat/completions.rb +26 -12
- data/lib/openai/resources/responses.rb +77 -36
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -2
- data/rbi/openai/internal/transport/base_client.rbi +1 -1
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
- data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
- data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
- data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
- data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
- data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
- data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
- data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
- data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
- data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
- data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
- data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
- data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
- data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
- data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
- data/rbi/openai/models/chat/completion_create_params.rbi +150 -31
- data/rbi/openai/models/chat_model.rbi +11 -0
- data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
- data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
- data/rbi/openai/models/evals/run_create_params.rbi +4 -0
- data/rbi/openai/models/evals/run_create_response.rbi +2 -0
- data/rbi/openai/models/evals/run_list_response.rbi +2 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
- data/rbi/openai/models/reasoning.rbi +6 -8
- data/rbi/openai/models/reasoning_effort.rbi +4 -4
- data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
- data/rbi/openai/models/response_format_text_python.rbi +30 -0
- data/rbi/openai/models/responses/custom_tool.rbi +96 -0
- data/rbi/openai/models/responses/response.rbi +59 -11
- data/rbi/openai/models/responses/response_create_params.rbi +138 -13
- data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
- data/rbi/openai/models/responses/response_input_item.rbi +2 -0
- data/rbi/openai/models/responses/response_output_item.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
- data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
- data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
- data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
- data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
- data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
- data/rbi/openai/models/responses/response_text_config.rbi +64 -1
- data/rbi/openai/models/responses/tool.rbi +1 -0
- data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
- data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
- data/rbi/openai/models/vector_store_search_params.rbi +12 -1
- data/rbi/openai/models.rbi +6 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -8
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
- data/rbi/openai/resources/chat/completions.rbi +78 -25
- data/rbi/openai/resources/responses.rbi +249 -47
- data/sig/openai/internal/transport/base_client.rbs +1 -1
- data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
- data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
- data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
- data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
- data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
- data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
- data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
- data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
- data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
- data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
- data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +37 -6
- data/sig/openai/models/chat_model.rbs +15 -1
- data/sig/openai/models/custom_tool_input_format.rbs +61 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/response_format_text_grammar.rbs +15 -0
- data/sig/openai/models/response_format_text_python.rbs +13 -0
- data/sig/openai/models/responses/custom_tool.rbs +43 -0
- data/sig/openai/models/responses/response.rbs +16 -0
- data/sig/openai/models/responses/response_create_params.rbs +33 -0
- data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
- data/sig/openai/models/responses/response_input_item.rbs +2 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
- data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
- data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
- data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
- data/sig/openai/models/responses/response_stream_event.rbs +4 -2
- data/sig/openai/models/responses/response_text_config.rbs +22 -3
- data/sig/openai/models/responses/tool.rbs +1 -0
- data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
- data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
- data/sig/openai/models/vector_store_search_params.rbs +2 -1
- data/sig/openai/models.rbs +6 -0
- data/sig/openai/resources/chat/completions.rbs +8 -2
- data/sig/openai/resources/responses.rbs +36 -0
- metadata +59 -8
- data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
- data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
data/rbi/openai/resources/responses.rbi:

```diff
@@ -40,12 +40,18 @@ module OpenAI
           parallel_tool_calls: T.nilable(T::Boolean),
           previous_response_id: T.nilable(String),
           prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+          prompt_cache_key: String,
           reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+          safety_identifier: String,
           service_tier:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
             ),
           store: T.nilable(T::Boolean),
+          stream_options:
+            T.nilable(
+              OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash
+            ),
           temperature: T.nilable(Float),
           text:
             T.any(
@@ -55,9 +61,11 @@ module OpenAI
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
               OpenAI::Responses::ToolChoiceFunction::OrHash,
-              OpenAI::Responses::ToolChoiceMcp::OrHash
+              OpenAI::Responses::ToolChoiceMcp::OrHash,
+              OpenAI::Responses::ToolChoiceCustom::OrHash
             ),
           tools:
             T::Array[
@@ -69,6 +77,7 @@ module OpenAI
               OpenAI::Responses::Tool::CodeInterpreter::OrHash,
               OpenAI::Responses::Tool::ImageGeneration::OrHash,
               OpenAI::Responses::Tool::LocalShell::OrHash,
+              OpenAI::Responses::CustomTool::OrHash,
               OpenAI::Responses::WebSearchTool::OrHash
             )
           ],
@@ -151,11 +160,21 @@ module OpenAI
         # Reference to a prompt template and its variables.
         # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
         prompt: nil,
+        # Used by OpenAI to cache responses for similar requests to optimize your cache
+        # hit rates. Replaces the `user` field.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        prompt_cache_key: nil,
         # **o-series models only**
         #
         # Configuration options for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
         reasoning: nil,
+        # A stable identifier used to help detect users of your application that may be
+        # violating OpenAI's usage policies. The IDs should be a string that uniquely
+        # identifies each user. We recommend hashing their username or email address, in
+        # order to avoid sending us any identifying information.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        safety_identifier: nil,
         # Specifies the processing type used for serving the request.
         #
         # - If set to 'auto', then the request will be processed with the service tier
@@ -176,6 +195,8 @@ module OpenAI
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
+        # Options for streaming responses. Only set this when you set `stream: true`.
+        stream_options: nil,
         # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
         # make the output more random, while lower values like 0.2 will make it more
         # focused and deterministic. We generally recommend altering this or `top_p` but
@@ -203,8 +224,10 @@ module OpenAI
         # Learn more about
         # [built-in tools](https://platform.openai.com/docs/guides/tools).
         # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-        #   the model to call your own code
+        #   the model to call your own code with strongly typed arguments and outputs.
+        #   Learn more about
         #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+        #   You can also use custom tools to call your own code.
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -223,9 +246,11 @@ module OpenAI
         # - `disabled` (default): If a model response will exceed the context window size
         #   for a model, the request will fail with a 400 error.
         truncation: nil,
-        #
-        #
-        #
+        # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        # `prompt_cache_key` instead to maintain caching optimizations. A stable
+        # identifier for your end-users. Used to boost cache hit rates by better bucketing
+        # similar requests and to help OpenAI detect and prevent abuse.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
         # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
         # for streaming and non-streaming use cases, respectively.
```
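Taken together, the `create` hunks above add three request-level knobs: `prompt_cache_key`, `safety_identifier`, and `stream_options`. A minimal sketch of a non-streaming call using the first two; the model symbol, cache key, and email are illustrative, and the client constructor is the gem's standard entry point:

```ruby
require "digest"
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: :"gpt-5", # among the ChatModel values added in this release
  input: "Summarize this change in two sentences.",
  # Buckets similar requests together to improve prompt-cache hit rates.
  prompt_cache_key: "changelog-summary-v1",
  # Hashed end-user id for abuse detection; avoids sending raw PII.
  safety_identifier: Digest::SHA256.hexdigest("user@example.com")
)

puts response.id
```

Per the comment blocks in the diff, both fields replace uses of the now-deprecated `user` parameter.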
```diff
@@ -268,12 +293,18 @@ module OpenAI
           parallel_tool_calls: T.nilable(T::Boolean),
           previous_response_id: T.nilable(String),
           prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+          prompt_cache_key: String,
           reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+          safety_identifier: String,
           service_tier:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
             ),
           store: T.nilable(T::Boolean),
+          stream_options:
+            T.nilable(
+              OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash
+            ),
           temperature: T.nilable(Float),
           text:
             T.nilable(
@@ -285,9 +316,11 @@ module OpenAI
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
               OpenAI::Responses::ToolChoiceFunction::OrHash,
-              OpenAI::Responses::ToolChoiceMcp::OrHash
+              OpenAI::Responses::ToolChoiceMcp::OrHash,
+              OpenAI::Responses::ToolChoiceCustom::OrHash
             ),
           tools:
             T::Array[
@@ -299,6 +332,7 @@ module OpenAI
               OpenAI::Responses::Tool::CodeInterpreter::OrHash,
               OpenAI::Responses::Tool::ImageGeneration::OrHash,
               OpenAI::Responses::Tool::LocalShell::OrHash,
+              OpenAI::Responses::CustomTool::OrHash,
               OpenAI::Responses::WebSearchTool::OrHash
             )
           ],
@@ -385,11 +419,21 @@ module OpenAI
         # Reference to a prompt template and its variables.
         # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
         prompt: nil,
+        # Used by OpenAI to cache responses for similar requests to optimize your cache
+        # hit rates. Replaces the `user` field.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        prompt_cache_key: nil,
         # **o-series models only**
         #
         # Configuration options for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
         reasoning: nil,
+        # A stable identifier used to help detect users of your application that may be
+        # violating OpenAI's usage policies. The IDs should be a string that uniquely
+        # identifies each user. We recommend hashing their username or email address, in
+        # order to avoid sending us any identifying information.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        safety_identifier: nil,
         # Specifies the processing type used for serving the request.
         #
         # - If set to 'auto', then the request will be processed with the service tier
@@ -410,6 +454,8 @@ module OpenAI
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
+        # Options for streaming responses. Only set this when you set `stream: true`.
+        stream_options: nil,
         # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
         # make the output more random, while lower values like 0.2 will make it more
         # focused and deterministic. We generally recommend altering this or `top_p` but
@@ -437,8 +483,10 @@ module OpenAI
         # Learn more about
         # [built-in tools](https://platform.openai.com/docs/guides/tools).
         # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-        #   the model to call your own code
+        #   the model to call your own code with strongly typed arguments and outputs.
+        #   Learn more about
         #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+        #   You can also use custom tools to call your own code.
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -457,9 +505,11 @@ module OpenAI
         # - `disabled` (default): If a model response will exceed the context window size
         #   for a model, the request will fail with a 400 error.
         truncation: nil,
-        #
-        #
-        #
+        # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        # `prompt_cache_key` instead to maintain caching optimizations. A stable
+        # identifier for your end-users. Used to boost cache hit rates by better bucketing
+        # similar requests and to help OpenAI detect and prevent abuse.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
         # There is no need to provide `stream:`. Instead, use `#stream_raw` or `#create`
         # for streaming and non-streaming use cases, respectively.
```
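The same unions reappear in the streaming overload: `OpenAI::Responses::CustomTool` joins `tools`, and `ToolChoiceAllowed`/`ToolChoiceCustom` join `tool_choice`. A hedged sketch of forcing a call to a free-form custom tool; the `code_exec` tool is invented for illustration, and the hash shapes mirror the new `CustomTool` and `ToolChoiceCustom` models:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: :"gpt-5",
  input: "Use the code_exec tool to print the first five primes.",
  # A custom tool receives free-form text instead of typed JSON arguments.
  tools: [{type: :custom, name: "code_exec", description: "Executes Ruby code."}],
  # ToolChoiceCustom: require a call to the named custom tool.
  tool_choice: {type: :custom, name: "code_exec"}
)

call = response.output.find { |item| item.type == :custom_tool_call }
puts call&.input # the raw text payload the model produced for the tool
```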
```diff
@@ -470,12 +520,30 @@ module OpenAI
 
       # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
       #
-      # Creates a model response
-      #
+      # Creates a model response. Provide
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [image](https://platform.openai.com/docs/guides/images) inputs to generate
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+      # the model call your own
+      # [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+      # built-in [tools](https://platform.openai.com/docs/guides/tools) like
+      # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+      # your own data as input for the model's response.
       sig do
         params(
+          background: T.nilable(T::Boolean),
+          include:
+            T.nilable(
+              T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
+            ),
           input:
             T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants),
+          instructions: T.nilable(String),
+          max_output_tokens: T.nilable(Integer),
+          max_tool_calls: T.nilable(Integer),
+          metadata: T.nilable(T::Hash[Symbol, String]),
           model:
             T.nilable(
               T.any(
@@ -484,18 +552,12 @@ module OpenAI
                 OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
               )
             ),
-          background: T.nilable(T::Boolean),
-          include:
-            T.nilable(
-              T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
-            ),
-          instructions: T.nilable(String),
-          max_output_tokens: T.nilable(Integer),
-          metadata: T.nilable(T::Hash[Symbol, String]),
           parallel_tool_calls: T.nilable(T::Boolean),
           previous_response_id: T.nilable(String),
           prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+          prompt_cache_key: String,
           reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+          safety_identifier: String,
           service_tier:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -503,20 +565,25 @@ module OpenAI
           store: T.nilable(T::Boolean),
           temperature: T.nilable(Float),
           text:
-            T.
-
-
+            T.nilable(
+              T.any(
+                OpenAI::Responses::ResponseTextConfig::OrHash,
+                OpenAI::StructuredOutput::JsonSchemaConverter
+              )
             ),
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
-              OpenAI::Responses::ToolChoiceFunction::OrHash
+              OpenAI::Responses::ToolChoiceFunction::OrHash,
+              OpenAI::Responses::ToolChoiceMcp::OrHash
             ),
           tools:
             T.nilable(
               T::Array[
                 T.any(
+                  OpenAI::StructuredOutput::JsonSchemaConverter,
                   OpenAI::Responses::FunctionTool::OrHash,
                   OpenAI::Responses::FileSearchTool::OrHash,
                   OpenAI::Responses::ComputerTool::OrHash,
@@ -524,65 +591,184 @@ module OpenAI
                   OpenAI::Responses::Tool::CodeInterpreter::OrHash,
                   OpenAI::Responses::Tool::ImageGeneration::OrHash,
                   OpenAI::Responses::Tool::LocalShell::OrHash,
-                  OpenAI::Responses::WebSearchTool::OrHash
-                  OpenAI::StructuredOutput::JsonSchemaConverter
+                  OpenAI::Responses::WebSearchTool::OrHash
                 )
               ]
             ),
+          top_logprobs: T.nilable(Integer),
           top_p: T.nilable(Float),
           truncation:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol
             ),
-          user:
-
-          request_options: T.nilable(OpenAI::RequestOptions::OrHash)
+          user: String,
+          request_options: OpenAI::RequestOptions::OrHash
         ).returns(OpenAI::Streaming::ResponseStream)
       end
       def stream(
-        # Text, image, or file inputs to the model, used to generate a response.
-        input: nil,
-        # Model ID used to generate the response, like `gpt-4o` or `o3`.
-        model: nil,
         # Whether to run the model response in the background.
+        # [Learn more](https://platform.openai.com/docs/guides/background).
         background: nil,
-        # Specify additional output data to include in the model response.
+        # Specify additional output data to include in the model response. Currently
+        # supported values are:
+        #
+        # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+        #   in code interpreter tool call items.
+        # - `computer_call_output.output.image_url`: Include image urls from the computer
+        #   call output.
+        # - `file_search_call.results`: Include the search results of the file search tool
+        #   call.
+        # - `message.input_image.image_url`: Include image urls from the input message.
+        # - `message.output_text.logprobs`: Include logprobs with assistant messages.
+        # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+        #   tokens in reasoning item outputs. This enables reasoning items to be used in
+        #   multi-turn conversations when using the Responses API statelessly (like when
+        #   the `store` parameter is set to `false`, or when an organization is enrolled
+        #   in the zero data retention program).
         include: nil,
+        # Text, image, or file inputs to the model, used to generate a response.
+        #
+        # Learn more:
+        #
+        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+        # - [Image inputs](https://platform.openai.com/docs/guides/images)
+        # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+        # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+        # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+        input: nil,
         # A system (or developer) message inserted into the model's context.
+        #
+        # When using along with `previous_response_id`, the instructions from a previous
+        # response will not be carried over to the next response. This makes it simple to
+        # swap out system (or developer) messages in new responses.
         instructions: nil,
-        # An upper bound for the number of tokens that can be generated for a response
+        # An upper bound for the number of tokens that can be generated for a response,
+        # including visible output tokens and
+        # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
         max_output_tokens: nil,
-        #
+        # The maximum number of total calls to built-in tools that can be processed in a
+        # response. This maximum number applies across all built-in tool calls, not per
+        # individual tool. Any further attempts to call a tool by the model will be
+        # ignored.
+        max_tool_calls: nil,
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
         metadata: nil,
+        # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+        # wide range of models with different capabilities, performance characteristics,
+        # and price points. Refer to the
+        # [model guide](https://platform.openai.com/docs/models) to browse and compare
+        # available models.
+        model: nil,
         # Whether to allow the model to run tool calls in parallel.
         parallel_tool_calls: nil,
         # The unique ID of the previous response to the model. Use this to create
-        # multi-turn conversations.
+        # multi-turn conversations. Learn more about
+        # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
         previous_response_id: nil,
         # Reference to a prompt template and its variables.
+        # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
         prompt: nil,
-        #
+        # Used by OpenAI to cache responses for similar requests to optimize your cache
+        # hit rates. Replaces the `user` field.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        prompt_cache_key: nil,
+        # **o-series models only**
+        #
+        # Configuration options for
+        # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
         reasoning: nil,
-        #
+        # A stable identifier used to help detect users of your application that may be
+        # violating OpenAI's usage policies. The IDs should be a string that uniquely
+        # identifies each user. We recommend hashing their username or email address, in
+        # order to avoid sending us any identifying information.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        safety_identifier: nil,
+        # Specifies the processing type used for serving the request.
+        #
+        # - If set to 'auto', then the request will be processed with the service tier
+        #   configured in the Project settings. Unless otherwise configured, the Project
+        #   will use 'default'.
+        # - If set to 'default', then the request will be processed with the standard
+        #   pricing and performance for the selected model.
+        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+        #   'priority', then the request will be processed with the corresponding service
+        #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+        #   Priority processing.
+        # - When not set, the default behavior is 'auto'.
+        #
+        # When the `service_tier` parameter is set, the response body will include the
+        # `service_tier` value based on the processing mode actually used to serve the
+        # request. This response value may be different from the value set in the
+        # parameter.
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
-        # What sampling temperature to use, between 0 and 2.
+        # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+        # make the output more random, while lower values like 0.2 will make it more
+        # focused and deterministic. We generally recommend altering this or `top_p` but
+        # not both.
         temperature: nil,
-        # Configuration options for a text response from the model.
+        # Configuration options for a text response from the model. Can be plain text,
+        # structured JSON data, or text that conforms to a custom grammar. Learn more:
+        #
+        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+        # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+        # - [Custom grammars](https://platform.openai.com/docs/guides/custom-grammars)
         text: nil,
-        # How the model should select which tool (or tools) to use when generating a
+        # How the model should select which tool (or tools) to use when generating a
+        # response. See the `tools` parameter to see how to specify which tools the model
+        # can call.
         tool_choice: nil,
-        # An array of tools the model may call while generating a response.
+        # An array of tools the model may call while generating a response. You can
+        # specify which tool to use by setting the `tool_choice` parameter.
+        #
+        # The two categories of tools you can provide the model are:
+        #
+        # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+        #   capabilities, like
+        #   [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        #   [file search](https://platform.openai.com/docs/guides/tools-file-search).
+        #   Learn more about
+        #   [built-in tools](https://platform.openai.com/docs/guides/tools).
+        # - **Custom tools**: Free form tools which the model can call with flexible
+        #   inputs and outputs. Learn more about
+        #   [custom tools](https://platform.openai.com/docs/guides/custom-tools).
+        # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+        #   the model to call your own code with strongly typed arguments and outputs.
+        #   Learn more about
+        #   [function calling](https://platform.openai.com/docs/guides/function-calling).
+        #   You can also use
+        #   [custom tools](https://platform.openai.com/docs/guides/custom-tools) to call
+        #   your own code.
         tools: nil,
-        # An
+        # An integer between 0 and 20 specifying the number of most likely tokens to
+        # return at each token position, each with an associated log probability.
+        top_logprobs: nil,
+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or `temperature` but not both.
         top_p: nil,
         # The truncation strategy to use for the model response.
+        #
+        # - `auto`: If the context of this response and previous ones exceeds the model's
+        #   context window size, the model will truncate the response to fit the context
+        #   window by dropping input items in the middle of the conversation.
+        # - `disabled` (default): If a model response will exceed the context window size
+        #   for a model, the request will fail with a 400 error.
         truncation: nil,
-        #
+        # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        # `prompt_cache_key` instead to maintain caching optimizations. A stable
+        # identifier for your end-users. Used to boost cache hit rates by better bucketing
+        # similar requests and to help OpenAI detect and prevent abuse.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
-        # The sequence number of the event after which to start streaming (for resuming streams).
-        starting_after: nil,
         request_options: {}
       )
       end
```
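The reworked `stream` helper now exposes the full `create` parameter surface (note that `starting_after` moved off it and onto the retrieval methods below). A sketch of consuming the stream; the `respond_to?(:delta)` guard is a deliberately loose way to pick out text-delta events without naming the exact event classes:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Returns an OpenAI::Streaming::ResponseStream; `stream:` itself is never
# passed, as the sig comments above note.
stream = client.responses.stream(
  model: :"gpt-5",
  input: "Stream a two-sentence summary of semantic versioning."
)

stream.each do |event|
  # Print text deltas as they arrive; skip reasoning/tool-call/done events.
  print(event.delta) if event.respond_to?(:delta) && event.delta.is_a?(String)
end
puts
```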
```diff
@@ -594,6 +780,7 @@ module OpenAI
         params(
           response_id: String,
           include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          include_obfuscation: T::Boolean,
           starting_after: Integer,
           stream: T.noreturn,
           request_options: OpenAI::RequestOptions::OrHash
@@ -605,6 +792,13 @@ module OpenAI
         # Additional fields to include in the response. See the `include` parameter for
         # Response creation above for more information.
         include: nil,
+        # When true, stream obfuscation will be enabled. Stream obfuscation adds random
+        # characters to an `obfuscation` field on streaming delta events to normalize
+        # payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+        # fields are included by default, but add a small amount of overhead to the data
+        # stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+        # you trust the network links between your application and the OpenAI API.
+        include_obfuscation: nil,
         # The sequence number of the event after which to start streaming.
         starting_after: nil,
         # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
@@ -621,6 +815,7 @@ module OpenAI
         params(
           response_id: String,
           include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          include_obfuscation: T::Boolean,
           starting_after: Integer,
           stream: T.noreturn,
           request_options: OpenAI::RequestOptions::OrHash
@@ -636,6 +831,13 @@ module OpenAI
         # Additional fields to include in the response. See the `include` parameter for
         # Response creation above for more information.
         include: nil,
+        # When true, stream obfuscation will be enabled. Stream obfuscation adds random
+        # characters to an `obfuscation` field on streaming delta events to normalize
+        # payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+        # fields are included by default, but add a small amount of overhead to the data
+        # stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+        # you trust the network links between your application and the OpenAI API.
+        include_obfuscation: nil,
         # The sequence number of the event after which to start streaming.
         starting_after: nil,
         # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
```
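`include_obfuscation` is added to both streaming-retrieve overloads. A hedged sketch of resuming a stored response without the obfuscation padding; the response id and sequence number are illustrative, and the positional-id calling convention follows the gem's other retrieve methods:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

events = client.responses.retrieve_streaming(
  "resp_abc123",
  # Drop the random `obfuscation` padding described above to save bandwidth;
  # only do this if you trust the links between your app and the OpenAI API.
  include_obfuscation: false,
  starting_after: 42 # resume after event sequence number 42
)

events.each { |event| puts event.type }
```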
data/sig/openai/models/beta/assistant_update_params.rbs:

```diff
@@ -83,6 +83,12 @@ module OpenAI
 
       type model =
         String
+        | :"gpt-5"
+        | :"gpt-5-mini"
+        | :"gpt-5-nano"
+        | :"gpt-5-2025-08-07"
+        | :"gpt-5-mini-2025-08-07"
+        | :"gpt-5-nano-2025-08-07"
         | :"gpt-4.1"
         | :"gpt-4.1-mini"
         | :"gpt-4.1-nano"
@@ -125,6 +131,12 @@ module OpenAI
 
       def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::model]
 
+      GPT_5: :"gpt-5"
+      GPT_5_MINI: :"gpt-5-mini"
+      GPT_5_NANO: :"gpt-5-nano"
+      GPT_5_2025_08_07: :"gpt-5-2025-08-07"
+      GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07"
+      GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07"
       GPT_4_1: :"gpt-4.1"
       GPT_4_1_MINI: :"gpt-4.1-mini"
       GPT_4_1_NANO: :"gpt-4.1-nano"
```
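With the new enum members, an existing assistant can be moved onto a GPT-5 model. A one-liner sketch; the assistant id is illustrative:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# :"gpt-5" is one of the newly accepted model symbols listed above.
assistant = client.beta.assistants.update("asst_abc123", model: :"gpt-5")
puts assistant.model
```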
data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs (new file):

```diff
@@ -0,0 +1,29 @@
+module OpenAI
+  module Models
+    class ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
+
+    module Chat
+      type chat_completion_allowed_tool_choice =
+        {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+
+      class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
+        attr_accessor allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools
+
+        attr_accessor type: :allowed_tools
+
+        def initialize: (
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          ?type: :allowed_tools
+        ) -> void
+
+        def to_hash: -> {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+      end
+    end
+  end
+end
```
data/sig/openai/models/chat/chat_completion_allowed_tools.rbs (new file):

```diff
@@ -0,0 +1,38 @@
+module OpenAI
+  module Models
+    module Chat
+      type chat_completion_allowed_tools =
+        {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+      class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel
+        attr_accessor mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode
+
+        attr_accessor tools: ::Array[::Hash[Symbol, top]]
+
+        def initialize: (
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        ) -> void
+
+        def to_hash: -> {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+        type mode = :auto | :required
+
+        module Mode
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          REQUIRED: :required
+
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAllowedTools::mode]
+        end
+      end
+    end
+  end
+end
```
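These two signatures together describe the new `allowed_tools` tool choice for Chat Completions: declare a full tool list, then constrain the model to a subset. A hedged sketch using plain hashes (which the SDK coerces into these models); the weather tool is invented for illustration:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

weather_tool = {
  type: :function,
  function: {
    name: "get_weather",
    description: "Look up current weather for a city.",
    parameters: {
      type: "object",
      properties: {city: {type: "string"}},
      required: ["city"]
    }
  }
}

completion = client.chat.completions.create(
  model: :"gpt-5",
  messages: [{role: :user, content: "What's the weather in Lisbon?"}],
  tools: [weather_tool],
  # ChatCompletionAllowedToolChoice: restrict the model to the listed subset.
  # `mode: :auto` lets it decide whether to call; `:required` would force one.
  tool_choice: {
    type: :allowed_tools,
    allowed_tools: {
      mode: :auto,
      tools: [{type: "function", function: {name: "get_weather"}}]
    }
  }
)

completion.choices.first.message.tool_calls&.each do |call|
  puts call.function.name if call.respond_to?(:function)
end
```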