openai 0.16.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +24 -0
- data/README.md +14 -20
- data/lib/openai/internal/transport/base_client.rb +1 -1
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/internal/type/union.rb +13 -17
- data/lib/openai/models/beta/assistant_create_params.rb +4 -5
- data/lib/openai/models/beta/assistant_update_params.rb +22 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
- data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
- data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
- data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
- data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
- data/lib/openai/models/chat/chat_completion_message.rb +3 -5
- data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
- data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
- data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
- data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
- data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
- data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
- data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
- data/lib/openai/models/chat/completion_create_params.rb +35 -12
- data/lib/openai/models/chat_model.rb +7 -0
- data/lib/openai/models/custom_tool_input_format.rb +76 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
- data/lib/openai/models/evals/run_cancel_response.rb +2 -2
- data/lib/openai/models/evals/run_create_params.rb +2 -2
- data/lib/openai/models/evals/run_create_response.rb +2 -2
- data/lib/openai/models/evals/run_list_response.rb +2 -2
- data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
- data/lib/openai/models/reasoning.rb +4 -5
- data/lib/openai/models/reasoning_effort.rb +4 -4
- data/lib/openai/models/response_format_text_grammar.rb +27 -0
- data/lib/openai/models/response_format_text_python.rb +20 -0
- data/lib/openai/models/responses/custom_tool.rb +48 -0
- data/lib/openai/models/responses/response.rb +20 -12
- data/lib/openai/models/responses/response_create_params.rb +48 -10
- data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
- data/lib/openai/models/responses/response_input_item.rb +7 -1
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
- data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
- data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
- data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
- data/lib/openai/models/responses/response_stream_event.rb +13 -11
- data/lib/openai/models/responses/response_text_config.rb +27 -1
- data/lib/openai/models/responses/tool.rb +5 -1
- data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
- data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
- data/lib/openai/models/vector_store_search_params.rb +6 -1
- data/lib/openai/models.rb +6 -0
- data/lib/openai/resources/beta/assistants.rb +2 -2
- data/lib/openai/resources/beta/threads/runs.rb +2 -2
- data/lib/openai/resources/chat/completions.rb +16 -10
- data/lib/openai/resources/responses.rb +38 -22
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -2
- data/rbi/openai/internal/transport/base_client.rbi +1 -1
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
- data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
- data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
- data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
- data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
- data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
- data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
- data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
- data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
- data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
- data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
- data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
- data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
- data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
- data/rbi/openai/models/chat/completion_create_params.rbi +106 -25
- data/rbi/openai/models/chat_model.rbi +11 -0
- data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
- data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
- data/rbi/openai/models/evals/run_create_params.rbi +4 -0
- data/rbi/openai/models/evals/run_create_response.rbi +2 -0
- data/rbi/openai/models/evals/run_list_response.rbi +2 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
- data/rbi/openai/models/reasoning.rbi +6 -8
- data/rbi/openai/models/reasoning_effort.rbi +4 -4
- data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
- data/rbi/openai/models/response_format_text_python.rbi +30 -0
- data/rbi/openai/models/responses/custom_tool.rbi +96 -0
- data/rbi/openai/models/responses/response.rbi +15 -5
- data/rbi/openai/models/responses/response_create_params.rbi +94 -7
- data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
- data/rbi/openai/models/responses/response_input_item.rbi +2 -0
- data/rbi/openai/models/responses/response_output_item.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
- data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
- data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
- data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
- data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
- data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
- data/rbi/openai/models/responses/response_text_config.rbi +64 -1
- data/rbi/openai/models/responses/tool.rbi +1 -0
- data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
- data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
- data/rbi/openai/models/vector_store_search_params.rbi +12 -1
- data/rbi/openai/models.rbi +6 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -8
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
- data/rbi/openai/resources/chat/completions.rbi +44 -19
- data/rbi/openai/resources/responses.rbi +215 -41
- data/sig/openai/internal/transport/base_client.rbs +1 -1
- data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
- data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
- data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
- data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
- data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
- data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
- data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
- data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
- data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
- data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +23 -6
- data/sig/openai/models/chat_model.rbs +15 -1
- data/sig/openai/models/custom_tool_input_format.rbs +61 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/response_format_text_grammar.rbs +15 -0
- data/sig/openai/models/response_format_text_python.rbs +13 -0
- data/sig/openai/models/responses/custom_tool.rbs +43 -0
- data/sig/openai/models/responses/response.rbs +2 -0
- data/sig/openai/models/responses/response_create_params.rbs +19 -0
- data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
- data/sig/openai/models/responses/response_input_item.rbs +2 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
- data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
- data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
- data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
- data/sig/openai/models/responses/response_stream_event.rbs +4 -2
- data/sig/openai/models/responses/response_text_config.rbs +22 -3
- data/sig/openai/models/responses/tool.rbs +1 -0
- data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
- data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
- data/sig/openai/models/vector_store_search_params.rbs +2 -1
- data/sig/openai/models.rbs +6 -0
- data/sig/openai/resources/chat/completions.rbs +4 -2
- data/sig/openai/resources/responses.rbs +32 -0
- metadata +59 -8
- data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
- data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
data/rbi/openai/resources/responses.rbi:

```diff
@@ -48,6 +48,10 @@ module OpenAI
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
             ),
           store: T.nilable(T::Boolean),
+          stream_options:
+            T.nilable(
+              OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash
+            ),
           temperature: T.nilable(Float),
           text:
             T.any(
@@ -57,9 +61,11 @@ module OpenAI
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
               OpenAI::Responses::ToolChoiceFunction::OrHash,
-              OpenAI::Responses::ToolChoiceMcp::OrHash
+              OpenAI::Responses::ToolChoiceMcp::OrHash,
+              OpenAI::Responses::ToolChoiceCustom::OrHash
             ),
           tools:
             T::Array[
@@ -71,6 +77,7 @@ module OpenAI
                 OpenAI::Responses::Tool::CodeInterpreter::OrHash,
                 OpenAI::Responses::Tool::ImageGeneration::OrHash,
                 OpenAI::Responses::Tool::LocalShell::OrHash,
+                OpenAI::Responses::CustomTool::OrHash,
                 OpenAI::Responses::WebSearchTool::OrHash
               )
             ],
@@ -188,6 +195,8 @@ module OpenAI
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
+        # Options for streaming responses. Only set this when you set `stream: true`.
+        stream_options: nil,
         # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
         # make the output more random, while lower values like 0.2 will make it more
         # focused and deterministic. We generally recommend altering this or `top_p` but
@@ -215,8 +224,10 @@ module OpenAI
         # Learn more about
         # [built-in tools](https://platform.openai.com/docs/guides/tools).
         # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-        # the model to call your own code
+        # the model to call your own code with strongly typed arguments and outputs.
+        # Learn more about
         # [function calling](https://platform.openai.com/docs/guides/function-calling).
+        # You can also use custom tools to call your own code.
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -290,6 +301,10 @@ module OpenAI
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
             ),
           store: T.nilable(T::Boolean),
+          stream_options:
+            T.nilable(
+              OpenAI::Responses::ResponseCreateParams::StreamOptions::OrHash
+            ),
           temperature: T.nilable(Float),
           text:
             T.nilable(
@@ -301,9 +316,11 @@ module OpenAI
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
               OpenAI::Responses::ToolChoiceFunction::OrHash,
-              OpenAI::Responses::ToolChoiceMcp::OrHash
+              OpenAI::Responses::ToolChoiceMcp::OrHash,
+              OpenAI::Responses::ToolChoiceCustom::OrHash
             ),
           tools:
             T::Array[
@@ -315,6 +332,7 @@ module OpenAI
                 OpenAI::Responses::Tool::CodeInterpreter::OrHash,
                 OpenAI::Responses::Tool::ImageGeneration::OrHash,
                 OpenAI::Responses::Tool::LocalShell::OrHash,
+                OpenAI::Responses::CustomTool::OrHash,
                 OpenAI::Responses::WebSearchTool::OrHash
               )
             ],
@@ -436,6 +454,8 @@ module OpenAI
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
+        # Options for streaming responses. Only set this when you set `stream: true`.
+        stream_options: nil,
         # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
         # make the output more random, while lower values like 0.2 will make it more
         # focused and deterministic. We generally recommend altering this or `top_p` but
@@ -463,8 +483,10 @@ module OpenAI
         # Learn more about
         # [built-in tools](https://platform.openai.com/docs/guides/tools).
         # - **Function calls (custom tools)**: Functions that are defined by you, enabling
-        # the model to call your own code
+        # the model to call your own code with strongly typed arguments and outputs.
+        # Learn more about
         # [function calling](https://platform.openai.com/docs/guides/function-calling).
+        # You can also use custom tools to call your own code.
         tools: nil,
         # An integer between 0 and 20 specifying the number of most likely tokens to
         # return at each token position, each with an associated log probability.
@@ -498,12 +520,30 @@ module OpenAI
 
       # See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
       #
-      # Creates a model response
-      #
+      # Creates a model response. Provide
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [image](https://platform.openai.com/docs/guides/images) inputs to generate
+      # [text](https://platform.openai.com/docs/guides/text) or
+      # [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
+      # the model call your own
+      # [custom code](https://platform.openai.com/docs/guides/function-calling) or use
+      # built-in [tools](https://platform.openai.com/docs/guides/tools) like
+      # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+      # [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
+      # your own data as input for the model's response.
      sig do
        params(
+          background: T.nilable(T::Boolean),
+          include:
+            T.nilable(
+              T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
+            ),
           input:
             T.nilable(OpenAI::Responses::ResponseCreateParams::Input::Variants),
+          instructions: T.nilable(String),
+          max_output_tokens: T.nilable(Integer),
+          max_tool_calls: T.nilable(Integer),
+          metadata: T.nilable(T::Hash[Symbol, String]),
           model:
             T.nilable(
               T.any(
@@ -512,18 +552,12 @@ module OpenAI
                 OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
               )
             ),
-          background: T.nilable(T::Boolean),
-          include:
-            T.nilable(
-              T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
-            ),
-          instructions: T.nilable(String),
-          max_output_tokens: T.nilable(Integer),
-          metadata: T.nilable(T::Hash[Symbol, String]),
           parallel_tool_calls: T.nilable(T::Boolean),
           previous_response_id: T.nilable(String),
           prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
+          prompt_cache_key: String,
           reasoning: T.nilable(OpenAI::Reasoning::OrHash),
+          safety_identifier: String,
           service_tier:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::ServiceTier::OrSymbol
@@ -531,20 +565,25 @@ module OpenAI
           store: T.nilable(T::Boolean),
           temperature: T.nilable(Float),
           text:
-            T.
-
-
+            T.nilable(
+              T.any(
+                OpenAI::Responses::ResponseTextConfig::OrHash,
+                OpenAI::StructuredOutput::JsonSchemaConverter
+              )
             ),
           tool_choice:
             T.any(
               OpenAI::Responses::ToolChoiceOptions::OrSymbol,
+              OpenAI::Responses::ToolChoiceAllowed::OrHash,
               OpenAI::Responses::ToolChoiceTypes::OrHash,
-              OpenAI::Responses::ToolChoiceFunction::OrHash
+              OpenAI::Responses::ToolChoiceFunction::OrHash,
+              OpenAI::Responses::ToolChoiceMcp::OrHash
             ),
           tools:
             T.nilable(
               T::Array[
                 T.any(
+                  OpenAI::StructuredOutput::JsonSchemaConverter,
                   OpenAI::Responses::FunctionTool::OrHash,
                   OpenAI::Responses::FileSearchTool::OrHash,
                   OpenAI::Responses::ComputerTool::OrHash,
@@ -552,65 +591,184 @@ module OpenAI
                   OpenAI::Responses::Tool::CodeInterpreter::OrHash,
                   OpenAI::Responses::Tool::ImageGeneration::OrHash,
                   OpenAI::Responses::Tool::LocalShell::OrHash,
-                  OpenAI::Responses::WebSearchTool::OrHash,
-                  OpenAI::StructuredOutput::JsonSchemaConverter
+                  OpenAI::Responses::WebSearchTool::OrHash
                 )
               ]
             ),
+          top_logprobs: T.nilable(Integer),
           top_p: T.nilable(Float),
           truncation:
             T.nilable(
               OpenAI::Responses::ResponseCreateParams::Truncation::OrSymbol
             ),
-          user:
-
-          request_options: T.nilable(OpenAI::RequestOptions::OrHash)
+          user: String,
+          request_options: OpenAI::RequestOptions::OrHash
         ).returns(OpenAI::Streaming::ResponseStream)
       end
       def stream(
-        # Text, image, or file inputs to the model, used to generate a response.
-        input: nil,
-        # Model ID used to generate the response, like `gpt-4o` or `o3`.
-        model: nil,
         # Whether to run the model response in the background.
+        # [Learn more](https://platform.openai.com/docs/guides/background).
         background: nil,
-        # Specify additional output data to include in the model response.
+        # Specify additional output data to include in the model response. Currently
+        # supported values are:
+        #
+        # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
+        # in code interpreter tool call items.
+        # - `computer_call_output.output.image_url`: Include image urls from the computer
+        # call output.
+        # - `file_search_call.results`: Include the search results of the file search tool
+        # call.
+        # - `message.input_image.image_url`: Include image urls from the input message.
+        # - `message.output_text.logprobs`: Include logprobs with assistant messages.
+        # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
+        # tokens in reasoning item outputs. This enables reasoning items to be used in
+        # multi-turn conversations when using the Responses API statelessly (like when
+        # the `store` parameter is set to `false`, or when an organization is enrolled
+        # in the zero data retention program).
         include: nil,
+        # Text, image, or file inputs to the model, used to generate a response.
+        #
+        # Learn more:
+        #
+        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+        # - [Image inputs](https://platform.openai.com/docs/guides/images)
+        # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+        # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+        # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+        input: nil,
         # A system (or developer) message inserted into the model's context.
+        #
+        # When using along with `previous_response_id`, the instructions from a previous
+        # response will not be carried over to the next response. This makes it simple to
+        # swap out system (or developer) messages in new responses.
         instructions: nil,
-        # An upper bound for the number of tokens that can be generated for a response
+        # An upper bound for the number of tokens that can be generated for a response,
+        # including visible output tokens and
+        # [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
         max_output_tokens: nil,
-        #
+        # The maximum number of total calls to built-in tools that can be processed in a
+        # response. This maximum number applies across all built-in tool calls, not per
+        # individual tool. Any further attempts to call a tool by the model will be
+        # ignored.
+        max_tool_calls: nil,
+        # Set of 16 key-value pairs that can be attached to an object. This can be useful
+        # for storing additional information about the object in a structured format, and
+        # querying for objects via API or the dashboard.
+        #
+        # Keys are strings with a maximum length of 64 characters. Values are strings with
+        # a maximum length of 512 characters.
         metadata: nil,
+        # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+        # wide range of models with different capabilities, performance characteristics,
+        # and price points. Refer to the
+        # [model guide](https://platform.openai.com/docs/models) to browse and compare
+        # available models.
+        model: nil,
         # Whether to allow the model to run tool calls in parallel.
         parallel_tool_calls: nil,
         # The unique ID of the previous response to the model. Use this to create
-        # multi-turn conversations.
+        # multi-turn conversations. Learn more about
+        # [conversation state](https://platform.openai.com/docs/guides/conversation-state).
         previous_response_id: nil,
         # Reference to a prompt template and its variables.
+        # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
         prompt: nil,
-        #
+        # Used by OpenAI to cache responses for similar requests to optimize your cache
+        # hit rates. Replaces the `user` field.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+        prompt_cache_key: nil,
+        # **o-series models only**
+        #
+        # Configuration options for
+        # [reasoning models](https://platform.openai.com/docs/guides/reasoning).
         reasoning: nil,
-        #
+        # A stable identifier used to help detect users of your application that may be
+        # violating OpenAI's usage policies. The IDs should be a string that uniquely
+        # identifies each user. We recommend hashing their username or email address, in
+        # order to avoid sending us any identifying information.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+        safety_identifier: nil,
+        # Specifies the processing type used for serving the request.
+        #
+        # - If set to 'auto', then the request will be processed with the service tier
+        # configured in the Project settings. Unless otherwise configured, the Project
+        # will use 'default'.
+        # - If set to 'default', then the request will be processed with the standard
+        # pricing and performance for the selected model.
+        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+        # 'priority', then the request will be processed with the corresponding service
+        # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+        # Priority processing.
+        # - When not set, the default behavior is 'auto'.
+        #
+        # When the `service_tier` parameter is set, the response body will include the
+        # `service_tier` value based on the processing mode actually used to serve the
+        # request. This response value may be different from the value set in the
+        # parameter.
         service_tier: nil,
         # Whether to store the generated model response for later retrieval via API.
         store: nil,
-        # What sampling temperature to use, between 0 and 2.
+        # What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
+        # make the output more random, while lower values like 0.2 will make it more
+        # focused and deterministic. We generally recommend altering this or `top_p` but
+        # not both.
         temperature: nil,
-        # Configuration options for a text response from the model.
+        # Configuration options for a text response from the model. Can be plain text,
+        # structured JSON data, or text that conforms to a custom grammar. Learn more:
+        #
+        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+        # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+        # - [Custom grammars](https://platform.openai.com/docs/guides/custom-grammars)
         text: nil,
-        # How the model should select which tool (or tools) to use when generating a
+        # How the model should select which tool (or tools) to use when generating a
+        # response. See the `tools` parameter to see how to specify which tools the model
+        # can call.
         tool_choice: nil,
-        # An array of tools the model may call while generating a response.
+        # An array of tools the model may call while generating a response. You can
+        # specify which tool to use by setting the `tool_choice` parameter.
+        #
+        # The two categories of tools you can provide the model are:
+        #
+        # - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
+        # capabilities, like
+        # [web search](https://platform.openai.com/docs/guides/tools-web-search) or
+        # [file search](https://platform.openai.com/docs/guides/tools-file-search).
+        # Learn more about
+        # [built-in tools](https://platform.openai.com/docs/guides/tools).
+        # - **Custom tools**: Free form tools which the model can call with flexible
+        # inputs and outputs. Learn more about
+        # [custom tools](https://platform.openai.com/docs/guides/custom-tools).
+        # - **Function calls (custom tools)**: Functions that are defined by you, enabling
+        # the model to call your own code with strongly typed arguments and outputs.
+        # Learn more about
+        # [function calling](https://platform.openai.com/docs/guides/function-calling).
+        # You can also use
+        # [custom tools](https://platform.openai.com/docs/guides/custom-tools) to call
+        # your own code.
         tools: nil,
-        # An
+        # An integer between 0 and 20 specifying the number of most likely tokens to
+        # return at each token position, each with an associated log probability.
+        top_logprobs: nil,
+        # An alternative to sampling with temperature, called nucleus sampling, where the
+        # model considers the results of the tokens with top_p probability mass. So 0.1
+        # means only the tokens comprising the top 10% probability mass are considered.
+        #
+        # We generally recommend altering this or `temperature` but not both.
         top_p: nil,
         # The truncation strategy to use for the model response.
+        #
+        # - `auto`: If the context of this response and previous ones exceeds the model's
+        # context window size, the model will truncate the response to fit the context
+        # window by dropping input items in the middle of the conversation.
+        # - `disabled` (default): If a model response will exceed the context window size
+        # for a model, the request will fail with a 400 error.
         truncation: nil,
-        #
+        # This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+        # `prompt_cache_key` instead to maintain caching optimizations. A stable
+        # identifier for your end-users. Used to boost cache hit rates by better bucketing
+        # similar requests and to help OpenAI detect and prevent abuse.
+        # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
         user: nil,
-        # The sequence number of the event after which to start streaming (for resuming streams).
-        starting_after: nil,
         request_options: {}
       )
       end
@@ -622,6 +780,7 @@ module OpenAI
        params(
          response_id: String,
          include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          include_obfuscation: T::Boolean,
          starting_after: Integer,
          stream: T.noreturn,
          request_options: OpenAI::RequestOptions::OrHash
@@ -633,6 +792,13 @@ module OpenAI
        # Additional fields to include in the response. See the `include` parameter for
        # Response creation above for more information.
        include: nil,
+        # When true, stream obfuscation will be enabled. Stream obfuscation adds random
+        # characters to an `obfuscation` field on streaming delta events to normalize
+        # payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+        # fields are included by default, but add a small amount of overhead to the data
+        # stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+        # you trust the network links between your application and the OpenAI API.
+        include_obfuscation: nil,
        # The sequence number of the event after which to start streaming.
        starting_after: nil,
        # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
@@ -649,6 +815,7 @@ module OpenAI
        params(
          response_id: String,
          include: T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol],
+          include_obfuscation: T::Boolean,
          starting_after: Integer,
          stream: T.noreturn,
          request_options: OpenAI::RequestOptions::OrHash
@@ -664,6 +831,13 @@ module OpenAI
        # Additional fields to include in the response. See the `include` parameter for
        # Response creation above for more information.
        include: nil,
+        # When true, stream obfuscation will be enabled. Stream obfuscation adds random
+        # characters to an `obfuscation` field on streaming delta events to normalize
+        # payload sizes as a mitigation to certain side-channel attacks. These obfuscation
+        # fields are included by default, but add a small amount of overhead to the data
+        # stream. You can set `include_obfuscation` to false to optimize for bandwidth if
+        # you trust the network links between your application and the OpenAI API.
+        include_obfuscation: nil,
        # The sequence number of the event after which to start streaming.
        starting_after: nil,
        # There is no need to provide `stream:`. Instead, use `#retrieve_streaming` or
```
data/sig/openai/models/beta/assistant_update_params.rbs:

```diff
@@ -83,6 +83,12 @@ module OpenAI
 
        type model =
          String
+          | :"gpt-5"
+          | :"gpt-5-mini"
+          | :"gpt-5-nano"
+          | :"gpt-5-2025-08-07"
+          | :"gpt-5-mini-2025-08-07"
+          | :"gpt-5-nano-2025-08-07"
          | :"gpt-4.1"
          | :"gpt-4.1-mini"
          | :"gpt-4.1-nano"
@@ -125,6 +131,12 @@ module OpenAI
 
        def self?.variants: -> ::Array[OpenAI::Models::Beta::AssistantUpdateParams::model]
 
+        GPT_5: :"gpt-5"
+        GPT_5_MINI: :"gpt-5-mini"
+        GPT_5_NANO: :"gpt-5-nano"
+        GPT_5_2025_08_07: :"gpt-5-2025-08-07"
+        GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07"
+        GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07"
        GPT_4_1: :"gpt-4.1"
        GPT_4_1_MINI: :"gpt-4.1-mini"
        GPT_4_1_NANO: :"gpt-4.1-nano"
```
data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs (new file):

```diff
@@ -0,0 +1,29 @@
+module OpenAI
+  module Models
+    class ChatCompletionAllowedToolChoice = Chat::ChatCompletionAllowedToolChoice
+
+    module Chat
+      type chat_completion_allowed_tool_choice =
+        {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+
+      class ChatCompletionAllowedToolChoice < OpenAI::Internal::Type::BaseModel
+        attr_accessor allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools
+
+        attr_accessor type: :allowed_tools
+
+        def initialize: (
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          ?type: :allowed_tools
+        ) -> void
+
+        def to_hash: -> {
+          allowed_tools: OpenAI::Chat::ChatCompletionAllowedTools,
+          type: :allowed_tools
+        }
+      end
+    end
+  end
+end
```
data/sig/openai/models/chat/chat_completion_allowed_tools.rbs (new file):

```diff
@@ -0,0 +1,38 @@
+module OpenAI
+  module Models
+    module Chat
+      type chat_completion_allowed_tools =
+        {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+      class ChatCompletionAllowedTools < OpenAI::Internal::Type::BaseModel
+        attr_accessor mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode
+
+        attr_accessor tools: ::Array[::Hash[Symbol, top]]
+
+        def initialize: (
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        ) -> void
+
+        def to_hash: -> {
+          mode: OpenAI::Models::Chat::ChatCompletionAllowedTools::mode,
+          tools: ::Array[::Hash[Symbol, top]]
+        }
+
+        type mode = :auto | :required
+
+        module Mode
+          extend OpenAI::Internal::Type::Enum
+
+          AUTO: :auto
+          REQUIRED: :required
+
+          def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionAllowedTools::mode]
+        end
+      end
+    end
+  end
+end
```
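These two new RBS files type the Chat Completions variant of allowed tools, where the allow-list is nested under an `allowed_tools` key and `mode` is `:auto` or `:required`. A sketch mirroring those shapes, again with placeholder model and function names:

```ruby
weather_tool = {
  type: "function",
  function: {
    name: "get_weather",
    parameters: {type: "object", properties: {city: {type: "string"}}}
  }
}

# mode: :auto lets the model pick any tool from the allow-list;
# :required would force it to call one of them.
completion = client.chat.completions.create(
  model: "gpt-5",
  messages: [{role: "user", content: "What's the weather in Paris?"}],
  tools: [weather_tool],
  tool_choice: {
    type: :allowed_tools,
    allowed_tools: {
      mode: :auto,
      tools: [{type: "function", function: {name: "get_weather"}}]
    }
  }
)
```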
data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs:

```diff
@@ -11,7 +11,7 @@ module OpenAI
          function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
          name: String,
          refusal: String?,
-          tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]
+          tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
        }
 
      class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel
@@ -29,11 +29,11 @@ module OpenAI
 
        attr_accessor refusal: String?
 
-        attr_reader tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]?
+        attr_reader tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]?
 
        def tool_calls=: (
-          ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]
-        ) -> ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]
+          ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
+        ) -> ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
 
        def initialize: (
          ?audio: OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio?,
@@ -41,7 +41,7 @@ module OpenAI
          ?function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
          ?name: String,
          ?refusal: String?,
-          ?tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall],
+          ?tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call],
          ?role: :assistant
        ) -> void
 
@@ -52,7 +52,7 @@ module OpenAI
          function_call: OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall?,
          name: String,
          refusal: String?,
-          tool_calls: ::Array[OpenAI::Chat::ChatCompletionMessageToolCall]
+          tool_calls: ::Array[OpenAI::Models::Chat::chat_completion_message_tool_call]
        }
 
        type audio = { id: String }
```
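With `tool_calls` retyped from a single class to the union `chat_completion_message_tool_call` (function or custom tool calls), callers now dispatch on the concrete variant. A sketch reusing `completion` from the previous example, assuming the accessors mirror the API fields (`function.name`/`function.arguments` and `custom.name`/`custom.input`):

```ruby
require "json"

(completion.choices.first.message.tool_calls || []).each do |tool_call|
  case tool_call
  when OpenAI::Chat::ChatCompletionMessageFunctionToolCall
    # Classic function call: arguments arrive as a JSON-encoded string.
    args = JSON.parse(tool_call.function.arguments)
    puts "function #{tool_call.function.name}(#{args.inspect})"
  when OpenAI::Chat::ChatCompletionMessageCustomToolCall
    # New custom tool call: free-form string input.
    puts "custom #{tool_call.custom.name}: #{tool_call.custom.input}"
  end
end
```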