openai 0.35.2 → 0.36.0
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/README.md +21 -15
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/models/batch_create_params.rb +9 -6
- data/lib/openai/models/beta/assistant_create_params.rb +9 -5
- data/lib/openai/models/beta/assistant_update_params.rb +9 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +10 -6
- data/lib/openai/models/chat/completion_create_params.rb +37 -6
- data/lib/openai/models/chat_model.rb +5 -0
- data/lib/openai/models/conversations/conversation_create_params.rb +2 -2
- data/lib/openai/models/conversations/conversation_item.rb +13 -1
- data/lib/openai/models/conversations/conversation_item_list.rb +2 -2
- data/lib/openai/models/conversations/item_create_params.rb +2 -2
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +9 -5
- data/lib/openai/models/evals/run_cancel_response.rb +20 -12
- data/lib/openai/models/evals/run_create_params.rb +20 -12
- data/lib/openai/models/evals/run_create_response.rb +20 -12
- data/lib/openai/models/evals/run_list_response.rb +20 -12
- data/lib/openai/models/evals/run_retrieve_response.rb +20 -12
- data/lib/openai/models/graders/score_model_grader.rb +9 -5
- data/lib/openai/models/reasoning.rb +10 -6
- data/lib/openai/models/reasoning_effort.rb +10 -5
- data/lib/openai/models/responses/apply_patch_tool.rb +20 -0
- data/lib/openai/models/responses/function_shell_tool.rb +20 -0
- data/lib/openai/models/responses/input_token_count_params.rb +14 -8
- data/lib/openai/models/responses/response.rb +46 -11
- data/lib/openai/models/responses/response_apply_patch_tool_call.rb +179 -0
- data/lib/openai/models/responses/response_apply_patch_tool_call_output.rb +77 -0
- data/lib/openai/models/responses/response_create_params.rb +42 -9
- data/lib/openai/models/responses/response_function_shell_call_output_content.rb +88 -0
- data/lib/openai/models/responses/response_function_shell_tool_call.rb +109 -0
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +158 -0
- data/lib/openai/models/responses/response_input_item.rb +395 -1
- data/lib/openai/models/responses/response_item.rb +13 -1
- data/lib/openai/models/responses/response_item_list.rb +2 -2
- data/lib/openai/models/responses/response_output_item.rb +13 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/tool.rb +7 -1
- data/lib/openai/models/responses/tool_choice_apply_patch.rb +20 -0
- data/lib/openai/models/responses/tool_choice_shell.rb +20 -0
- data/lib/openai/resources/chat/completions.rb +6 -2
- data/lib/openai/resources/conversations/items.rb +3 -3
- data/lib/openai/resources/conversations.rb +1 -1
- data/lib/openai/resources/responses/input_items.rb +1 -1
- data/lib/openai/resources/responses/input_tokens.rb +3 -3
- data/lib/openai/resources/responses.rb +12 -8
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +9 -0
- data/rbi/openai/models/batch_create_params.rbi +17 -9
- data/rbi/openai/models/beta/assistant_create_params.rbi +18 -10
- data/rbi/openai/models/beta/assistant_update_params.rbi +18 -10
- data/rbi/openai/models/beta/threads/run_create_params.rbi +18 -10
- data/rbi/openai/models/chat/completion_create_params.rbi +82 -10
- data/rbi/openai/models/chat_model.rbi +7 -0
- data/rbi/openai/models/conversations/conversation_create_params.rbi +12 -0
- data/rbi/openai/models/conversations/conversation_item.rbi +4 -0
- data/rbi/openai/models/conversations/conversation_item_list.rbi +4 -0
- data/rbi/openai/models/conversations/item_create_params.rbi +12 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +18 -10
- data/rbi/openai/models/evals/run_cancel_response.rbi +40 -20
- data/rbi/openai/models/evals/run_create_params.rbi +44 -20
- data/rbi/openai/models/evals/run_create_response.rbi +40 -20
- data/rbi/openai/models/evals/run_list_response.rbi +40 -20
- data/rbi/openai/models/evals/run_retrieve_response.rbi +40 -20
- data/rbi/openai/models/graders/score_model_grader.rbi +18 -10
- data/rbi/openai/models/reasoning.rbi +18 -10
- data/rbi/openai/models/reasoning_effort.rbi +10 -5
- data/rbi/openai/models/responses/apply_patch_tool.rbi +30 -0
- data/rbi/openai/models/responses/function_shell_tool.rbi +33 -0
- data/rbi/openai/models/responses/input_token_count_params.rbi +18 -4
- data/rbi/openai/models/responses/response.rbi +73 -2
- data/rbi/openai/models/responses/response_apply_patch_tool_call.rbi +300 -0
- data/rbi/openai/models/responses/response_apply_patch_tool_call_output.rbi +129 -0
- data/rbi/openai/models/responses/response_create_params.rbi +87 -5
- data/rbi/openai/models/responses/response_function_shell_call_output_content.rbi +157 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call.rbi +198 -0
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +254 -0
- data/rbi/openai/models/responses/response_input_item.rbi +675 -0
- data/rbi/openai/models/responses/response_item.rbi +4 -0
- data/rbi/openai/models/responses/response_item_list.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +4 -0
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +4 -0
- data/rbi/openai/models/responses/tool.rbi +2 -0
- data/rbi/openai/models/responses/tool_choice_apply_patch.rbi +33 -0
- data/rbi/openai/models/responses/tool_choice_shell.rbi +30 -0
- data/rbi/openai/resources/batches.rbi +4 -3
- data/rbi/openai/resources/beta/assistants.rbi +18 -10
- data/rbi/openai/resources/beta/threads/runs.rbi +18 -10
- data/rbi/openai/resources/chat/completions.rbi +38 -12
- data/rbi/openai/resources/conversations/items.rbi +4 -0
- data/rbi/openai/resources/conversations.rbi +4 -0
- data/rbi/openai/resources/responses/input_tokens.rbi +5 -1
- data/rbi/openai/resources/responses.rbi +28 -2
- data/sig/openai/models/batch_create_params.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +16 -0
- data/sig/openai/models/chat_model.rbs +11 -1
- data/sig/openai/models/conversations/conversation_item.rbs +4 -0
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/responses/apply_patch_tool.rbs +15 -0
- data/sig/openai/models/responses/function_shell_tool.rbs +15 -0
- data/sig/openai/models/responses/input_token_count_params.rbs +2 -0
- data/sig/openai/models/responses/response.rbs +18 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call.rbs +123 -0
- data/sig/openai/models/responses/response_apply_patch_tool_call_output.rbs +60 -0
- data/sig/openai/models/responses/response_create_params.rbs +18 -0
- data/sig/openai/models/responses/response_function_shell_call_output_content.rbs +64 -0
- data/sig/openai/models/responses/response_function_shell_tool_call.rbs +88 -0
- data/sig/openai/models/responses/response_function_shell_tool_call_output.rbs +115 -0
- data/sig/openai/models/responses/response_input_item.rbs +276 -0
- data/sig/openai/models/responses/response_item.rbs +4 -0
- data/sig/openai/models/responses/response_output_item.rbs +4 -0
- data/sig/openai/models/responses/tool.rbs +2 -0
- data/sig/openai/models/responses/tool_choice_apply_patch.rbs +15 -0
- data/sig/openai/models/responses/tool_choice_shell.rbs +15 -0
- data/sig/openai/resources/chat/completions.rbs +2 -0
- data/sig/openai/resources/responses.rbs +2 -0
- metadata +29 -2
```diff
@@ -19,6 +19,10 @@ module OpenAI
             OpenAI::Responses::ResponseOutputItem::ImageGenerationCall,
             OpenAI::Responses::ResponseCodeInterpreterToolCall,
             OpenAI::Responses::ResponseOutputItem::LocalShellCall,
+            OpenAI::Responses::ResponseFunctionShellToolCall,
+            OpenAI::Responses::ResponseFunctionShellToolCallOutput,
+            OpenAI::Responses::ResponseApplyPatchToolCall,
+            OpenAI::Responses::ResponseApplyPatchToolCallOutput,
             OpenAI::Responses::ResponseOutputItem::McpCall,
             OpenAI::Responses::ResponseOutputItem::McpListTools,
             OpenAI::Responses::ResponseOutputItem::McpApprovalRequest,
@@ -42,6 +42,10 @@ module OpenAI
           OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash,
           OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
           OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash,
+          OpenAI::Responses::ResponseFunctionShellToolCall::OrHash,
+          OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash,
+          OpenAI::Responses::ResponseApplyPatchToolCall::OrHash,
+          OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpCall::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash,
@@ -42,6 +42,10 @@ module OpenAI
           OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::OrHash,
           OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
           OpenAI::Responses::ResponseOutputItem::LocalShellCall::OrHash,
+          OpenAI::Responses::ResponseFunctionShellToolCall::OrHash,
+          OpenAI::Responses::ResponseFunctionShellToolCallOutput::OrHash,
+          OpenAI::Responses::ResponseApplyPatchToolCall::OrHash,
+          OpenAI::Responses::ResponseApplyPatchToolCallOutput::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpCall::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpListTools::OrHash,
           OpenAI::Responses::ResponseOutputItem::McpApprovalRequest::OrHash,
```
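These hunks register the four new output item classes (function shell tool calls/outputs and apply_patch tool calls/outputs) in the output-item unions, so they can appear when iterating a response's output. A minimal, hypothetical sketch of handling them; the prompt and handler bodies are placeholders, not from this diff:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

response = client.responses.create(
  model: :"gpt-5.1",
  input: "Run the tests, then patch the failing file."
)

response.output.each do |item|
  case item
  when OpenAI::Responses::ResponseFunctionShellToolCall
    # the model requested shell command execution
  when OpenAI::Responses::ResponseApplyPatchToolCall
    # the model requested a patch be applied to a file
  else
    # messages, function calls, and other pre-existing item types
  end
end
```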
```diff
@@ -17,7 +17,9 @@ module OpenAI
           OpenAI::Responses::Tool::CodeInterpreter,
           OpenAI::Responses::Tool::ImageGeneration,
           OpenAI::Responses::Tool::LocalShell,
+          OpenAI::Responses::FunctionShellTool,
           OpenAI::Responses::CustomTool,
+          OpenAI::Responses::ApplyPatchTool,
           OpenAI::Responses::WebSearchTool,
           OpenAI::Responses::WebSearchPreviewTool
         )
```
```diff
@@ -0,0 +1,33 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ToolChoiceApplyPatch < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(
+              OpenAI::Responses::ToolChoiceApplyPatch,
+              OpenAI::Internal::AnyHash
+            )
+          end
+
+        # The tool to call. Always `apply_patch`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # Forces the model to call the apply_patch tool when executing a tool call.
+        sig { params(type: Symbol).returns(T.attached_class) }
+        def self.new(
+          # The tool to call. Always `apply_patch`.
+          type: :apply_patch
+        )
+        end
+
+        sig { override.returns({ type: Symbol }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
@@ -0,0 +1,30 @@
+# typed: strong
+
+module OpenAI
+  module Models
+    module Responses
+      class ToolChoiceShell < OpenAI::Internal::Type::BaseModel
+        OrHash =
+          T.type_alias do
+            T.any(OpenAI::Responses::ToolChoiceShell, OpenAI::Internal::AnyHash)
+          end
+
+        # The tool to call. Always `shell`.
+        sig { returns(Symbol) }
+        attr_accessor :type
+
+        # Forces the model to call the function shell tool when a tool call is required.
+        sig { params(type: Symbol).returns(T.attached_class) }
+        def self.new(
+          # The tool to call. Always `shell`.
+          type: :shell
+        )
+        end
+
+        sig { override.returns({ type: Symbol }) }
+        def to_hash
+        end
+      end
+    end
+  end
+end
```
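Both new tool-choice types are parameterless markers. A hedged sketch of forcing the shell tool on a Responses call; the hash forms assume the SDK's usual hash-to-model coercion for union parameters:

```ruby
# Sketch; assumes `client` is an OpenAI::Client as constructed above.
response = client.responses.create(
  model: :"gpt-5.1",
  input: "List the repository's top-level files.",
  tools: [{ type: :shell }],      # FunctionShellTool as a plain hash
  tool_choice: { type: :shell }   # ToolChoiceShell: force the shell tool
)
```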
```diff
@@ -21,9 +21,10 @@ module OpenAI
         # is supported.
         completion_window:,
         # The endpoint to be used for all requests in the batch. Currently
-        # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`,
-        # are supported. Note that `/v1/embeddings` batches are also
-        # maximum of 50,000 embedding inputs across all requests in the
+        # `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, `/v1/completions`,
+        # and `/v1/moderations` are supported. Note that `/v1/embeddings` batches are also
+        # restricted to a maximum of 50,000 embedding inputs across all requests in the
+        # batch.
         endpoint:,
         # The ID of an uploaded file that contains requests for the new batch.
         #
```
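In practice this means `/v1/moderations` now joins the endpoints accepted by `Batches#create`, for example (the file ID is a placeholder):

```ruby
# Sketch; assumes `client` as above and an uploaded .jsonl of moderation requests.
batch = client.batches.create(
  input_file_id: "file-abc123",
  endpoint: :"/v1/moderations",   # newly accepted in 0.36.0
  completion_window: :"24h"
)
```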
```diff
@@ -62,12 +62,16 @@ module OpenAI
         name: nil,
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
         #
-        #
-        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -192,12 +196,16 @@ module OpenAI
         name: nil,
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
         #
-        #
-        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -125,12 +125,16 @@ module OpenAI
         parallel_tool_calls: nil,
         # Body param: Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
         #
-        #
-        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # Body param: Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
@@ -307,12 +311,16 @@ module OpenAI
         parallel_tool_calls: nil,
         # Body param: Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
         #
-        #
-        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # Body param: Specifies the format that the model must output. Compatible with
         # [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
```
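Per the updated documentation, `:none` becomes a valid `reasoning_effort` value, the default for `gpt-5.1` and rejected by earlier models. An illustrative call (the message content is a placeholder):

```ruby
# Sketch; assumes `client` as above.
chat = client.chat.completions.create(
  model: :"gpt-5.1",
  reasoning_effort: :none,   # new value; unsupported on models before gpt-5.1
  messages: [{ role: "user", content: "Summarize this changelog in one line." }]
)
```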
```diff
@@ -66,6 +66,10 @@ module OpenAI
         T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
         presence_penalty: T.nilable(Float),
         prompt_cache_key: String,
+        prompt_cache_retention:
+          T.nilable(
+            OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol
+          ),
         reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
         response_format:
           T.any(
@@ -217,14 +221,23 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
+        # The retention policy for the prompt cache. Set to `24h` to enable extended
+        # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+        # of 24 hours.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+        prompt_cache_retention: nil,
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
-        #
-        #
-        #
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
+        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # An object specifying the format that the model must output.
         #
@@ -388,6 +401,10 @@ module OpenAI
         T.nilable(OpenAI::Chat::ChatCompletionPredictionContent::OrHash),
         presence_penalty: T.nilable(Float),
         prompt_cache_key: String,
+        prompt_cache_retention:
+          T.nilable(
+            OpenAI::Chat::CompletionCreateParams::PromptCacheRetention::OrSymbol
+          ),
         reasoning_effort: T.nilable(OpenAI::ReasoningEffort::OrSymbol),
         response_format:
           T.any(
@@ -537,14 +554,23 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
+        # The retention policy for the prompt cache. Set to `24h` to enable extended
+        # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+        # of 24 hours.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+        prompt_cache_retention: nil,
         # Constrains effort on reasoning for
         # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-        # supported values are `minimal`, `low`, `medium`, and `high`. Reducing
-        # effort can result in faster responses and fewer tokens used on
-        # response.
-        #
-        #
-        #
+        # supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
+        # reasoning effort can result in faster responses and fewer tokens used on
+        # reasoning in a response.
+        #
+        # - `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
+        #   reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
+        #   calls are supported for all reasoning values in gpt-5.1.
+        # - All models before `gpt-5.1` default to `medium` reasoning effort, and do not
+        #   support `none`.
+        # - The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
         reasoning_effort: nil,
         # An object specifying the format that the model must output.
         #
```
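Chat Completions thus gains a nilable `prompt_cache_retention` parameter alongside the existing `prompt_cache_key`. A hedged usage sketch; the cache key and prefix are illustrative:

```ruby
# Sketch; assumes `client` as above.
shared_prefix = "You are a support bot for ..." # a long, frequently reused prefix
chat = client.chat.completions.create(
  model: :"gpt-5.1",
  messages: [
    { role: "system", content: shared_prefix },
    { role: "user", content: "Next question..." }
  ],
  prompt_cache_key: "support-bot-v2",
  prompt_cache_retention: :"24h"   # extended retention, up to 24 hours
)
```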
```diff
@@ -25,6 +25,10 @@ module OpenAI
           OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
           OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,
           OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash,
+          OpenAI::Responses::ResponseInputItem::ShellCall::OrHash,
+          OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash,
+          OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash,
+          OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash,
           OpenAI::Responses::ResponseInputItem::McpListTools::OrHash,
           OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash,
           OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash,
@@ -27,6 +27,10 @@ module OpenAI
           OpenAI::Responses::ResponseCodeInterpreterToolCall::OrHash,
           OpenAI::Responses::ResponseInputItem::LocalShellCall::OrHash,
           OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::OrHash,
+          OpenAI::Responses::ResponseInputItem::ShellCall::OrHash,
+          OpenAI::Responses::ResponseInputItem::ShellCallOutput::OrHash,
+          OpenAI::Responses::ResponseInputItem::ApplyPatchCall::OrHash,
+          OpenAI::Responses::ResponseInputItem::ApplyPatchCallOutput::OrHash,
           OpenAI::Responses::ResponseInputItem::McpListTools::OrHash,
           OpenAI::Responses::ResponseInputItem::McpApprovalRequest::OrHash,
           OpenAI::Responses::ResponseInputItem::McpApprovalResponse::OrHash,
@@ -33,7 +33,9 @@ module OpenAI
             OpenAI::Responses::ToolChoiceTypes::OrHash,
             OpenAI::Responses::ToolChoiceFunction::OrHash,
             OpenAI::Responses::ToolChoiceMcp::OrHash,
-            OpenAI::Responses::ToolChoiceCustom::OrHash
+            OpenAI::Responses::ToolChoiceCustom::OrHash,
+            OpenAI::Responses::ToolChoiceApplyPatch::OrHash,
+            OpenAI::Responses::ToolChoiceShell::OrHash
           )
         ),
       tools:
@@ -47,7 +49,9 @@ module OpenAI
             OpenAI::Responses::Tool::CodeInterpreter::OrHash,
             OpenAI::Responses::Tool::ImageGeneration::OrHash,
             OpenAI::Responses::Tool::LocalShell::OrHash,
+            OpenAI::Responses::FunctionShellTool::OrHash,
             OpenAI::Responses::CustomTool::OrHash,
+            OpenAI::Responses::ApplyPatchTool::OrHash,
             OpenAI::Responses::WebSearchTool::OrHash,
             OpenAI::Responses::WebSearchPreviewTool::OrHash
           )
@@ -51,6 +51,10 @@ module OpenAI
         previous_response_id: T.nilable(String),
         prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
         prompt_cache_key: String,
+        prompt_cache_retention:
+          T.nilable(
+            OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol
+          ),
         reasoning: T.nilable(OpenAI::Reasoning::OrHash),
         safety_identifier: String,
         service_tier:
@@ -75,7 +79,9 @@ module OpenAI
             OpenAI::Responses::ToolChoiceTypes::OrHash,
             OpenAI::Responses::ToolChoiceFunction::OrHash,
             OpenAI::Responses::ToolChoiceMcp::OrHash,
-            OpenAI::Responses::ToolChoiceCustom::OrHash
+            OpenAI::Responses::ToolChoiceCustom::OrHash,
+            OpenAI::Responses::ToolChoiceApplyPatch::OrHash,
+            OpenAI::Responses::ToolChoiceShell::OrHash
           ),
         tools:
           T::Array[
@@ -87,7 +93,9 @@ module OpenAI
             OpenAI::Responses::Tool::CodeInterpreter::OrHash,
             OpenAI::Responses::Tool::ImageGeneration::OrHash,
             OpenAI::Responses::Tool::LocalShell::OrHash,
+            OpenAI::Responses::FunctionShellTool::OrHash,
             OpenAI::Responses::CustomTool::OrHash,
+            OpenAI::Responses::ApplyPatchTool::OrHash,
             OpenAI::Responses::WebSearchTool::OrHash,
             OpenAI::Responses::WebSearchPreviewTool::OrHash
           )
@@ -183,6 +191,11 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
+        # The retention policy for the prompt cache. Set to `24h` to enable extended
+        # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+        # of 24 hours.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+        prompt_cache_retention: nil,
         # **gpt-5 and o-series models only**
         #
         # Configuration options for
@@ -322,6 +335,10 @@ module OpenAI
         previous_response_id: T.nilable(String),
         prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
         prompt_cache_key: String,
+        prompt_cache_retention:
+          T.nilable(
+            OpenAI::Responses::ResponseCreateParams::PromptCacheRetention::OrSymbol
+          ),
         reasoning: T.nilable(OpenAI::Reasoning::OrHash),
         safety_identifier: String,
         service_tier:
@@ -348,7 +365,9 @@ module OpenAI
             OpenAI::Responses::ToolChoiceTypes::OrHash,
             OpenAI::Responses::ToolChoiceFunction::OrHash,
             OpenAI::Responses::ToolChoiceMcp::OrHash,
-            OpenAI::Responses::ToolChoiceCustom::OrHash
+            OpenAI::Responses::ToolChoiceCustom::OrHash,
+            OpenAI::Responses::ToolChoiceApplyPatch::OrHash,
+            OpenAI::Responses::ToolChoiceShell::OrHash
           ),
         tools:
           T::Array[
@@ -360,7 +379,9 @@ module OpenAI
             OpenAI::Responses::Tool::CodeInterpreter::OrHash,
             OpenAI::Responses::Tool::ImageGeneration::OrHash,
             OpenAI::Responses::Tool::LocalShell::OrHash,
+            OpenAI::Responses::FunctionShellTool::OrHash,
             OpenAI::Responses::CustomTool::OrHash,
+            OpenAI::Responses::ApplyPatchTool::OrHash,
             OpenAI::Responses::WebSearchTool::OrHash,
             OpenAI::Responses::WebSearchPreviewTool::OrHash
           )
@@ -460,6 +481,11 @@ module OpenAI
         # hit rates. Replaces the `user` field.
         # [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
         prompt_cache_key: nil,
+        # The retention policy for the prompt cache. Set to `24h` to enable extended
+        # prompt caching, which keeps cached prefixes active for longer, up to a maximum
+        # of 24 hours.
+        # [Learn more](https://platform.openai.com/docs/guides/prompt-caching#prompt-cache-retention).
+        prompt_cache_retention: nil,
         # **gpt-5 and o-series models only**
         #
         # Configuration options for
```
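The Responses API receives the same `prompt_cache_retention` parameter plus the new shell and apply_patch tools and tool choices. A combined, hypothetical sketch:

```ruby
# Sketch; assumes `client` as above. Both tool classes take no arguments.
response = client.responses.create(
  model: :"gpt-5.1",
  input: "Fix the failing spec and patch the source file.",
  tools: [
    OpenAI::Responses::FunctionShellTool.new,   # serializes to { type: :shell }
    OpenAI::Responses::ApplyPatchTool.new       # serializes to { type: :apply_patch }
  ],
  prompt_cache_retention: :"24h"
)
```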
```diff
@@ -61,6 +61,7 @@ module OpenAI
        | :"/v1/chat/completions"
        | :"/v1/embeddings"
        | :"/v1/completions"
+       | :"/v1/moderations"

      module Endpoint
        extend OpenAI::Internal::Type::Enum
@@ -69,6 +70,7 @@ module OpenAI
        V1_CHAT_COMPLETIONS: :"/v1/chat/completions"
        V1_EMBEDDINGS: :"/v1/embeddings"
        V1_COMPLETIONS: :"/v1/completions"
+       V1_MODERATIONS: :"/v1/moderations"

        def self?.values: -> ::Array[OpenAI::Models::BatchCreateParams::endpoint]
      end
@@ -20,6 +20,7 @@ module OpenAI
        prediction: OpenAI::Chat::ChatCompletionPredictionContent?,
        presence_penalty: Float?,
        prompt_cache_key: String,
+       prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?,
        reasoning_effort: OpenAI::Models::reasoning_effort?,
        response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
        safety_identifier: String,
@@ -89,6 +90,8 @@ module OpenAI

       def prompt_cache_key=: (String) -> String

+      attr_accessor prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?
+
       attr_accessor reasoning_effort: OpenAI::Models::reasoning_effort?

       attr_reader response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format?
@@ -159,6 +162,7 @@ module OpenAI
        ?prediction: OpenAI::Chat::ChatCompletionPredictionContent?,
        ?presence_penalty: Float?,
        ?prompt_cache_key: String,
+       ?prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?,
        ?reasoning_effort: OpenAI::Models::reasoning_effort?,
        ?response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
        ?safety_identifier: String,
@@ -196,6 +200,7 @@ module OpenAI
        prediction: OpenAI::Chat::ChatCompletionPredictionContent?,
        presence_penalty: Float?,
        prompt_cache_key: String,
+       prompt_cache_retention: OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention?,
        reasoning_effort: OpenAI::Models::reasoning_effort?,
        response_format: OpenAI::Models::Chat::CompletionCreateParams::response_format,
        safety_identifier: String,
@@ -288,6 +293,17 @@ module OpenAI
        def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::modality]
      end

+      type prompt_cache_retention = :"in-memory" | :"24h"
+
+      module PromptCacheRetention
+        extend OpenAI::Internal::Type::Enum
+
+        IN_MEMORY: :"in-memory"
+        PROMPT_CACHE_RETENTION_24H: :"24h"
+
+        def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::prompt_cache_retention]
+      end
+
      type response_format =
        OpenAI::ResponseFormatText
        | OpenAI::ResponseFormatJSONSchema
```
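As with the gem's other enums, the signature exposes the accepted retention values through `values`:

```ruby
OpenAI::Chat::CompletionCreateParams::PromptCacheRetention.values
# => [:"in-memory", :"24h"]
```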
```diff
@@ -1,7 +1,12 @@
 module OpenAI
   module Models
     type chat_model =
-      :"gpt-5"
+      :"gpt-5.1"
+      | :"gpt-5.1-2025-11-13"
+      | :"gpt-5.1-codex"
+      | :"gpt-5.1-mini"
+      | :"gpt-5.1-chat-latest"
+      | :"gpt-5"
       | :"gpt-5-mini"
       | :"gpt-5-nano"
       | :"gpt-5-2025-08-07"
@@ -67,6 +72,11 @@ module OpenAI
     module ChatModel
       extend OpenAI::Internal::Type::Enum

+      GPT_5_1: :"gpt-5.1"
+      GPT_5_1_2025_11_13: :"gpt-5.1-2025-11-13"
+      GPT_5_1_CODEX: :"gpt-5.1-codex"
+      GPT_5_1_MINI: :"gpt-5.1-mini"
+      GPT_5_1_CHAT_LATEST: :"gpt-5.1-chat-latest"
       GPT_5: :"gpt-5"
       GPT_5_MINI: :"gpt-5-mini"
       GPT_5_NANO: :"gpt-5-nano"
```
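The new model symbols can be passed as literals or through the matching constants, for example:

```ruby
# Sketch; assumes `client` as above.
chat = client.chat.completions.create(
  model: OpenAI::Models::ChatModel::GPT_5_1_MINI,   # same as :"gpt-5.1-mini"
  messages: [{ role: "user", content: "Hello!" }]
)
```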
```diff
@@ -16,6 +16,10 @@ module OpenAI
       | OpenAI::Responses::ResponseCodeInterpreterToolCall
       | OpenAI::Conversations::ConversationItem::LocalShellCall
       | OpenAI::Conversations::ConversationItem::LocalShellCallOutput
+      | OpenAI::Responses::ResponseFunctionShellToolCall
+      | OpenAI::Responses::ResponseFunctionShellToolCallOutput
+      | OpenAI::Responses::ResponseApplyPatchToolCall
+      | OpenAI::Responses::ResponseApplyPatchToolCallOutput
       | OpenAI::Conversations::ConversationItem::McpListTools
       | OpenAI::Conversations::ConversationItem::McpApprovalRequest
       | OpenAI::Conversations::ConversationItem::McpApprovalResponse
@@ -1,10 +1,11 @@
 module OpenAI
   module Models
-    type reasoning_effort = :minimal | :low | :medium | :high
+    type reasoning_effort = :none | :minimal | :low | :medium | :high

     module ReasoningEffort
       extend OpenAI::Internal::Type::Enum

+      NONE: :none
       MINIMAL: :minimal
       LOW: :low
       MEDIUM: :medium
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    module Responses
+      type apply_patch_tool = { type: :apply_patch }
+
+      class ApplyPatchTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor type: :apply_patch
+
+        def initialize: (?type: :apply_patch) -> void
+
+        def to_hash: -> { type: :apply_patch }
+      end
+    end
+  end
+end
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    module Responses
+      type function_shell_tool = { type: :shell }
+
+      class FunctionShellTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor type: :shell
+
+        def initialize: (?type: :shell) -> void
+
+        def to_hash: -> { type: :shell }
+      end
+    end
+  end
+end
```
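Per these signatures, both tools are parameterless marker types, so the model and hash forms are interchangeable:

```ruby
OpenAI::Responses::FunctionShellTool.new.to_hash   # => { type: :shell }
OpenAI::Responses::ApplyPatchTool.new.to_hash      # => { type: :apply_patch }
```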
```diff
@@ -142,6 +142,8 @@ module OpenAI
        | OpenAI::Responses::ToolChoiceFunction
        | OpenAI::Responses::ToolChoiceMcp
        | OpenAI::Responses::ToolChoiceCustom
+       | OpenAI::Responses::ToolChoiceApplyPatch
+       | OpenAI::Responses::ToolChoiceShell

      module ToolChoice
        extend OpenAI::Internal::Type::Union
@@ -24,6 +24,7 @@ module OpenAI
       previous_response_id: String?,
       prompt: OpenAI::Responses::ResponsePrompt?,
       prompt_cache_key: String,
+      prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?,
       reasoning: OpenAI::Reasoning?,
       safety_identifier: String,
       service_tier: OpenAI::Models::Responses::Response::service_tier?,
@@ -80,6 +81,8 @@ module OpenAI

      def prompt_cache_key=: (String) -> String

+      attr_accessor prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?
+
      attr_accessor reasoning: OpenAI::Reasoning?

      attr_reader safety_identifier: String?
@@ -135,6 +138,7 @@ module OpenAI
       ?previous_response_id: String?,
       ?prompt: OpenAI::Responses::ResponsePrompt?,
       ?prompt_cache_key: String,
+      ?prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?,
       ?reasoning: OpenAI::Reasoning?,
       ?safety_identifier: String,
       ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
@@ -169,6 +173,7 @@ module OpenAI
       previous_response_id: String?,
       prompt: OpenAI::Responses::ResponsePrompt?,
       prompt_cache_key: String,
+      prompt_cache_retention: OpenAI::Models::Responses::Response::prompt_cache_retention?,
       reasoning: OpenAI::Reasoning?,
       safety_identifier: String,
       service_tier: OpenAI::Models::Responses::Response::service_tier?,
@@ -230,6 +235,8 @@ module OpenAI
        | OpenAI::Responses::ToolChoiceFunction
        | OpenAI::Responses::ToolChoiceMcp
        | OpenAI::Responses::ToolChoiceCustom
+       | OpenAI::Responses::ToolChoiceApplyPatch
+       | OpenAI::Responses::ToolChoiceShell

      module ToolChoice
        extend OpenAI::Internal::Type::Union
@@ -247,6 +254,17 @@ module OpenAI
       def to_hash: -> { id: String }
      end

+      type prompt_cache_retention = :"in-memory" | :"24h"
+
+      module PromptCacheRetention
+        extend OpenAI::Internal::Type::Enum
+
+        IN_MEMORY: :"in-memory"
+        PROMPT_CACHE_RETENTION_24H: :"24h"
+
+        def self?.values: -> ::Array[OpenAI::Models::Responses::Response::prompt_cache_retention]
+      end
+
      type service_tier = :auto | :default | :flex | :scale | :priority

      module ServiceTier
```