openai 0.15.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +47 -0
- data/README.md +14 -20
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
- data/lib/openai/helpers/structured_output/union_of.rb +11 -1
- data/lib/openai/internal/transport/base_client.rb +1 -1
- data/lib/openai/internal/type/enum.rb +6 -6
- data/lib/openai/internal/type/union.rb +13 -17
- data/lib/openai/models/beta/assistant_create_params.rb +4 -5
- data/lib/openai/models/beta/assistant_update_params.rb +22 -5
- data/lib/openai/models/beta/threads/run_create_params.rb +4 -5
- data/lib/openai/models/chat/chat_completion_allowed_tool_choice.rb +33 -0
- data/lib/openai/models/chat/chat_completion_allowed_tools.rb +64 -0
- data/lib/openai/models/chat/chat_completion_assistant_message_param.rb +3 -5
- data/lib/openai/models/chat/chat_completion_custom_tool.rb +163 -0
- data/lib/openai/models/chat/chat_completion_function_tool.rb +29 -0
- data/lib/openai/models/chat/chat_completion_message.rb +3 -5
- data/lib/openai/models/chat/chat_completion_message_custom_tool_call.rb +60 -0
- data/lib/openai/models/chat/chat_completion_message_function_tool_call.rb +73 -0
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +10 -56
- data/lib/openai/models/chat/chat_completion_named_tool_choice.rb +2 -2
- data/lib/openai/models/chat/chat_completion_named_tool_choice_custom.rb +42 -0
- data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
- data/lib/openai/models/chat/chat_completion_stream_options.rb +14 -1
- data/lib/openai/models/chat/chat_completion_tool.rb +12 -14
- data/lib/openai/models/chat/chat_completion_tool_choice_option.rb +7 -1
- data/lib/openai/models/chat/completion_create_params.rb +65 -16
- data/lib/openai/models/chat_model.rb +7 -0
- data/lib/openai/models/custom_tool_input_format.rb +76 -0
- data/lib/openai/models/evals/create_eval_completions_run_data_source.rb +3 -3
- data/lib/openai/models/evals/run_cancel_response.rb +2 -2
- data/lib/openai/models/evals/run_create_params.rb +2 -2
- data/lib/openai/models/evals/run_create_response.rb +2 -2
- data/lib/openai/models/evals/run_list_response.rb +2 -2
- data/lib/openai/models/evals/run_retrieve_response.rb +2 -2
- data/lib/openai/models/reasoning.rb +4 -5
- data/lib/openai/models/reasoning_effort.rb +4 -4
- data/lib/openai/models/response_format_text_grammar.rb +27 -0
- data/lib/openai/models/response_format_text_python.rb +20 -0
- data/lib/openai/models/responses/custom_tool.rb +48 -0
- data/lib/openai/models/responses/response.rb +70 -16
- data/lib/openai/models/responses/response_create_params.rb +78 -14
- data/lib/openai/models/responses/response_custom_tool_call.rb +55 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_delta_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_input_done_event.rb +52 -0
- data/lib/openai/models/responses/response_custom_tool_call_output.rb +47 -0
- data/lib/openai/models/responses/response_input_item.rb +7 -1
- data/lib/openai/models/responses/response_output_item.rb +4 -1
- data/lib/openai/models/responses/response_output_item_added_event.rb +2 -2
- data/lib/openai/models/responses/response_output_item_done_event.rb +2 -2
- data/lib/openai/models/responses/response_reasoning_item.rb +36 -5
- data/lib/openai/models/responses/response_reasoning_text_delta_event.rb +63 -0
- data/lib/openai/models/responses/response_reasoning_text_done_event.rb +63 -0
- data/lib/openai/models/responses/response_retrieve_params.rb +14 -1
- data/lib/openai/models/responses/response_stream_event.rb +13 -11
- data/lib/openai/models/responses/response_text_config.rb +27 -1
- data/lib/openai/models/responses/tool.rb +5 -1
- data/lib/openai/models/responses/tool_choice_allowed.rb +73 -0
- data/lib/openai/models/responses/tool_choice_custom.rb +28 -0
- data/lib/openai/models/vector_store_search_params.rb +6 -1
- data/lib/openai/models.rb +6 -0
- data/lib/openai/resources/beta/assistants.rb +2 -2
- data/lib/openai/resources/beta/threads/runs.rb +2 -2
- data/lib/openai/resources/chat/completions.rb +26 -12
- data/lib/openai/resources/responses.rb +77 -36
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +19 -2
- data/rbi/openai/internal/transport/base_client.rbi +1 -1
- data/rbi/openai/models/beta/assistant_create_params.rbi +6 -8
- data/rbi/openai/models/beta/assistant_update_params.rbi +36 -8
- data/rbi/openai/models/beta/threads/run_create_params.rbi +6 -8
- data/rbi/openai/models/chat/chat_completion_allowed_tool_choice.rbi +60 -0
- data/rbi/openai/models/chat/chat_completion_allowed_tools.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_assistant_message_param.rbi +27 -4
- data/rbi/openai/models/chat/chat_completion_custom_tool.rbi +335 -0
- data/rbi/openai/models/chat/chat_completion_function_tool.rbi +51 -0
- data/rbi/openai/models/chat/chat_completion_message.rbi +17 -4
- data/rbi/openai/models/chat/chat_completion_message_custom_tool_call.rbi +105 -0
- data/rbi/openai/models/chat/chat_completion_message_function_tool_call.rbi +118 -0
- data/rbi/openai/models/chat/chat_completion_message_tool_call.rbi +9 -92
- data/rbi/openai/models/chat/chat_completion_named_tool_choice.rbi +2 -2
- data/rbi/openai/models/chat/chat_completion_named_tool_choice_custom.rbi +89 -0
- data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
- data/rbi/openai/models/chat/chat_completion_stream_options.rbi +30 -2
- data/rbi/openai/models/chat/chat_completion_tool.rbi +11 -30
- data/rbi/openai/models/chat/chat_completion_tool_choice_option.rbi +3 -1
- data/rbi/openai/models/chat/completion_create_params.rbi +150 -31
- data/rbi/openai/models/chat_model.rbi +11 -0
- data/rbi/openai/models/custom_tool_input_format.rbi +136 -0
- data/rbi/openai/models/evals/create_eval_completions_run_data_source.rbi +8 -4
- data/rbi/openai/models/evals/run_cancel_response.rbi +2 -0
- data/rbi/openai/models/evals/run_create_params.rbi +4 -0
- data/rbi/openai/models/evals/run_create_response.rbi +2 -0
- data/rbi/openai/models/evals/run_list_response.rbi +2 -0
- data/rbi/openai/models/evals/run_retrieve_response.rbi +2 -0
- data/rbi/openai/models/reasoning.rbi +6 -8
- data/rbi/openai/models/reasoning_effort.rbi +4 -4
- data/rbi/openai/models/response_format_text_grammar.rbi +35 -0
- data/rbi/openai/models/response_format_text_python.rbi +30 -0
- data/rbi/openai/models/responses/custom_tool.rbi +96 -0
- data/rbi/openai/models/responses/response.rbi +59 -11
- data/rbi/openai/models/responses/response_create_params.rbi +138 -13
- data/rbi/openai/models/responses/response_custom_tool_call.rbi +78 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_delta_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_input_done_event.rbi +75 -0
- data/rbi/openai/models/responses/response_custom_tool_call_output.rbi +65 -0
- data/rbi/openai/models/responses/response_input_item.rbi +2 -0
- data/rbi/openai/models/responses/response_output_item.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_added_event.rbi +2 -1
- data/rbi/openai/models/responses/response_output_item_done_event.rbi +2 -1
- data/rbi/openai/models/responses/response_reasoning_item.rbi +63 -4
- data/rbi/openai/models/responses/response_reasoning_text_delta_event.rbi +83 -0
- data/rbi/openai/models/responses/{response_reasoning_summary_done_event.rbi → response_reasoning_text_done_event.rbi} +20 -20
- data/rbi/openai/models/responses/response_retrieve_params.rbi +21 -0
- data/rbi/openai/models/responses/response_stream_event.rbi +4 -2
- data/rbi/openai/models/responses/response_text_config.rbi +64 -1
- data/rbi/openai/models/responses/tool.rbi +1 -0
- data/rbi/openai/models/responses/tool_choice_allowed.rbi +124 -0
- data/rbi/openai/models/responses/tool_choice_custom.rbi +39 -0
- data/rbi/openai/models/vector_store_search_params.rbi +12 -1
- data/rbi/openai/models.rbi +6 -0
- data/rbi/openai/resources/beta/assistants.rbi +6 -8
- data/rbi/openai/resources/beta/threads/runs.rbi +8 -10
- data/rbi/openai/resources/chat/completions.rbi +78 -25
- data/rbi/openai/resources/responses.rbi +249 -47
- data/sig/openai/internal/transport/base_client.rbs +1 -1
- data/sig/openai/models/beta/assistant_update_params.rbs +12 -0
- data/sig/openai/models/chat/chat_completion_allowed_tool_choice.rbs +29 -0
- data/sig/openai/models/chat/chat_completion_allowed_tools.rbs +38 -0
- data/sig/openai/models/chat/chat_completion_assistant_message_param.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_custom_tool.rbs +137 -0
- data/sig/openai/models/chat/chat_completion_function_tool.rbs +26 -0
- data/sig/openai/models/chat/chat_completion_message.rbs +6 -6
- data/sig/openai/models/chat/chat_completion_message_custom_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_function_tool_call.rbs +46 -0
- data/sig/openai/models/chat/chat_completion_message_tool_call.rbs +6 -35
- data/sig/openai/models/chat/chat_completion_named_tool_choice_custom.rbs +39 -0
- data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
- data/sig/openai/models/chat/chat_completion_stream_options.rbs +11 -3
- data/sig/openai/models/chat/chat_completion_tool.rbs +6 -15
- data/sig/openai/models/chat/chat_completion_tool_choice_option.rbs +2 -0
- data/sig/openai/models/chat/completion_create_params.rbs +37 -6
- data/sig/openai/models/chat_model.rbs +15 -1
- data/sig/openai/models/custom_tool_input_format.rbs +61 -0
- data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs +6 -6
- data/sig/openai/models/reasoning_effort.rbs +2 -1
- data/sig/openai/models/response_format_text_grammar.rbs +15 -0
- data/sig/openai/models/response_format_text_python.rbs +13 -0
- data/sig/openai/models/responses/custom_tool.rbs +43 -0
- data/sig/openai/models/responses/response.rbs +16 -0
- data/sig/openai/models/responses/response_create_params.rbs +33 -0
- data/sig/openai/models/responses/response_custom_tool_call.rbs +44 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs +42 -0
- data/sig/openai/models/responses/response_custom_tool_call_output.rbs +39 -0
- data/sig/openai/models/responses/response_input_item.rbs +2 -0
- data/sig/openai/models/responses/response_output_item.rbs +1 -0
- data/sig/openai/models/responses/response_reasoning_item.rbs +21 -0
- data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs} +15 -15
- data/sig/openai/models/responses/{response_reasoning_summary_done_event.rbs → response_reasoning_text_done_event.rbs} +11 -11
- data/sig/openai/models/responses/response_retrieve_params.rbs +7 -0
- data/sig/openai/models/responses/response_stream_event.rbs +4 -2
- data/sig/openai/models/responses/response_text_config.rbs +22 -3
- data/sig/openai/models/responses/tool.rbs +1 -0
- data/sig/openai/models/responses/tool_choice_allowed.rbs +43 -0
- data/sig/openai/models/responses/tool_choice_custom.rbs +17 -0
- data/sig/openai/models/vector_store_search_params.rbs +2 -1
- data/sig/openai/models.rbs +6 -0
- data/sig/openai/resources/chat/completions.rbs +8 -2
- data/sig/openai/resources/responses.rbs +36 -0
- metadata +59 -8
- data/lib/openai/models/responses/response_reasoning_summary_delta_event.rb +0 -65
- data/lib/openai/models/responses/response_reasoning_summary_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_summary_delta_event.rbi +0 -85
data/sig/openai/models/chat_model.rbs

```diff
@@ -1,7 +1,14 @@
 module OpenAI
   module Models
     type chat_model =
-      :"gpt-4.1"
+      :"gpt-5"
+      | :"gpt-5-mini"
+      | :"gpt-5-nano"
+      | :"gpt-5-2025-08-07"
+      | :"gpt-5-mini-2025-08-07"
+      | :"gpt-5-nano-2025-08-07"
+      | :"gpt-5-chat-latest"
+      | :"gpt-4.1"
       | :"gpt-4.1-mini"
       | :"gpt-4.1-nano"
       | :"gpt-4.1-2025-04-14"
@@ -60,6 +67,13 @@ module OpenAI
     module ChatModel
       extend OpenAI::Internal::Type::Enum

+      GPT_5: :"gpt-5"
+      GPT_5_MINI: :"gpt-5-mini"
+      GPT_5_NANO: :"gpt-5-nano"
+      GPT_5_2025_08_07: :"gpt-5-2025-08-07"
+      GPT_5_MINI_2025_08_07: :"gpt-5-mini-2025-08-07"
+      GPT_5_NANO_2025_08_07: :"gpt-5-nano-2025-08-07"
+      GPT_5_CHAT_LATEST: :"gpt-5-chat-latest"
       GPT_4_1: :"gpt-4.1"
       GPT_4_1_MINI: :"gpt-4.1-mini"
       GPT_4_1_NANO: :"gpt-4.1-nano"
```
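For reference, a minimal usage sketch of the new GPT-5 model symbols, assuming the gem's `OpenAI::Client` entry point and an `OPENAI_API_KEY` environment variable (the call shape follows the gem's README; the prompt is made up):

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Any of the new symbols (or the ChatModel constants) can be passed as `model:`.
completion = client.chat.completions.create(
  model: :"gpt-5-mini",
  messages: [{role: "user", content: "Say hello in one sentence."}]
)

puts completion.choices.first.message.content
```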
data/sig/openai/models/custom_tool_input_format.rbs

```diff
@@ -0,0 +1,61 @@
+module OpenAI
+  module Models
+    type custom_tool_input_format =
+      OpenAI::CustomToolInputFormat::Text
+      | OpenAI::CustomToolInputFormat::Grammar
+
+    module CustomToolInputFormat
+      extend OpenAI::Internal::Type::Union
+
+      type text = { type: :text }
+
+      class Text < OpenAI::Internal::Type::BaseModel
+        attr_accessor type: :text
+
+        def initialize: (?type: :text) -> void
+
+        def to_hash: -> { type: :text }
+      end
+
+      type grammar =
+        {
+          definition: String,
+          syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax,
+          type: :grammar
+        }
+
+      class Grammar < OpenAI::Internal::Type::BaseModel
+        attr_accessor definition: String
+
+        attr_accessor syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax
+
+        attr_accessor type: :grammar
+
+        def initialize: (
+          definition: String,
+          syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax,
+          ?type: :grammar
+        ) -> void
+
+        def to_hash: -> {
+          definition: String,
+          syntax: OpenAI::Models::CustomToolInputFormat::Grammar::syntax,
+          type: :grammar
+        }
+
+        type syntax = :lark | :regex
+
+        module Syntax
+          extend OpenAI::Internal::Type::Enum
+
+          LARK: :lark
+          REGEX: :regex
+
+          def self?.values: -> ::Array[OpenAI::Models::CustomToolInputFormat::Grammar::syntax]
+        end
+      end
+
+      def self?.variants: -> ::Array[OpenAI::Models::custom_tool_input_format]
+    end
+  end
+end
```
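A small construction sketch for the two variants, grounded in the signatures above (the grammar string itself is made up):

```ruby
# Free-form text input for a custom tool.
text_format = OpenAI::CustomToolInputFormat::Text.new

# Input constrained by a Lark grammar (syntax may also be :regex).
grammar_format = OpenAI::CustomToolInputFormat::Grammar.new(
  definition: 'start: "yes" | "no"',
  syntax: :lark
)
```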
data/sig/openai/models/evals/create_eval_completions_run_data_source.rbs

```diff
@@ -335,7 +335,7 @@ module OpenAI
         response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format,
         seed: Integer,
         temperature: Float,
-        tools: ::Array[OpenAI::Chat::ChatCompletionTool],
+        tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool],
         top_p: Float
       }

@@ -358,11 +358,11 @@ module OpenAI

        def temperature=: (Float) -> Float

-       attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionTool]?
+       attr_reader tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool]?

        def tools=: (
-         ::Array[OpenAI::Chat::ChatCompletionTool]
-       ) -> ::Array[OpenAI::Chat::ChatCompletionTool]
+         ::Array[OpenAI::Chat::ChatCompletionFunctionTool]
+       ) -> ::Array[OpenAI::Chat::ChatCompletionFunctionTool]

        attr_reader top_p: Float?

@@ -373,7 +373,7 @@ module OpenAI
         ?response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format,
         ?seed: Integer,
         ?temperature: Float,
-        ?tools: ::Array[OpenAI::Chat::ChatCompletionTool],
+        ?tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool],
         ?top_p: Float
       ) -> void

@@ -382,7 +382,7 @@ module OpenAI
         response_format: OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::response_format,
         seed: Integer,
         temperature: Float,
-        tools: ::Array[OpenAI::Chat::ChatCompletionTool],
+        tools: ::Array[OpenAI::Chat::ChatCompletionFunctionTool],
         top_p: Float
       }
```
data/sig/openai/models/reasoning_effort.rbs

```diff
@@ -1,10 +1,11 @@
 module OpenAI
   module Models
-    type reasoning_effort = :low | :medium | :high
+    type reasoning_effort = :minimal | :low | :medium | :high

     module ReasoningEffort
       extend OpenAI::Internal::Type::Enum

+      MINIMAL: :minimal
       LOW: :low
       MEDIUM: :medium
       HIGH: :high
```
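The new `:minimal` effort can be passed wherever a reasoning effort is accepted. A hedged sketch against the Responses API, reusing the `client` from the first sketch (the request itself is illustrative):

```ruby
response = client.responses.create(
  model: :"gpt-5",
  input: "Summarize this changelog entry in one sentence.",
  reasoning: {effort: :minimal}  # :minimal is new in this release
)
```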
data/sig/openai/models/response_format_text_grammar.rbs

```diff
@@ -0,0 +1,15 @@
+module OpenAI
+  module Models
+    type response_format_text_grammar = { grammar: String, type: :grammar }
+
+    class ResponseFormatTextGrammar < OpenAI::Internal::Type::BaseModel
+      attr_accessor grammar: String
+
+      attr_accessor type: :grammar
+
+      def initialize: (grammar: String, ?type: :grammar) -> void
+
+      def to_hash: -> { grammar: String, type: :grammar }
+    end
+  end
+end
```
data/sig/openai/models/response_format_text_python.rbs

```diff
@@ -0,0 +1,13 @@
+module OpenAI
+  module Models
+    type response_format_text_python = { type: :python }
+
+    class ResponseFormatTextPython < OpenAI::Internal::Type::BaseModel
+      attr_accessor type: :python
+
+      def initialize: (?type: :python) -> void
+
+      def to_hash: -> { type: :python }
+    end
+  end
+end
```
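Both new response-format models are plain value objects; a construction sketch grounded in the signatures above (the grammar string is made up):

```ruby
grammar_format = OpenAI::Models::ResponseFormatTextGrammar.new(grammar: "start: NUMBER")
python_format  = OpenAI::Models::ResponseFormatTextPython.new

grammar_format.to_hash  # => { grammar: "start: NUMBER", type: :grammar }
python_format.to_hash   # => { type: :python }
```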
data/sig/openai/models/responses/custom_tool.rbs

```diff
@@ -0,0 +1,43 @@
+module OpenAI
+  module Models
+    module Responses
+      type custom_tool =
+        {
+          name: String,
+          type: :custom,
+          description: String,
+          format_: OpenAI::Models::custom_tool_input_format
+        }
+
+      class CustomTool < OpenAI::Internal::Type::BaseModel
+        attr_accessor name: String
+
+        attr_accessor type: :custom
+
+        attr_reader description: String?
+
+        def description=: (String) -> String
+
+        attr_reader format_: OpenAI::Models::custom_tool_input_format?
+
+        def format_=: (
+          OpenAI::Models::custom_tool_input_format
+        ) -> OpenAI::Models::custom_tool_input_format
+
+        def initialize: (
+          name: String,
+          ?description: String,
+          ?format_: OpenAI::Models::custom_tool_input_format,
+          ?type: :custom
+        ) -> void
+
+        def to_hash: -> {
+          name: String,
+          type: :custom,
+          description: String,
+          format_: OpenAI::Models::custom_tool_input_format
+        }
+      end
+    end
+  end
+end
```
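Custom tools accept a free-form (or grammar-constrained) string as input rather than JSON arguments. A hedged sketch of declaring one on a Responses call, reusing the `client` from the first sketch; the tool name, grammar, and prompt are made up:

```ruby
sql_tool = OpenAI::Responses::CustomTool.new(
  name: "sql_runner",
  description: "Runs a read-only SQL query",
  format_: OpenAI::CustomToolInputFormat::Grammar.new(
    definition: 'start: "SELECT" /.+/',
    syntax: :lark
  )
)

response = client.responses.create(
  model: :"gpt-5",
  input: "How many users signed up last week?",
  tools: [sql_tool]
)
```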
data/sig/openai/models/responses/response.rbs

```diff
@@ -22,7 +22,9 @@ module OpenAI
          max_tool_calls: Integer?,
          previous_response_id: String?,
          prompt: OpenAI::Responses::ResponsePrompt?,
+         prompt_cache_key: String,
          reasoning: OpenAI::Reasoning?,
+         safety_identifier: String,
          service_tier: OpenAI::Models::Responses::Response::service_tier?,
          status: OpenAI::Models::Responses::response_status,
          text: OpenAI::Responses::ResponseTextConfig,
@@ -71,8 +73,16 @@ module OpenAI

        attr_accessor prompt: OpenAI::Responses::ResponsePrompt?

+       attr_reader prompt_cache_key: String?
+
+       def prompt_cache_key=: (String) -> String
+
        attr_accessor reasoning: OpenAI::Reasoning?

+       attr_reader safety_identifier: String?
+
+       def safety_identifier=: (String) -> String
+
        attr_accessor service_tier: OpenAI::Models::Responses::Response::service_tier?

        attr_reader status: OpenAI::Models::Responses::response_status?
@@ -120,7 +130,9 @@ module OpenAI
         ?max_tool_calls: Integer?,
         ?previous_response_id: String?,
         ?prompt: OpenAI::Responses::ResponsePrompt?,
+        ?prompt_cache_key: String,
         ?reasoning: OpenAI::Reasoning?,
+        ?safety_identifier: String,
         ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
         ?status: OpenAI::Models::Responses::response_status,
         ?text: OpenAI::Responses::ResponseTextConfig,
@@ -151,7 +163,9 @@ module OpenAI
         max_tool_calls: Integer?,
         previous_response_id: String?,
         prompt: OpenAI::Responses::ResponsePrompt?,
+        prompt_cache_key: String,
         reasoning: OpenAI::Reasoning?,
+        safety_identifier: String,
         service_tier: OpenAI::Models::Responses::Response::service_tier?,
         status: OpenAI::Models::Responses::response_status,
         text: OpenAI::Responses::ResponseTextConfig,
@@ -206,9 +220,11 @@ module OpenAI

       type tool_choice =
         OpenAI::Models::Responses::tool_choice_options
+        | OpenAI::Responses::ToolChoiceAllowed
         | OpenAI::Responses::ToolChoiceTypes
         | OpenAI::Responses::ToolChoiceFunction
         | OpenAI::Responses::ToolChoiceMcp
+        | OpenAI::Responses::ToolChoiceCustom

       module ToolChoice
         extend OpenAI::Internal::Type::Union
```
data/sig/openai/models/responses/response_create_params.rbs

```diff
@@ -14,9 +14,12 @@ module OpenAI
         parallel_tool_calls: bool?,
         previous_response_id: String?,
         prompt: OpenAI::Responses::ResponsePrompt?,
+        prompt_cache_key: String,
         reasoning: OpenAI::Reasoning?,
+        safety_identifier: String,
         service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
         store: bool?,
+        stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
         temperature: Float?,
         text: OpenAI::Responses::ResponseTextConfig,
         tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
@@ -62,12 +65,22 @@ module OpenAI

       attr_accessor prompt: OpenAI::Responses::ResponsePrompt?

+      attr_reader prompt_cache_key: String?
+
+      def prompt_cache_key=: (String) -> String
+
       attr_accessor reasoning: OpenAI::Reasoning?

+      attr_reader safety_identifier: String?
+
+      def safety_identifier=: (String) -> String
+
       attr_accessor service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?

       attr_accessor store: bool?

+      attr_accessor stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?
+
       attr_accessor temperature: Float?

       attr_reader text: OpenAI::Responses::ResponseTextConfig?
@@ -110,9 +123,12 @@ module OpenAI
        ?parallel_tool_calls: bool?,
        ?previous_response_id: String?,
        ?prompt: OpenAI::Responses::ResponsePrompt?,
+       ?prompt_cache_key: String,
        ?reasoning: OpenAI::Reasoning?,
+       ?safety_identifier: String,
        ?service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
        ?store: bool?,
+       ?stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
        ?temperature: Float?,
        ?text: OpenAI::Responses::ResponseTextConfig,
        ?tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
@@ -136,9 +152,12 @@ module OpenAI
        parallel_tool_calls: bool?,
        previous_response_id: String?,
        prompt: OpenAI::Responses::ResponsePrompt?,
+       prompt_cache_key: String,
        reasoning: OpenAI::Reasoning?,
+       safety_identifier: String,
        service_tier: OpenAI::Models::Responses::ResponseCreateParams::service_tier?,
        store: bool?,
+       stream_options: OpenAI::Responses::ResponseCreateParams::StreamOptions?,
        temperature: Float?,
        text: OpenAI::Responses::ResponseTextConfig,
        tool_choice: OpenAI::Models::Responses::ResponseCreateParams::tool_choice,
@@ -172,11 +191,25 @@ module OpenAI
       def self?.values: -> ::Array[OpenAI::Models::Responses::ResponseCreateParams::service_tier]
     end

+      type stream_options = { include_obfuscation: bool }
+
+      class StreamOptions < OpenAI::Internal::Type::BaseModel
+        attr_reader include_obfuscation: bool?
+
+        def include_obfuscation=: (bool) -> bool
+
+        def initialize: (?include_obfuscation: bool) -> void
+
+        def to_hash: -> { include_obfuscation: bool }
+      end
+
      type tool_choice =
        OpenAI::Models::Responses::tool_choice_options
+        | OpenAI::Responses::ToolChoiceAllowed
        | OpenAI::Responses::ToolChoiceTypes
        | OpenAI::Responses::ToolChoiceFunction
        | OpenAI::Responses::ToolChoiceMcp
+        | OpenAI::Responses::ToolChoiceCustom

      module ToolChoice
        extend OpenAI::Internal::Type::Union
```
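A hedged sketch of the new request parameters, reusing the `client` from the first sketch; the values are illustrative, and `include_obfuscation` only matters for streaming requests:

```ruby
response = client.responses.create(
  model: :"gpt-5",
  input: "Draft a release note for 0.17.0.",
  prompt_cache_key: "release-notes-v1",          # groups similar requests for prompt caching
  safety_identifier: "user-1234",                # stable, anonymized end-user identifier
  stream_options: {include_obfuscation: false}   # new StreamOptions model; streaming only
)
```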
data/sig/openai/models/responses/response_custom_tool_call.rbs

```diff
@@ -0,0 +1,44 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call =
+        {
+          call_id: String,
+          input: String,
+          name: String,
+          type: :custom_tool_call,
+          id: String
+        }
+
+      class ResponseCustomToolCall < OpenAI::Internal::Type::BaseModel
+        attr_accessor call_id: String
+
+        attr_accessor input: String
+
+        attr_accessor name: String
+
+        attr_accessor type: :custom_tool_call
+
+        attr_reader id: String?
+
+        def id=: (String) -> String
+
+        def initialize: (
+          call_id: String,
+          input: String,
+          name: String,
+          ?id: String,
+          ?type: :custom_tool_call
+        ) -> void
+
+        def to_hash: -> {
+          call_id: String,
+          input: String,
+          name: String,
+          type: :custom_tool_call,
+          id: String
+        }
+      end
+    end
+  end
+end
```
data/sig/openai/models/responses/response_custom_tool_call_input_delta_event.rbs

```diff
@@ -0,0 +1,42 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_input_delta_event =
+        {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.delta"
+        }
+
+      class ResponseCustomToolCallInputDeltaEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor delta: String
+
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.custom_tool_call_input.delta"
+
+        def initialize: (
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.custom_tool_call_input.delta"
+        ) -> void
+
+        def to_hash: -> {
+          delta: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.delta"
+        }
+      end
+    end
+  end
+end
```
data/sig/openai/models/responses/response_custom_tool_call_input_done_event.rbs

```diff
@@ -0,0 +1,42 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_input_done_event =
+        {
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.done"
+        }
+
+      class ResponseCustomToolCallInputDoneEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor input: String
+
+        attr_accessor item_id: String
+
+        attr_accessor output_index: Integer
+
+        attr_accessor sequence_number: Integer
+
+        attr_accessor type: :"response.custom_tool_call_input.done"
+
+        def initialize: (
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          ?type: :"response.custom_tool_call_input.done"
+        ) -> void
+
+        def to_hash: -> {
+          input: String,
+          item_id: String,
+          output_index: Integer,
+          sequence_number: Integer,
+          type: :"response.custom_tool_call_input.done"
+        }
+      end
+    end
+  end
+end
```
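When the model streams input for a custom tool call, `response.custom_tool_call_input.delta` events arrive per item and a final `.done` event carries the complete input. A hedged sketch of accumulating them; the `stream` enumerable is assumed to come from a streaming Responses call:

```ruby
buffers = Hash.new { |h, k| h[k] = +"" }

stream.each do |event|
  case event
  when OpenAI::Responses::ResponseCustomToolCallInputDeltaEvent
    buffers[event.item_id] << event.delta
  when OpenAI::Responses::ResponseCustomToolCallInputDoneEvent
    puts "custom tool input for #{event.item_id}: #{event.input}"
  end
end
```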
data/sig/openai/models/responses/response_custom_tool_call_output.rbs

```diff
@@ -0,0 +1,39 @@
+module OpenAI
+  module Models
+    module Responses
+      type response_custom_tool_call_output =
+        {
+          call_id: String,
+          output: String,
+          type: :custom_tool_call_output,
+          id: String
+        }
+
+      class ResponseCustomToolCallOutput < OpenAI::Internal::Type::BaseModel
+        attr_accessor call_id: String
+
+        attr_accessor output: String
+
+        attr_accessor type: :custom_tool_call_output
+
+        attr_reader id: String?
+
+        def id=: (String) -> String
+
+        def initialize: (
+          call_id: String,
+          output: String,
+          ?id: String,
+          ?type: :custom_tool_call_output
+        ) -> void
+
+        def to_hash: -> {
+          call_id: String,
+          output: String,
+          type: :custom_tool_call_output,
+          id: String
+        }
+      end
+    end
+  end
+end
```
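After running the tool locally, its result is sent back as a `custom_tool_call_output` input item keyed by `call_id`. A hedged sketch, reusing `client` and a prior `response`; `run_sql` is a hypothetical local executor:

```ruby
tool_call = response.output.find do |item|
  item.is_a?(OpenAI::Responses::ResponseCustomToolCall)
end

follow_up = client.responses.create(
  model: :"gpt-5",
  previous_response_id: response.id,
  input: [
    OpenAI::Responses::ResponseCustomToolCallOutput.new(
      call_id: tool_call.call_id,
      output: run_sql(tool_call.input)   # hypothetical helper
    )
  ]
)
```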
data/sig/openai/models/responses/response_input_item.rbs

```diff
@@ -20,6 +20,8 @@ module OpenAI
        | OpenAI::Responses::ResponseInputItem::McpApprovalRequest
        | OpenAI::Responses::ResponseInputItem::McpApprovalResponse
        | OpenAI::Responses::ResponseInputItem::McpCall
+       | OpenAI::Responses::ResponseCustomToolCallOutput
+       | OpenAI::Responses::ResponseCustomToolCall
        | OpenAI::Responses::ResponseInputItem::ItemReference

      module ResponseInputItem
```
data/sig/openai/models/responses/response_output_item.rbs

```diff
@@ -14,6 +14,7 @@ module OpenAI
        | OpenAI::Responses::ResponseOutputItem::McpCall
        | OpenAI::Responses::ResponseOutputItem::McpListTools
        | OpenAI::Responses::ResponseOutputItem::McpApprovalRequest
+       | OpenAI::Responses::ResponseCustomToolCall

      module ResponseOutputItem
        extend OpenAI::Internal::Type::Union
```
data/sig/openai/models/responses/response_reasoning_item.rbs

```diff
@@ -6,6 +6,7 @@ module OpenAI
          id: String,
          summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary],
          type: :reasoning,
+         content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content],
          encrypted_content: String?,
          status: OpenAI::Models::Responses::ResponseReasoningItem::status
        }
@@ -17,6 +18,12 @@ module OpenAI

        attr_accessor type: :reasoning

+       attr_reader content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content]?
+
+       def content=: (
+         ::Array[OpenAI::Responses::ResponseReasoningItem::Content]
+       ) -> ::Array[OpenAI::Responses::ResponseReasoningItem::Content]
+
        attr_accessor encrypted_content: String?

        attr_reader status: OpenAI::Models::Responses::ResponseReasoningItem::status?
@@ -28,6 +35,7 @@ module OpenAI
        def initialize: (
          id: String,
          summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary],
+         ?content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content],
          ?encrypted_content: String?,
          ?status: OpenAI::Models::Responses::ResponseReasoningItem::status,
          ?type: :reasoning
@@ -37,6 +45,7 @@ module OpenAI
          id: String,
          summary: ::Array[OpenAI::Responses::ResponseReasoningItem::Summary],
          type: :reasoning,
+         content: ::Array[OpenAI::Responses::ResponseReasoningItem::Content],
          encrypted_content: String?,
          status: OpenAI::Models::Responses::ResponseReasoningItem::status
        }
@@ -53,6 +62,18 @@ module OpenAI
          def to_hash: -> { text: String, type: :summary_text }
        end

+       type content = { text: String, type: :reasoning_text }
+
+       class Content < OpenAI::Internal::Type::BaseModel
+         attr_accessor text: String
+
+         attr_accessor type: :reasoning_text
+
+         def initialize: (text: String, ?type: :reasoning_text) -> void
+
+         def to_hash: -> { text: String, type: :reasoning_text }
+       end
+
        type status = :in_progress | :completed | :incomplete

        module Status
```
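Reasoning items can now expose a `content` array of `:reasoning_text` parts alongside the existing `summary`. A small sketch reading it from a prior `response`, grounded in the signatures above:

```ruby
response.output.each do |item|
  next unless item.is_a?(OpenAI::Responses::ResponseReasoningItem)

  (item.content || []).each { |part| puts part.text }  # each part has type :reasoning_text
end
```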
data/sig/openai/models/responses/{response_reasoning_summary_delta_event.rbs → response_reasoning_text_delta_event.rbs}

```diff
@@ -1,18 +1,20 @@
 module OpenAI
   module Models
     module Responses
-      type response_reasoning_summary_delta_event =
+      type response_reasoning_text_delta_event =
         {
-          …
+          content_index: Integer,
+          delta: String,
           item_id: String,
           output_index: Integer,
           sequence_number: Integer,
-          …
-          type: :"response.reasoning_summary.delta"
+          type: :"response.reasoning_text.delta"
         }

-      class ResponseReasoningSummaryDeltaEvent < OpenAI::Internal::Type::BaseModel
-        attr_accessor …
+      class ResponseReasoningTextDeltaEvent < OpenAI::Internal::Type::BaseModel
+        attr_accessor content_index: Integer
+
+        attr_accessor delta: String

         attr_accessor item_id: String

@@ -20,26 +22,24 @@ module OpenAI

         attr_accessor sequence_number: Integer

-        attr_accessor …
-
-        attr_accessor type: :"response.reasoning_summary.delta"
+        attr_accessor type: :"response.reasoning_text.delta"

         def initialize: (
-          …
+          content_index: Integer,
+          delta: String,
           item_id: String,
           output_index: Integer,
           sequence_number: Integer,
-          …
-          ?type: :"response.reasoning_summary.delta"
+          ?type: :"response.reasoning_text.delta"
         ) -> void

         def to_hash: -> {
-          …
+          content_index: Integer,
+          delta: String,
           item_id: String,
           output_index: Integer,
           sequence_number: Integer,
-          …
-          type: :"response.reasoning_summary.delta"
+          type: :"response.reasoning_text.delta"
         }
       end
     end
```

(Removed lines marked `…` were truncated in the extracted page and could not be recovered.)