openai 0.9.0 → 0.11.0
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +40 -0
- data/README.md +79 -1
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +25 -0
- data/lib/openai/internal/type/array_of.rb +6 -1
- data/lib/openai/internal/type/base_model.rb +76 -24
- data/lib/openai/internal/type/boolean.rb +7 -1
- data/lib/openai/internal/type/converter.rb +42 -34
- data/lib/openai/internal/type/enum.rb +10 -2
- data/lib/openai/internal/type/file_input.rb +6 -1
- data/lib/openai/internal/type/hash_of.rb +6 -1
- data/lib/openai/internal/type/union.rb +12 -7
- data/lib/openai/internal/type/unknown.rb +7 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/audio/speech_create_params.rb +23 -2
- data/lib/openai/models/audio/transcription.rb +118 -1
- data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
- data/lib/openai/models/audio/transcription_verbose.rb +31 -1
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
- data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
- data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
- data/lib/openai/models/responses/response_create_params.rb +92 -67
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +18 -2
- data/lib/openai/models/responses/response_stream_event.rb +2 -2
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/audio/speech.rb +3 -1
- data/lib/openai/resources/chat/completions.rb +10 -2
- data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
- data/lib/openai/resources/responses.rb +24 -16
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +18 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/errors.rbi +16 -0
- data/rbi/openai/internal/type/boolean.rbi +2 -0
- data/rbi/openai/internal/type/converter.rbi +15 -15
- data/rbi/openai/internal/type/union.rbi +5 -0
- data/rbi/openai/internal/type/unknown.rbi +2 -0
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
- data/rbi/openai/models/audio/transcription.rbi +213 -3
- data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
- data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
- data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
- data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
- data/rbi/openai/models/responses/response_create_params.rbi +174 -115
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/response_output_text.rbi +26 -4
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/audio/speech.rbi +6 -1
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
- data/rbi/openai/resources/responses.rbi +108 -84
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/errors.rbs +9 -0
- data/sig/openai/internal/type/converter.rbs +7 -1
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/audio/speech_create_params.rbs +21 -1
- data/sig/openai/models/audio/transcription.rbs +95 -3
- data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
- data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
- data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
- data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
- data/sig/openai/models/responses/response_create_params.rbs +31 -11
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/response_output_text.rbs +15 -1
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/audio/speech.rbs +1 -0
- data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
- data/sig/openai/resources/responses.rbs +8 -4
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +56 -2
data/sig/openai/models/audio/transcription_text_done_event.rbs

@@ -5,7 +5,8 @@ module OpenAI
   {
     text: String,
     type: :"transcript.text.done",
-    logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+    logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+    usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
   }

   class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel
@@ -19,16 +20,24 @@ module OpenAI
       ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
     ) -> ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]

+    attr_reader usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage?
+
+    def usage=: (
+      OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+    ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+
     def initialize: (
       text: String,
       ?logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+      ?usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage,
       ?type: :"transcript.text.done"
     ) -> void

     def to_hash: -> {
       text: String,
       type: :"transcript.text.done",
-      logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+      logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+      usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
     }

     type logprob =
@@ -59,6 +68,67 @@ module OpenAI
         logprob: Float
       }
     end
+
+    type usage =
+      {
+        input_tokens: Integer,
+        output_tokens: Integer,
+        total_tokens: Integer,
+        type: :tokens,
+        input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+      }
+
+    class Usage < OpenAI::Internal::Type::BaseModel
+      attr_accessor input_tokens: Integer
+
+      attr_accessor output_tokens: Integer
+
+      attr_accessor total_tokens: Integer
+
+      attr_accessor type: :tokens
+
+      attr_reader input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails?
+
+      def input_token_details=: (
+        OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+      ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+
+      def initialize: (
+        input_tokens: Integer,
+        output_tokens: Integer,
+        total_tokens: Integer,
+        ?input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails,
+        ?type: :tokens
+      ) -> void
+
+      def to_hash: -> {
+        input_tokens: Integer,
+        output_tokens: Integer,
+        total_tokens: Integer,
+        type: :tokens,
+        input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+      }
+
+      type input_token_details =
+        { audio_tokens: Integer, text_tokens: Integer }
+
+      class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+        attr_reader audio_tokens: Integer?
+
+        def audio_tokens=: (Integer) -> Integer
+
+        attr_reader text_tokens: Integer?
+
+        def text_tokens=: (Integer) -> Integer
+
+        def initialize: (
+          ?audio_tokens: Integer,
+          ?text_tokens: Integer
+        ) -> void
+
+        def to_hash: -> { audio_tokens: Integer, text_tokens: Integer }
+      end
+    end
   end
 end
 end
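The `transcript.text.done` event now reports token usage. A minimal sketch of reading it, assuming `event` is an `OpenAI::Audio::TranscriptionTextDoneEvent` delivered by a streaming transcription (the streaming loop that produces it is omitted and depends on your setup):

```ruby
# Sketch: inspect the new usage payload on a transcript.text.done event.
def log_transcription_usage(event)
  usage = event.usage
  return unless usage # attr_reader: may be nil if the server omits it

  puts "tokens: #{usage.input_tokens} in / #{usage.output_tokens} out (#{usage.total_tokens} total)"

  if (details = usage.input_token_details)
    puts "  audio: #{details.audio_tokens}, text: #{details.text_tokens}"
  end
end
```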
data/sig/openai/models/audio/transcription_verbose.rbs

@@ -7,6 +7,7 @@ module OpenAI
     language: String,
     text: String,
     segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+    usage: OpenAI::Audio::TranscriptionVerbose::Usage,
     words: ::Array[OpenAI::Audio::TranscriptionWord]
   }

@@ -23,6 +24,12 @@ module OpenAI
       ::Array[OpenAI::Audio::TranscriptionSegment]
     ) -> ::Array[OpenAI::Audio::TranscriptionSegment]

+    attr_reader usage: OpenAI::Audio::TranscriptionVerbose::Usage?
+
+    def usage=: (
+      OpenAI::Audio::TranscriptionVerbose::Usage
+    ) -> OpenAI::Audio::TranscriptionVerbose::Usage
+
     attr_reader words: ::Array[OpenAI::Audio::TranscriptionWord]?

     def words=: (
@@ -34,6 +41,7 @@ module OpenAI
       language: String,
       text: String,
       ?segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+      ?usage: OpenAI::Audio::TranscriptionVerbose::Usage,
       ?words: ::Array[OpenAI::Audio::TranscriptionWord]
     ) -> void

@@ -42,8 +50,21 @@ module OpenAI
       language: String,
       text: String,
       segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+      usage: OpenAI::Audio::TranscriptionVerbose::Usage,
       words: ::Array[OpenAI::Audio::TranscriptionWord]
     }
+
+    type usage = { duration: Float, type: :duration }
+
+    class Usage < OpenAI::Internal::Type::BaseModel
+      attr_accessor duration: Float
+
+      attr_accessor type: :duration
+
+      def initialize: (duration: Float, ?type: :duration) -> void
+
+      def to_hash: -> { duration: Float, type: :duration }
+    end
   end
 end
 end
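`TranscriptionVerbose` gains a duration-based `usage` object (`type: :duration`). A small sketch, assuming `transcription` is an `OpenAI::Audio::TranscriptionVerbose` returned from a verbose transcription request:

```ruby
# Sketch: verbose transcriptions now expose the billed audio duration.
if (usage = transcription.usage)
  puts "billed duration: #{usage.duration}s" if usage.type == :duration
end
```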
data/sig/openai/models/chat/chat_completion.rbs

@@ -127,7 +127,7 @@ module OpenAI
       end
     end

-    type service_tier = :auto | :default | :flex | :scale
+    type service_tier = :auto | :default | :flex | :scale | :priority

     module ServiceTier
       extend OpenAI::Internal::Type::Enum
@@ -136,6 +136,7 @@ module OpenAI
       DEFAULT: :default
       FLEX: :flex
       SCALE: :scale
+      PRIORITY: :priority

       def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletion::service_tier]
     end
data/sig/openai/models/chat/chat_completion_chunk.rbs

@@ -272,7 +272,7 @@ module OpenAI
       end
     end

-    type service_tier = :auto | :default | :flex | :scale
+    type service_tier = :auto | :default | :flex | :scale | :priority

     module ServiceTier
       extend OpenAI::Internal::Type::Enum
@@ -281,6 +281,7 @@ module OpenAI
       DEFAULT: :default
       FLEX: :flex
       SCALE: :scale
+      PRIORITY: :priority

       def self?.values: -> ::Array[OpenAI::Models::Chat::ChatCompletionChunk::service_tier]
     end
data/sig/openai/models/chat/completion_create_params.rbs

@@ -280,7 +280,7 @@ module OpenAI
       def self?.variants: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::response_format]
     end

-    type service_tier = :auto | :default | :flex | :scale
+    type service_tier = :auto | :default | :flex | :scale | :priority

     module ServiceTier
       extend OpenAI::Internal::Type::Enum
@@ -289,6 +289,7 @@ module OpenAI
       DEFAULT: :default
       FLEX: :flex
       SCALE: :scale
+      PRIORITY: :priority

       def self?.values: -> ::Array[OpenAI::Models::Chat::CompletionCreateParams::service_tier]
     end
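The chat signatures above all extend `service_tier` with `:priority`. A hedged sketch of opting into it on a request; the client construction and `chat.completions.create` call follow the gem's usual resource layout, and the model/message values are placeholders:

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

completion = client.chat.completions.create(
  model: "gpt-4.1",
  messages: [{role: "user", content: "Say hello"}],
  service_tier: :priority # newly accepted alongside :auto, :default, :flex, :scale
)

puts completion.service_tier # the response echoes the tier that actually served the request
```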
data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs

@@ -4,34 +4,71 @@ module OpenAI
     module Checkpoints
       type permission_retrieve_response =
         {
-
-
-          object: :
-
+          data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+          has_more: bool,
+          object: :list,
+          first_id: String?,
+          last_id: String?
         }

       class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
-        attr_accessor
+        attr_accessor data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]

-        attr_accessor
+        attr_accessor has_more: bool

-        attr_accessor object: :
+        attr_accessor object: :list

-        attr_accessor
+        attr_accessor first_id: String?
+
+        attr_accessor last_id: String?

         def initialize: (
-
-
-
-          ?
+          data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+          has_more: bool,
+          ?first_id: String?,
+          ?last_id: String?,
+          ?object: :list
         ) -> void

         def to_hash: -> {
-
-
-          object: :
-
+          data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+          has_more: bool,
+          object: :list,
+          first_id: String?,
+          last_id: String?
         }
+
+        type data =
+          {
+            id: String,
+            created_at: Integer,
+            object: :"checkpoint.permission",
+            project_id: String
+          }
+
+        class Data < OpenAI::Internal::Type::BaseModel
+          attr_accessor id: String
+
+          attr_accessor created_at: Integer
+
+          attr_accessor object: :"checkpoint.permission"
+
+          attr_accessor project_id: String
+
+          def initialize: (
+            id: String,
+            created_at: Integer,
+            project_id: String,
+            ?object: :"checkpoint.permission"
+          ) -> void
+
+          def to_hash: -> {
+            id: String,
+            created_at: Integer,
+            object: :"checkpoint.permission",
+            project_id: String
+          }
+        end
       end
     end
 end
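The checkpoint-permission retrieve response is now a list envelope (`object: :list` with `data`, `has_more`, `first_id`, `last_id`) instead of a single record, so callers iterate `data`. A sketch of consuming the new shape, assuming `page` is the `PermissionRetrieveResponse` returned by the checkpoints permissions retrieve call:

```ruby
# Sketch: 0.11.0 wraps checkpoint permissions in a list envelope.
page.data.each do |permission|
  puts "#{permission.id} -> project #{permission.project_id} (created #{permission.created_at})"
end

puts "more results after #{page.last_id}" if page.has_more
```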
data/sig/openai/models/images_response.rbs

@@ -3,17 +3,45 @@ module OpenAI
   type images_response =
     {
       created: Integer,
+      background: OpenAI::Models::ImagesResponse::background,
       data: ::Array[OpenAI::Image],
+      output_format: OpenAI::Models::ImagesResponse::output_format,
+      quality: OpenAI::Models::ImagesResponse::quality,
+      size: OpenAI::Models::ImagesResponse::size,
       usage: OpenAI::ImagesResponse::Usage
     }

   class ImagesResponse < OpenAI::Internal::Type::BaseModel
     attr_accessor created: Integer

+    attr_reader background: OpenAI::Models::ImagesResponse::background?
+
+    def background=: (
+      OpenAI::Models::ImagesResponse::background
+    ) -> OpenAI::Models::ImagesResponse::background
+
     attr_reader data: ::Array[OpenAI::Image]?

     def data=: (::Array[OpenAI::Image]) -> ::Array[OpenAI::Image]

+    attr_reader output_format: OpenAI::Models::ImagesResponse::output_format?
+
+    def output_format=: (
+      OpenAI::Models::ImagesResponse::output_format
+    ) -> OpenAI::Models::ImagesResponse::output_format
+
+    attr_reader quality: OpenAI::Models::ImagesResponse::quality?
+
+    def quality=: (
+      OpenAI::Models::ImagesResponse::quality
+    ) -> OpenAI::Models::ImagesResponse::quality
+
+    attr_reader size: OpenAI::Models::ImagesResponse::size?
+
+    def size=: (
+      OpenAI::Models::ImagesResponse::size
+    ) -> OpenAI::Models::ImagesResponse::size
+
     attr_reader usage: OpenAI::ImagesResponse::Usage?

     def usage=: (
@@ -22,16 +50,71 @@ module OpenAI

     def initialize: (
       created: Integer,
+      ?background: OpenAI::Models::ImagesResponse::background,
       ?data: ::Array[OpenAI::Image],
+      ?output_format: OpenAI::Models::ImagesResponse::output_format,
+      ?quality: OpenAI::Models::ImagesResponse::quality,
+      ?size: OpenAI::Models::ImagesResponse::size,
       ?usage: OpenAI::ImagesResponse::Usage
     ) -> void

     def to_hash: -> {
       created: Integer,
+      background: OpenAI::Models::ImagesResponse::background,
       data: ::Array[OpenAI::Image],
+      output_format: OpenAI::Models::ImagesResponse::output_format,
+      quality: OpenAI::Models::ImagesResponse::quality,
+      size: OpenAI::Models::ImagesResponse::size,
       usage: OpenAI::ImagesResponse::Usage
     }

+    type background = :transparent | :opaque
+
+    module Background
+      extend OpenAI::Internal::Type::Enum
+
+      TRANSPARENT: :transparent
+      OPAQUE: :opaque
+
+      def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::background]
+    end
+
+    type output_format = :png | :webp | :jpeg
+
+    module OutputFormat
+      extend OpenAI::Internal::Type::Enum
+
+      PNG: :png
+      WEBP: :webp
+      JPEG: :jpeg
+
+      def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::output_format]
+    end
+
+    type quality = :low | :medium | :high
+
+    module Quality
+      extend OpenAI::Internal::Type::Enum
+
+      LOW: :low
+      MEDIUM: :medium
+      HIGH: :high
+
+      def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::quality]
+    end
+
+    type size = :"1024x1024" | :"1024x1536" | :"1536x1024"
+
+    module Size
+      extend OpenAI::Internal::Type::Enum
+
+      SIZE_1024X1024: :"1024x1024"
+      SIZE_1024X1536: :"1024x1536"
+      SIZE_1536X1024: :"1536x1024"
+
+      def self?.values: -> ::Array[OpenAI::Models::ImagesResponse::size]
+    end
+
     type usage =
       {
         input_tokens: Integer,
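`ImagesResponse` now carries the generation settings (`background`, `output_format`, `quality`, `size`) next to `usage`. A hedged sketch of reading them; the `images.generate` call mirrors the gem's resource layout and the model/prompt values are illustrative:

```ruby
client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

response = client.images.generate(model: "gpt-image-1", prompt: "a watercolor lighthouse")

# These are attr_reader fields, so they may be nil when the server omits them.
puts "format:      #{response.output_format}"  # :png, :webp or :jpeg
puts "quality:     #{response.quality}"        # :low, :medium or :high
puts "size:        #{response.size}"           # :"1024x1024", :"1024x1536" or :"1536x1024"
puts "transparent: #{response.background == :transparent}"
```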
data/sig/openai/models/responses/response.rbs

@@ -19,12 +19,14 @@ module OpenAI
       top_p: Float?,
       background: bool?,
       max_output_tokens: Integer?,
+      max_tool_calls: Integer?,
       previous_response_id: String?,
       prompt: OpenAI::Responses::ResponsePrompt?,
       reasoning: OpenAI::Reasoning?,
       service_tier: OpenAI::Models::Responses::Response::service_tier?,
       status: OpenAI::Models::Responses::response_status,
       text: OpenAI::Responses::ResponseTextConfig,
+      top_logprobs: Integer?,
       truncation: OpenAI::Models::Responses::Response::truncation?,
       usage: OpenAI::Responses::ResponseUsage,
       user: String
@@ -63,6 +65,8 @@ module OpenAI

     attr_accessor max_output_tokens: Integer?

+    attr_accessor max_tool_calls: Integer?
+
     attr_accessor previous_response_id: String?

     attr_accessor prompt: OpenAI::Responses::ResponsePrompt?
@@ -83,6 +87,8 @@ module OpenAI
       OpenAI::Responses::ResponseTextConfig
     ) -> OpenAI::Responses::ResponseTextConfig

+    attr_accessor top_logprobs: Integer?
+
     attr_accessor truncation: OpenAI::Models::Responses::Response::truncation?

     attr_reader usage: OpenAI::Responses::ResponseUsage?
@@ -111,12 +117,14 @@ module OpenAI
       top_p: Float?,
       ?background: bool?,
       ?max_output_tokens: Integer?,
+      ?max_tool_calls: Integer?,
       ?previous_response_id: String?,
       ?prompt: OpenAI::Responses::ResponsePrompt?,
       ?reasoning: OpenAI::Reasoning?,
       ?service_tier: OpenAI::Models::Responses::Response::service_tier?,
       ?status: OpenAI::Models::Responses::response_status,
       ?text: OpenAI::Responses::ResponseTextConfig,
+      ?top_logprobs: Integer?,
       ?truncation: OpenAI::Models::Responses::Response::truncation?,
       ?usage: OpenAI::Responses::ResponseUsage,
       ?user: String,
@@ -140,12 +148,14 @@ module OpenAI
       top_p: Float?,
       background: bool?,
       max_output_tokens: Integer?,
+      max_tool_calls: Integer?,
       previous_response_id: String?,
       prompt: OpenAI::Responses::ResponsePrompt?,
       reasoning: OpenAI::Reasoning?,
       service_tier: OpenAI::Models::Responses::Response::service_tier?,
       status: OpenAI::Models::Responses::response_status,
       text: OpenAI::Responses::ResponseTextConfig,
+      top_logprobs: Integer?,
       truncation: OpenAI::Models::Responses::Response::truncation?,
       usage: OpenAI::Responses::ResponseUsage,
       user: String
@@ -198,6 +208,7 @@ module OpenAI
       OpenAI::Models::Responses::tool_choice_options
       | OpenAI::Responses::ToolChoiceTypes
       | OpenAI::Responses::ToolChoiceFunction
+      | OpenAI::Responses::ToolChoiceMcp

     module ToolChoice
       extend OpenAI::Internal::Type::Union
@@ -205,7 +216,7 @@ module OpenAI
       def self?.variants: -> ::Array[OpenAI::Models::Responses::Response::tool_choice]
     end

-    type service_tier = :auto | :default | :flex | :scale
+    type service_tier = :auto | :default | :flex | :scale | :priority

     module ServiceTier
       extend OpenAI::Internal::Type::Enum
@@ -214,6 +225,7 @@ module OpenAI
       DEFAULT: :default
       FLEX: :flex
       SCALE: :scale
+      PRIORITY: :priority

       def self?.values: -> ::Array[OpenAI::Models::Responses::Response::service_tier]
     end
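On the Responses side, the `Response` model (and the expanded create params listed above) pick up `max_tool_calls` and `top_logprobs`, the tool-choice union accepts `ToolChoiceMcp`, and `service_tier` likewise gains `:priority`. A hedged sketch of a request using the new options; the parameter names follow the signatures above, while the surrounding `responses.create` call and values are illustrative:

```ruby
client = OpenAI::Client.new(api_key: ENV.fetch("OPENAI_API_KEY"))

response = client.responses.create(
  model: "gpt-4.1",
  input: "Summarize the latest deploy log",
  max_tool_calls: 3,      # cap the number of built-in tool invocations
  top_logprobs: 5,        # request per-token log probabilities
  service_tier: :priority
)

puts response.max_tool_calls
puts response.top_logprobs
puts response.service_tier
```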
data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs

@@ -4,6 +4,7 @@ module OpenAI
     type response_code_interpreter_call_code_delta_event =
       {
         delta: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call_code.delta"
@@ -12,6 +13,8 @@ module OpenAI
     class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
       attr_accessor delta: String

+      attr_accessor item_id: String
+
       attr_accessor output_index: Integer

       attr_accessor sequence_number: Integer
@@ -20,6 +23,7 @@ module OpenAI

       def initialize: (
         delta: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         ?type: :"response.code_interpreter_call_code.delta"
@@ -27,6 +31,7 @@ module OpenAI

       def to_hash: -> {
         delta: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call_code.delta"
data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs

@@ -4,6 +4,7 @@ module OpenAI
     type response_code_interpreter_call_code_done_event =
       {
         code: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call_code.done"
@@ -12,6 +13,8 @@ module OpenAI
     class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
       attr_accessor code: String

+      attr_accessor item_id: String
+
       attr_accessor output_index: Integer

       attr_accessor sequence_number: Integer
@@ -20,6 +23,7 @@ module OpenAI

       def initialize: (
         code: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         ?type: :"response.code_interpreter_call_code.done"
@@ -27,6 +31,7 @@ module OpenAI

       def to_hash: -> {
         code: String,
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call_code.done"
data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs

@@ -3,14 +3,14 @@ module OpenAI
   module Responses
     type response_code_interpreter_call_completed_event =
       {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.completed"
       }

     class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel
-      attr_accessor
+      attr_accessor item_id: String

       attr_accessor output_index: Integer

@@ -19,14 +19,14 @@ module OpenAI
       attr_accessor type: :"response.code_interpreter_call.completed"

       def initialize: (
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         ?type: :"response.code_interpreter_call.completed"
       ) -> void

       def to_hash: -> {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.completed"
data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs

@@ -3,14 +3,14 @@ module OpenAI
   module Responses
     type response_code_interpreter_call_in_progress_event =
      {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.in_progress"
       }

     class ResponseCodeInterpreterCallInProgressEvent < OpenAI::Internal::Type::BaseModel
-      attr_accessor
+      attr_accessor item_id: String

       attr_accessor output_index: Integer

@@ -19,14 +19,14 @@ module OpenAI
       attr_accessor type: :"response.code_interpreter_call.in_progress"

       def initialize: (
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         ?type: :"response.code_interpreter_call.in_progress"
       ) -> void

       def to_hash: -> {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.in_progress"
data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs

@@ -3,14 +3,14 @@ module OpenAI
   module Responses
     type response_code_interpreter_call_interpreting_event =
       {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.interpreting"
       }

     class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::Internal::Type::BaseModel
-      attr_accessor
+      attr_accessor item_id: String

       attr_accessor output_index: Integer

@@ -19,14 +19,14 @@ module OpenAI
       attr_accessor type: :"response.code_interpreter_call.interpreting"

       def initialize: (
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         ?type: :"response.code_interpreter_call.interpreting"
       ) -> void

       def to_hash: -> {
-
+        item_id: String,
         output_index: Integer,
         sequence_number: Integer,
         type: :"response.code_interpreter_call.interpreting"
|