openai 0.25.1 → 0.27.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +32 -0
- data/README.md +1 -1
- data/lib/openai/helpers/streaming/chat_completion_stream.rb +683 -0
- data/lib/openai/helpers/streaming/chat_events.rb +181 -0
- data/lib/openai/helpers/streaming/exceptions.rb +29 -0
- data/lib/openai/helpers/streaming/response_stream.rb +0 -2
- data/lib/openai/internal/util.rb +2 -1
- data/lib/openai/models/all_models.rb +1 -0
- data/lib/openai/models/chat/parsed_chat_completion.rb +15 -0
- data/lib/openai/models/responses_model.rb +1 -0
- data/lib/openai/resources/chat/completions.rb +78 -37
- data/lib/openai/resources/responses.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +5 -1
- data/rbi/openai/helpers/streaming/events.rbi +120 -0
- data/rbi/openai/models/all_models.rbi +5 -0
- data/rbi/openai/models/responses_model.rbi +5 -0
- data/rbi/openai/streaming.rbi +28 -1
- data/sig/openai/models/all_models.rbs +2 -0
- data/sig/openai/models/responses_model.rbs +2 -0
- metadata +7 -3
- /data/lib/openai/helpers/streaming/{events.rb → response_events.rb} +0 -0
data/lib/openai/helpers/streaming/chat_events.rb
ADDED

```diff
@@ -0,0 +1,181 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Helpers
+    module Streaming
+      # Raw streaming chunk event with accumulated completion snapshot.
+      #
+      # This is the fundamental event that wraps each raw chunk from the API
+      # along with the accumulated state up to that point. All other events
+      # are derived from processing these chunks.
+      #
+      # @example
+      #   event.chunk    # => ChatCompletionChunk (raw API response)
+      #   event.snapshot # => ParsedChatCompletion (accumulated state)
+      class ChatChunkEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :chunk
+        required :chunk, -> { OpenAI::Chat::ChatCompletionChunk }
+        required :snapshot, -> { OpenAI::Chat::ParsedChatCompletion }
+      end
+
+      # Incremental text content update event.
+      #
+      # Emitted as the assistant's text response is being generated. Each event
+      # contains the new text fragment (delta) and the complete accumulated
+      # text so far (snapshot).
+      #
+      # @example
+      #   event.delta    # => "Hello" (new fragment)
+      #   event.snapshot # => "Hello world" (accumulated text)
+      #   event.parsed   # => {name: "John"} (if using structured outputs)
+      class ChatContentDeltaEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"content.delta"
+        required :delta, String
+        required :snapshot, String
+        optional :parsed, Object # Partially parsed structured output
+      end
+
+      # Text content completion event.
+      #
+      # Emitted when the assistant has finished generating text content.
+      # Contains the complete text and, if applicable, the fully parsed
+      # structured output.
+      #
+      # @example
+      #   event.content # => "Hello world! How can I help?"
+      #   event.parsed  # => {name: "John", age: 30} (if using structured outputs)
+      class ChatContentDoneEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"content.done"
+        required :content, String
+        optional :parsed, Object # Fully parsed structured output
+      end
+
+      # Incremental refusal update event.
+      #
+      # Emitted when the assistant is refusing to fulfill a request.
+      # Contains the new refusal text fragment and accumulated refusal message.
+      #
+      # @example
+      #   event.delta    # => "I cannot"
+      #   event.snapshot # => "I cannot help with that request"
+      class ChatRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"refusal.delta"
+        required :delta, String
+        required :snapshot, String
+      end
+
+      # Refusal completion event.
+      #
+      # Emitted when the assistant has finished generating a refusal message.
+      # Contains the complete refusal text.
+      #
+      # @example
+      #   event.refusal # => "I cannot help with that request as it violates..."
+      class ChatRefusalDoneEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"refusal.done"
+        required :refusal, String
+      end
+
+      # Incremental function tool call arguments update.
+      #
+      # Emitted as function arguments are being streamed. Provides both the
+      # raw JSON fragments and incrementally parsed arguments for strict tools.
+      #
+      # @example
+      #   event.name            # => "get_weather"
+      #   event.index           # => 0 (tool call index in array)
+      #   event.arguments_delta # => '{"location": "San' (new fragment)
+      #   event.arguments       # => '{"location": "San Francisco"' (accumulated JSON)
+      #   event.parsed          # => {location: "San Francisco"} (if strict: true)
+      class ChatFunctionToolCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"tool_calls.function.arguments.delta"
+        required :name, String
+        required :index, Integer
+        required :arguments_delta, String
+        required :arguments, String
+        required :parsed, Object
+      end
+
+      # Function tool call arguments completion event.
+      #
+      # Emitted when a function tool call's arguments are complete.
+      # For tools defined with `strict: true`, the arguments will be fully
+      # parsed and validated. For non-strict tools, only raw JSON is available.
+      #
+      # @example With strict tool
+      #   event.name      # => "get_weather"
+      #   event.arguments # => '{"location": "San Francisco", "unit": "celsius"}'
+      #   event.parsed    # => {location: "San Francisco", unit: "celsius"}
+      #
+      # @example Without strict tool
+      #   event.parsed # => nil (parse JSON from event.arguments manually)
+      class ChatFunctionToolCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"tool_calls.function.arguments.done"
+        required :name, String
+        required :index, Integer
+        required :arguments, String
+        required :parsed, Object # (only for strict: true tools)
+      end
+
+      # Incremental logprobs update for content tokens.
+      #
+      # Emitted when logprobs are requested and content tokens are being generated.
+      # Contains log probability information for the new tokens and accumulated
+      # logprobs for all content tokens so far.
+      #
+      # @example
+      #   event.content[0].token        # => "Hello"
+      #   event.content[0].logprob      # => -0.31725305
+      #   event.content[0].top_logprobs # => [{token: "Hello", logprob: -0.31725305}, ...]
+      #   event.snapshot                # => [all logprobs accumulated so far]
+      class ChatLogprobsContentDeltaEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"logprobs.content.delta"
+        required :content, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+        required :snapshot, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+      end
+
+      # Logprobs completion event for content tokens.
+      #
+      # Emitted when content generation is complete and logprobs were requested.
+      # Contains the complete array of log probabilities for all content tokens.
+      #
+      # @example
+      #   event.content.each do |logprob|
+      #     puts "Token: #{logprob.token}, Logprob: #{logprob.logprob}"
+      #   end
+      class ChatLogprobsContentDoneEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"logprobs.content.done"
+        required :content, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+      end
+
+      # Incremental logprobs update for refusal tokens.
+      #
+      # Emitted when logprobs are requested and refusal tokens are being generated.
+      # Contains log probability information for refusal message tokens.
+      #
+      # @example
+      #   event.refusal[0].token   # => "I"
+      #   event.refusal[0].logprob # => -0.12345
+      #   event.snapshot           # => [all refusal logprobs accumulated so far]
+      class ChatLogprobsRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"logprobs.refusal.delta"
+        required :refusal, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+        required :snapshot, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+      end
+
+      # Logprobs completion event for refusal tokens.
+      #
+      # Emitted when refusal generation is complete and logprobs were requested.
+      # Contains the complete array of log probabilities for all refusal tokens.
+      #
+      # @example
+      #   event.refusal.each do |logprob|
+      #     puts "Refusal token: #{logprob.token}, Logprob: #{logprob.logprob}"
+      #   end
+      class ChatLogprobsRefusalDoneEvent < OpenAI::Internal::Type::BaseModel
+        required :type, const: :"logprobs.refusal.done"
+        required :refusal, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] }
+      end
+    end
+  end
+end
```
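A minimal consumption sketch for these events (client construction, model name, and prompt are illustrative, not part of this diff; the `stream` helper itself is added to `OpenAI::Resources::Chat::Completions` later in this release):

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

stream = client.chat.completions.stream(
  model: "gpt-4o",
  messages: [{role: :user, content: "Tell me a one-line joke."}]
)

stream.each do |event|
  case event
  when OpenAI::Streaming::ChatContentDeltaEvent
    print(event.delta) # event.snapshot carries the full text accumulated so far
  when OpenAI::Streaming::ChatRefusalDoneEvent
    warn("refused: #{event.refusal}")
  end
end
```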
data/lib/openai/helpers/streaming/exceptions.rb
ADDED

```diff
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class StreamError < StandardError; end
+
+      class LengthFinishReasonError < StreamError
+        attr_reader :completion
+
+        def initialize(completion:)
+          @completion = completion
+          super("Stream finished due to length limit")
+        end
+      end
+
+      class ContentFilterFinishReasonError < StreamError
+        def initialize
+          super("Stream finished due to content filter")
+        end
+      end
+    end
+  end
+end
+
+module OpenAI
+  LengthFinishReasonError = Helpers::Streaming::LengthFinishReasonError
+  ContentFilterFinishReasonError = Helpers::Streaming::ContentFilterFinishReasonError
+end
```
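The top-level aliases let callers rescue these errors without reaching into `Helpers::Streaming`. A hedged handling sketch (`stream` built as in the example above; the exact raise sites live in `ChatCompletionStream`):

```ruby
begin
  stream.each { |event| puts(event.type) }
rescue OpenAI::LengthFinishReasonError => e
  # The accumulated completion at the point of truncation rides along on the error.
  warn("hit the length limit; partial: #{e.completion.choices.first&.message&.content.inspect}")
rescue OpenAI::ContentFilterFinishReasonError
  warn("stream stopped by the content filter")
end
```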
data/lib/openai/internal/util.rb
CHANGED

```diff
@@ -566,7 +566,8 @@ module OpenAI
     #
     # @return [Array(String, Enumerable<String>)]
     private def encode_multipart_streaming(body)
-
+      # RFC 1521 Section 7.2.1 says we should have 70 char maximum for boundary length
+      boundary = SecureRandom.urlsafe_base64(46)

       closing = []
       strio = writable_enum do |y|
```
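The 46-byte size follows from the encoding arithmetic: URL-safe Base64 emits 4 characters per 3 input bytes, and `SecureRandom.urlsafe_base64` omits padding by default, so 46 random bytes yield a 62-character boundary, safely under the 70-character cap. A quick check:

```ruby
require "securerandom"

boundary = SecureRandom.urlsafe_base64(46)
boundary.length # => 62 == (46 * 8 / 6.0).ceil, within RFC 1521's 70-character limit
```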
data/lib/openai/models/all_models.rb
CHANGED

```diff
@@ -24,6 +24,7 @@ module OpenAI
       O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
       COMPUTER_USE_PREVIEW = :"computer-use-preview"
       COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
+      GPT_5_CODEX = :"gpt-5-codex"

       # @!method self.values
       #   @return [Array<Symbol>]
```
data/lib/openai/models/chat/parsed_chat_completion.rb
ADDED

```diff
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Models
+    module Chat
+      class ParsedChoice < OpenAI::Models::Chat::ChatCompletion::Choice
+        optional :finish_reason, enum: -> { OpenAI::Chat::ChatCompletion::Choice::FinishReason }, nil?: true
+      end
+
+      class ParsedChatCompletion < ChatCompletion
+        required :choices, -> { OpenAI::Internal::Type::ArrayOf[ParsedChoice] }
+      end
+    end
+  end
+end
```
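`ParsedChoice` relaxes `finish_reason` to optional and nilable because streaming snapshots are assembled before the model finishes, when no finish reason exists yet, whereas the base `ChatCompletion::Choice` declares it required. Illustrative mid-stream values:

```ruby
# While the stream is in flight, a snapshot's choices may legally lack a
# finish_reason (values below are illustrative, not from the diff).
snapshot.choices.first.finish_reason   # => nil until the model stops
snapshot.choices.first.message.content # => "Hello wor"
```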
data/lib/openai/models/responses_model.rb
CHANGED

```diff
@@ -24,6 +24,7 @@ module OpenAI
       O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
       COMPUTER_USE_PREVIEW = :"computer-use-preview"
       COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
+      GPT_5_CODEX = :"gpt-5-codex"

       # @!method self.values
       #   @return [Array<Symbol>]
```
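Because `gpt-5-codex` is registered under `ResponsesOnlyModel`, the SDK treats it as a Responses API model rather than a Chat Completions one. A hedged usage sketch (`output_text` is assumed from the SDK's response conveniences; the prompt is illustrative):

```ruby
response = client.responses.create(
  model: :"gpt-5-codex", # or the new GPT_5_CODEX constant
  input: "Write a Ruby one-liner that reverses each word in a string."
)
puts(response.output_text) # assumed convenience accessor over response.output
```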
data/lib/openai/resources/chat/completions.rb
CHANGED

```diff
@@ -110,6 +110,54 @@ module OpenAI
           raise ArgumentError.new(message)
         end

+        model, tool_models = get_structured_output_models(parsed)
+
+        # rubocop:disable Metrics/BlockLength
+        unwrap = ->(raw) do
+          if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
+            raw[:choices]&.each do |choice|
+              message = choice.fetch(:message)
+              begin
+                content = message.fetch(:content)
+                parsed = content.nil? ? nil : JSON.parse(content, symbolize_names: true)
+              rescue JSON::ParserError => e
+                parsed = e
+              end
+              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+              message.store(:parsed, coerced)
+            end
+          end
+          raw[:choices]&.each do |choice|
+            choice.dig(:message, :tool_calls)&.each do |tool_call|
+              func = tool_call.fetch(:function)
+              next if (model = tool_models[func.fetch(:name)]).nil?
+
+              begin
+                arguments = func.fetch(:arguments)
+                parsed = arguments.nil? ? nil : JSON.parse(arguments, symbolize_names: true)
+              rescue JSON::ParserError => e
+                parsed = e
+              end
+              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
+              func.store(:parsed, coerced)
+            end
+          end
+
+          raw
+        end
+        # rubocop:enable Metrics/BlockLength
+
+        @client.request(
+          method: :post,
+          path: "chat/completions",
+          body: parsed,
+          unwrap: unwrap,
+          model: OpenAI::Chat::ChatCompletion,
+          options: options
+        )
+      end
+
+      def get_structured_output_models(parsed)
         model = nil
         tool_models = {}
         case parsed
@@ -162,53 +210,46 @@ module OpenAI
         else
         end

-
-
-        if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
-          raw[:choices]&.each do |choice|
-            message = choice.fetch(:message)
-            begin
-              content = message.fetch(:content)
-              parsed = content.nil? ? nil : JSON.parse(content, symbolize_names: true)
-            rescue JSON::ParserError => e
-              parsed = e
-            end
-            coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-            message.store(:parsed, coerced)
-          end
-        end
-        raw[:choices]&.each do |choice|
-          choice.dig(:message, :tool_calls)&.each do |tool_call|
-            func = tool_call.fetch(:function)
-            next if (model = tool_models[func.fetch(:name)]).nil?
+        [model, tool_models]
+      end

-
-
-            parsed = arguments.nil? ? nil : JSON.parse(arguments, symbolize_names: true)
-          rescue JSON::ParserError => e
-            parsed = e
-          end
-          coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
-          func.store(:parsed, coerced)
-        end
-      end
+      def build_tools_with_models(tools, tool_models)
+        return [] if tools.nil?

-
+        tools.map do |tool|
+          next tool unless tool[:type] == :function
+
+          function_name = tool.dig(:function, :name)
+          model = tool_models[function_name]
+
+          model ? tool.merge(model: model) : tool
         end
-
+      end

-
+      def stream(params)
+        parsed, options = OpenAI::Chat::CompletionCreateParams.dump_request(params)
+
+        parsed.store(:stream, true)
+
+        response_format, tool_models = get_structured_output_models(parsed)
+
+        input_tools = build_tools_with_models(parsed[:tools], tool_models)
+
+        raw_stream = @client.request(
           method: :post,
           path: "chat/completions",
+          headers: {"accept" => "text/event-stream"},
           body: parsed,
-
-          model: OpenAI::Chat::ChatCompletion,
+          stream: OpenAI::Internal::Stream,
+          model: OpenAI::Chat::ChatCompletionChunk,
           options: options
         )
-      end

-
-
+        OpenAI::Helpers::Streaming::ChatCompletionStream.new(
+          raw_stream: raw_stream,
+          response_format: response_format,
+          input_tools: input_tools
+        )
       end

       # See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart.
```

(Several removed lines in the second hunk were not captured by this diff view and are shown blank above.)
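The refactor extracts the old inline JSON-coercion code into `get_structured_output_models` (now shared by `create` and the new `stream`) and adds `build_tools_with_models` so strict tool schemas survive into the stream for incremental parsing. A sketch of the new entry point with structured outputs — the schema class and model name are illustrative:

```ruby
class Contact < OpenAI::BaseModel
  required :name, String
  required :email, String
end

stream = client.chat.completions.stream(
  model: "gpt-4o",
  messages: [{role: :user, content: "Extract: Jane Doe <jane@example.com>"}],
  response_format: Contact
)

stream.each do |event|
  if event.is_a?(OpenAI::Streaming::ChatContentDeltaEvent)
    p(event.parsed) # partially parsed Contact fields as the JSON arrives
  end
end
```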
data/lib/openai/resources/responses.rb
CHANGED

```diff
@@ -85,7 +85,7 @@ module OpenAI
       def create(params = {})
         parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
         if parsed[:stream]
-          message = "Please use `#
+          message = "Please use `#stream` for the streaming use case."
           raise ArgumentError.new(message)
         end

```

(The removed line above is truncated in this diff view.)
data/lib/openai/version.rb
CHANGED

The version-bump hunk was not captured in this view; per the package metadata it is the one-line change:

```diff
-  VERSION = "0.25.1"
+  VERSION = "0.27.0"
```
data/lib/openai.rb
CHANGED
```diff
@@ -195,6 +195,7 @@ require_relative "openai/models/chat/chat_completion_assistant_message_param"
 require_relative "openai/models/chat/chat_completion_audio"
 require_relative "openai/models/chat/chat_completion_audio_param"
 require_relative "openai/models/chat/chat_completion_chunk"
+require_relative "openai/models/chat/parsed_chat_completion"
 require_relative "openai/models/chat/chat_completion_content_part"
 require_relative "openai/models/chat/chat_completion_content_part_image"
 require_relative "openai/models/chat/chat_completion_content_part_input_audio"
@@ -697,6 +698,9 @@ require_relative "openai/resources/vector_stores"
 require_relative "openai/resources/vector_stores/file_batches"
 require_relative "openai/resources/vector_stores/files"
 require_relative "openai/resources/webhooks"
-require_relative "openai/helpers/streaming/events"
+require_relative "openai/helpers/streaming/response_events"
 require_relative "openai/helpers/streaming/response_stream"
+require_relative "openai/helpers/streaming/exceptions"
+require_relative "openai/helpers/streaming/chat_events"
+require_relative "openai/helpers/streaming/chat_completion_stream"
 require_relative "openai/streaming"
```

(The removed `require_relative` was truncated in this view; it is completed here from the `events.rb → response_events.rb` rename in the file list.)
data/rbi/openai/helpers/streaming/events.rbi
CHANGED

```diff
@@ -26,6 +26,126 @@ module OpenAI
        def response
        end
      end
+
+      class ChatChunkEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(T.untyped) }
+        def chunk
+        end
+
+        sig { returns(T.untyped) }
+        def snapshot
+        end
+      end
+
+      class ChatContentDeltaEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def delta
+        end
+
+        sig { returns(String) }
+        def snapshot
+        end
+
+        sig { returns(T.untyped) }
+        def parsed
+        end
+      end
+
+      class ChatContentDoneEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def content
+        end
+
+        sig { returns(T.untyped) }
+        def parsed
+        end
+      end
+
+      class ChatRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def delta
+        end
+
+        sig { returns(String) }
+        def snapshot
+        end
+      end
+
+      class ChatRefusalDoneEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def refusal
+        end
+      end
+
+      class ChatFunctionToolCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def name
+        end
+
+        sig { returns(Integer) }
+        def index
+        end
+
+        sig { returns(String) }
+        def arguments_delta
+        end
+
+        sig { returns(String) }
+        def arguments
+        end
+
+        sig { returns(T.untyped) }
+        def parsed_arguments
+        end
+      end
+
+      class ChatFunctionToolCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(String) }
+        def name
+        end
+
+        sig { returns(Integer) }
+        def index
+        end
+
+        sig { returns(String) }
+        def arguments
+        end
+
+        sig { returns(T.untyped) }
+        def parsed_arguments
+        end
+      end
+
+      class ChatLogprobsContentDeltaEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(T.untyped) }
+        def content
+        end
+      end
+
+      class ChatLogprobsContentDoneEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(T.untyped) }
+        def content
+        end
+      end
+
+      class ChatLogprobsRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(T.untyped) }
+        def refusal
+        end
+      end
+
+      class ChatLogprobsRefusalDoneEvent < OpenAI::Internal::Type::BaseModel
+        sig { returns(T.untyped) }
+        def refusal
+        end
+      end
+
+      class ChatCompletionStream
+        sig { returns(T.untyped) }
+        def each
+        end
+      end
    end
  end
end
```
data/rbi/openai/models/responses_model.rbi
CHANGED

```diff
@@ -73,6 +73,11 @@ module OpenAI
           :"computer-use-preview-2025-03-11",
           OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
         )
+      GPT_5_CODEX =
+        T.let(
+          :"gpt-5-codex",
+          OpenAI::ResponsesModel::ResponsesOnlyModel::TaggedSymbol
+        )

       sig do
         override.returns(
```
data/rbi/openai/streaming.rbi
CHANGED
```diff
@@ -1,5 +1,32 @@
 # typed: strong

 module OpenAI
-
+  module Streaming
+    ResponseTextDeltaEvent = OpenAI::Helpers::Streaming::ResponseTextDeltaEvent
+    ResponseTextDoneEvent = OpenAI::Helpers::Streaming::ResponseTextDoneEvent
+    ResponseFunctionCallArgumentsDeltaEvent =
+      OpenAI::Helpers::Streaming::ResponseFunctionCallArgumentsDeltaEvent
+    ResponseCompletedEvent = OpenAI::Helpers::Streaming::ResponseCompletedEvent
+
+    ChatChunkEvent = OpenAI::Helpers::Streaming::ChatChunkEvent
+    ChatContentDeltaEvent = OpenAI::Helpers::Streaming::ChatContentDeltaEvent
+    ChatContentDoneEvent = OpenAI::Helpers::Streaming::ChatContentDoneEvent
+    ChatRefusalDeltaEvent = OpenAI::Helpers::Streaming::ChatRefusalDeltaEvent
+    ChatRefusalDoneEvent = OpenAI::Helpers::Streaming::ChatRefusalDoneEvent
+    ChatFunctionToolCallArgumentsDeltaEvent =
+      OpenAI::Helpers::Streaming::ChatFunctionToolCallArgumentsDeltaEvent
+    ChatFunctionToolCallArgumentsDoneEvent =
+      OpenAI::Helpers::Streaming::ChatFunctionToolCallArgumentsDoneEvent
+    ChatLogprobsContentDeltaEvent =
+      OpenAI::Helpers::Streaming::ChatLogprobsContentDeltaEvent
+    ChatLogprobsContentDoneEvent =
+      OpenAI::Helpers::Streaming::ChatLogprobsContentDoneEvent
+    ChatLogprobsRefusalDeltaEvent =
+      OpenAI::Helpers::Streaming::ChatLogprobsRefusalDeltaEvent
+    ChatLogprobsRefusalDoneEvent =
+      OpenAI::Helpers::Streaming::ChatLogprobsRefusalDoneEvent
+
+    ResponseStream = OpenAI::Helpers::Streaming::ResponseStream
+    ChatCompletionStream = OpenAI::Helpers::Streaming::ChatCompletionStream
+  end
 end
```

(The single removed line was not captured by this diff view.)
data/sig/openai/models/all_models.rbs
CHANGED

```diff
@@ -19,6 +19,7 @@ module OpenAI
       | :"o4-mini-deep-research-2025-06-26"
       | :"computer-use-preview"
       | :"computer-use-preview-2025-03-11"
+      | :"gpt-5-codex"

     module ResponsesOnlyModel
       extend OpenAI::Internal::Type::Enum
@@ -33,6 +34,7 @@ module OpenAI
       O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
       COMPUTER_USE_PREVIEW: :"computer-use-preview"
       COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"
+      GPT_5_CODEX: :"gpt-5-codex"

       def self?.values: -> ::Array[OpenAI::Models::AllModels::responses_only_model]
     end
```
data/sig/openai/models/responses_model.rbs
CHANGED

```diff
@@ -19,6 +19,7 @@ module OpenAI
       | :"o4-mini-deep-research-2025-06-26"
       | :"computer-use-preview"
       | :"computer-use-preview-2025-03-11"
+      | :"gpt-5-codex"

     module ResponsesOnlyModel
       extend OpenAI::Internal::Type::Enum
@@ -33,6 +34,7 @@ module OpenAI
       O4_MINI_DEEP_RESEARCH_2025_06_26: :"o4-mini-deep-research-2025-06-26"
       COMPUTER_USE_PREVIEW: :"computer-use-preview"
       COMPUTER_USE_PREVIEW_2025_03_11: :"computer-use-preview-2025-03-11"
+      GPT_5_CODEX: :"gpt-5-codex"

       def self?.values: -> ::Array[OpenAI::Models::ResponsesModel::responses_only_model]
     end
```