openai 0.34.1 → 0.35.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +23 -0
- data/README.md +1 -1
- data/lib/openai/internal/transport/base_client.rb +7 -1
- data/lib/openai/internal/transport/pooled_net_requester.rb +30 -24
- data/lib/openai/models/custom_tool_input_format.rb +6 -0
- data/lib/openai/models/image_edit_params.rb +1 -1
- data/lib/openai/models/realtime/realtime_session_create_request.rb +14 -3
- data/lib/openai/models/realtime/realtime_session_create_response.rb +15 -4
- data/lib/openai/models/realtime/realtime_tracing_config.rb +1 -1
- data/lib/openai/models/realtime/realtime_truncation.rb +13 -2
- data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb +38 -4
- data/lib/openai/models/reasoning.rb +4 -0
- data/lib/openai/models/responses/custom_tool.rb +3 -0
- data/lib/openai/models/responses/easy_input_message.rb +3 -3
- data/lib/openai/models/responses/file_search_tool.rb +33 -1
- data/lib/openai/models/responses/response_content.rb +1 -4
- data/lib/openai/models/responses/response_input_content.rb +1 -4
- data/lib/openai/models/responses/response_input_item.rb +2 -2
- data/lib/openai/models/responses/response_input_message_item.rb +2 -2
- data/lib/openai/models/responses/response_output_text.rb +8 -8
- data/lib/openai/models/responses/tool.rb +30 -2
- data/lib/openai/models/vector_stores/file_batch_create_params.rb +77 -11
- data/lib/openai/models/video.rb +9 -1
- data/lib/openai/resources/files.rb +13 -14
- data/lib/openai/resources/images.rb +2 -2
- data/lib/openai/resources/realtime/calls.rb +1 -1
- data/lib/openai/resources/responses.rb +4 -0
- data/lib/openai/resources/vector_stores/file_batches.rb +6 -4
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/internal/transport/base_client.rbi +5 -0
- data/rbi/openai/internal/type/base_model.rbi +8 -4
- data/rbi/openai/models/custom_tool_input_format.rbi +2 -0
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +26 -4
- data/rbi/openai/models/realtime/realtime_truncation.rbi +13 -2
- data/rbi/openai/models/realtime/realtime_truncation_retention_ratio.rbi +84 -6
- data/rbi/openai/models/reasoning.rbi +6 -0
- data/rbi/openai/models/responses/custom_tool.rbi +2 -0
- data/rbi/openai/models/responses/file_search_tool.rbi +65 -0
- data/rbi/openai/models/responses/response_content.rbi +0 -1
- data/rbi/openai/models/responses/response_input_content.rbi +1 -2
- data/rbi/openai/models/responses/response_input_item.rbi +3 -6
- data/rbi/openai/models/responses/response_input_message_item.rbi +1 -2
- data/rbi/openai/models/responses/response_output_text.rbi +10 -19
- data/rbi/openai/models/responses/tool.rbi +73 -4
- data/rbi/openai/models/vector_stores/file_batch_create_params.rbi +181 -12
- data/rbi/openai/models/video.rbi +8 -0
- data/rbi/openai/resources/files.rbi +13 -14
- data/rbi/openai/resources/realtime/calls.rbi +17 -6
- data/rbi/openai/resources/vector_stores/file_batches.rbi +15 -5
- data/sig/openai/internal/transport/base_client.rbs +2 -0
- data/sig/openai/models/realtime/realtime_truncation_retention_ratio.rbs +29 -2
- data/sig/openai/models/responses/file_search_tool.rbs +24 -0
- data/sig/openai/models/responses/response_content.rbs +0 -1
- data/sig/openai/models/responses/response_input_content.rbs +0 -1
- data/sig/openai/models/responses/response_output_text.rbs +7 -11
- data/sig/openai/models/responses/tool.rbs +30 -3
- data/sig/openai/models/vector_stores/file_batch_create_params.rbs +56 -6
- data/sig/openai/models/video.rbs +5 -0
- data/sig/openai/resources/vector_stores/file_batches.rbs +2 -1
- metadata +4 -3
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: abaa011e78b024745cccd6b1efad5fb9a3c5201aeb60257d0237c6c78097bb0c
+  data.tar.gz: 9dde864dac2f4b5535cf3793093a7d06c4675f1d1f1f7d0f25eaed5e51e76d6e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 77f016fb4b47043ee5e6ce97095a948d1b255f919e17548e2c495c3545d3cad7506fcf01c6b473df5114be3d70910c0a858da727f5284a1bbae16178c96ec739
+  data.tar.gz: 7b4ff9197e751ebc3266d56b5e714b784215c868e5df1aecc716fba0cfd3e4dcaeefb5d051e9132e126191e098441fc1429a1d93ab49c71af67afdc4b1354a4a
data/CHANGELOG.md
CHANGED

@@ -1,5 +1,28 @@
 # Changelog

+## 0.35.0 (2025-11-03)
+
+Full Changelog: [v0.34.1...v0.35.0](https://github.com/openai/openai-ruby/compare/v0.34.1...v0.35.0)
+
+### Features
+
+* **api:** Realtime API token_limits, Hybrid searching ranking options ([f7f04ea](https://github.com/openai/openai-ruby/commit/f7f04ea1816e005cfc7325f3c97b1f463aa6afe3))
+* **api:** remove InputAudio from ResponseInputContent ([e8f5e9f](https://github.com/openai/openai-ruby/commit/e8f5e9f1b51843bc015f787316fbf522a87cac52))
+* handle thread interrupts in the core HTTP client ([92e26d0](https://github.com/openai/openai-ruby/commit/92e26d0593ae6487a62d500c3e1e866252f3bdeb))
+
+
+### Bug Fixes
+
+* **api:** docs updates ([88a4a35](https://github.com/openai/openai-ruby/commit/88a4a355457b22ef9ac657ecb0e7a1a2e9bc8973))
+* text and tools use mutually exclusive issue ([#855](https://github.com/openai/openai-ruby/issues/855)) ([7d93874](https://github.com/openai/openai-ruby/commit/7d93874ff34f5efa2459211984533fe72dced9e1))
+
+
+### Chores
+
+* add license information to the gemspec file ([#222](https://github.com/openai/openai-ruby/issues/222)) ([90d3c4a](https://github.com/openai/openai-ruby/commit/90d3c4aaae8a6e2fa039e0d1ad220ea3d1051ed7))
+* **client:** send user-agent header ([3a850a9](https://github.com/openai/openai-ruby/commit/3a850a93808daf101fb086edc5511db9fa224684))
+* **internal:** codegen related update ([f6b9f90](https://github.com/openai/openai-ruby/commit/f6b9f904a95d703a0ce76185e63352e095cb35af))
+
 ## 0.34.1 (2025-10-20)

 Full Changelog: [v0.34.0...v0.34.1](https://github.com/openai/openai-ruby/compare/v0.34.0...v0.34.1)
data/README.md
CHANGED

data/lib/openai/internal/transport/base_client.rb
CHANGED

@@ -201,7 +201,8 @@ module OpenAI
           self.class::PLATFORM_HEADERS,
           {
             "accept" => "application/json",
-            "content-type" => "application/json"
+            "content-type" => "application/json",
+            "user-agent" => user_agent
           },
           headers
         )
@@ -219,6 +220,11 @@ module OpenAI
       # @return [Hash{String=>String}]
       private def auth_headers = {}

+      # @api private
+      #
+      # @return [String]
+      private def user_agent = "#{self.class.name}/Ruby #{OpenAI::VERSION}"
+
       # @api private
       #
       # @return [String]
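The new `user-agent` default header is built by interpolating the client class name and the gem version. A minimal standalone sketch of what that interpolation produces (class and version here are illustrative, not the gem's code):

```ruby
# Sketch of the user_agent helper above; for the real client,
# self.class.name evaluates to "OpenAI::Client".
class Client
  VERSION = "0.35.0" # illustrative

  def user_agent = "#{self.class.name}/Ruby #{VERSION}"
end

puts Client.new.user_agent # => "Client/Ruby 0.35.0"
```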
data/lib/openai/internal/transport/pooled_net_requester.rb
CHANGED

@@ -128,40 +128,48 @@
       url, deadline = request.fetch_values(:url, :deadline)

       req = nil
-      eof = false
       finished = false
-      closing = nil

       # rubocop:disable Metrics/BlockLength
       enum = Enumerator.new do |y|
         next if finished

         with_pool(url, deadline: deadline) do |conn|
-          req, closing = self.class.build_request(request) do
-            self.class.calibrate_socket_timeout(conn, deadline)
-          end
-
-          self.class.calibrate_socket_timeout(conn, deadline)
-          unless conn.started?
-            conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT
-            conn.start
-          end
+          eof = false
+          closing = nil
+          ::Thread.handle_interrupt(Object => :never) do
+            ::Thread.handle_interrupt(Object => :immediate) do
+              req, closing = self.class.build_request(request) do
+                self.class.calibrate_socket_timeout(conn, deadline)
+              end

-          self.class.calibrate_socket_timeout(conn, deadline)
-          conn.request(req) do |rsp|
-            y << [req, rsp]
-            break if finished
-
-            rsp.read_body do |bytes|
-              y << bytes.force_encoding(Encoding::BINARY)
-              break if finished
+              self.class.calibrate_socket_timeout(conn, deadline)
+              unless conn.started?
+                conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT
+                conn.start
+              end

               self.class.calibrate_socket_timeout(conn, deadline)
+              conn.request(req) do |rsp|
+                y << [req, rsp]
+                break if finished
+
+                rsp.read_body do |bytes|
+                  y << bytes.force_encoding(Encoding::BINARY)
+                  break if finished
+
+                  self.class.calibrate_socket_timeout(conn, deadline)
+                end
+                eof = true
+              end
             end
-            eof = true
+          ensure
+            begin
+              conn.finish if !eof && conn&.started?
+            ensure
+              closing&.call
+            end
           end
         end
-      ensure
-        conn.finish if !eof && conn&.started?
       end
     rescue Timeout::Error
       raise OpenAI::Errors::APITimeoutError.new(url: url, request: req)
@@ -174,8 +182,6 @@ module OpenAI
       body = OpenAI::Internal::Util.fused_enum(enum, external: true) do
         finished = true
         loop { enum.next }
-      ensure
-        closing&.call
       end
       [Integer(response.code), response, body]
     end
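The restructured request loop leans on Ruby's `Thread.handle_interrupt`: interrupts are masked (`:never`) around the connection lifecycle and re-enabled (`:immediate`) only inside the block doing the actual I/O, so the nested `ensure` clauses can always finish the connection and run `closing`. A minimal standalone sketch of that pattern, assuming a plain TCP socket rather than the gem's connection pool:

```ruby
require "socket"

# Interrupts (e.g. Thread#raise from a timeout thread) are deferred while we
# hold the socket and delivered only inside the :immediate block, so the
# ensure clause can never be skipped mid-cleanup.
def fetch_head(host)
  Thread.handle_interrupt(Object => :never) do
    sock = nil
    begin
      Thread.handle_interrupt(Object => :immediate) do
        sock = TCPSocket.new(host, 80)
        sock.write("HEAD / HTTP/1.0\r\nHost: #{host}\r\n\r\n")
        sock.gets
      end
    ensure
      sock&.close # runs with interrupts still masked
    end
  end
end

puts fetch_head("example.com")
```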
data/lib/openai/models/custom_tool_input_format.rb
CHANGED

@@ -8,8 +8,10 @@ module OpenAI

     discriminator :type

+    # Unconstrained free-form text.
     variant :text, -> { OpenAI::CustomToolInputFormat::Text }

+    # A grammar defined by the user.
     variant :grammar, -> { OpenAI::CustomToolInputFormat::Grammar }

     class Text < OpenAI::Internal::Type::BaseModel
@@ -20,6 +22,8 @@ module OpenAI
       required :type, const: :text

       # @!method initialize(type: :text)
+      # Unconstrained free-form text.
+      #
       # @param type [Symbol, :text] Unconstrained text format. Always `text`.
     end

@@ -43,6 +47,8 @@ module OpenAI
       required :type, const: :grammar

       # @!method initialize(definition:, syntax:, type: :grammar)
+      # A grammar defined by the user.
+      #
       # @param definition [String] The grammar definition.
       #
       # @param syntax [Symbol, OpenAI::Models::CustomToolInputFormat::Grammar::Syntax] The syntax of the grammar definition. One of `lark` or `regex`.
data/lib/openai/models/image_edit_params.rb
CHANGED

@@ -142,7 +142,7 @@ module OpenAI
     #
     # @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s).
     #
-    # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil]
+    # @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features, es
     #
     # @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
     #
data/lib/openai/models/realtime/realtime_session_create_request.rb
CHANGED

@@ -98,8 +98,19 @@ module OpenAI
       optional :tracing, union: -> { OpenAI::Realtime::RealtimeTracingConfig }, nil?: true

       # @!attribute truncation
-      #
-      #
+      # When the number of tokens in a conversation exceeds the model's input token
+      # limit, the conversation be truncated, meaning messages (starting from the
+      # oldest) will not be included in the model's context. A 32k context model with
+      # 4,096 max output tokens can only include 28,224 tokens in the context before
+      # truncation occurs. Clients can configure truncation behavior to truncate with a
+      # lower max token limit, which is an effective way to control token usage and
+      # cost. Truncation will reduce the number of cached tokens on the next turn
+      # (busting the cache), since messages are dropped from the beginning of the
+      # context. However, clients can also configure truncation to retain messages up to
+      # a fraction of the maximum context size, which will reduce the need for future
+      # truncations and thus improve the cache rate. Truncation can be disabled
+      # entirely, which means the server will never truncate but would instead return an
+      # error if the conversation exceeds the model's input token limit.
       #
       # @return [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio, nil]
       optional :truncation, union: -> { OpenAI::Realtime::RealtimeTruncation }
@@ -130,7 +141,7 @@ module OpenAI
       #
       # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeTracingConfig::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces
       #
-      # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio]
+      # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi
       #
       # @param type [Symbol, :realtime] The type of session to create. Always `realtime` for the Realtime API.

data/lib/openai/models/realtime/realtime_session_create_response.rb
CHANGED

@@ -106,8 +106,19 @@ module OpenAI
       optional :tracing, union: -> { OpenAI::Realtime::RealtimeSessionCreateResponse::Tracing }, nil?: true

       # @!attribute truncation
-      #
-      #
+      # When the number of tokens in a conversation exceeds the model's input token
+      # limit, the conversation be truncated, meaning messages (starting from the
+      # oldest) will not be included in the model's context. A 32k context model with
+      # 4,096 max output tokens can only include 28,224 tokens in the context before
+      # truncation occurs. Clients can configure truncation behavior to truncate with a
+      # lower max token limit, which is an effective way to control token usage and
+      # cost. Truncation will reduce the number of cached tokens on the next turn
+      # (busting the cache), since messages are dropped from the beginning of the
+      # context. However, clients can also configure truncation to retain messages up to
+      # a fraction of the maximum context size, which will reduce the need for future
+      # truncations and thus improve the cache rate. Truncation can be disabled
+      # entirely, which means the server will never truncate but would instead return an
+      # error if the conversation exceeds the model's input token limit.
       #
       # @return [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio, nil]
       optional :truncation, union: -> { OpenAI::Realtime::RealtimeTruncation }
@@ -141,7 +152,7 @@ module OpenAI
       #
       # @param tracing [Symbol, :auto, OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Tracing::TracingConfiguration, nil] Realtime API can write session traces to the [Traces Dashboard](/logs?api=traces
       #
-      # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio]
+      # @param truncation [Symbol, OpenAI::Models::Realtime::RealtimeTruncation::RealtimeTruncationStrategy, OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio] When the number of tokens in a conversation exceeds the model's input token limi
       #
       # @param type [Symbol, :realtime] The type of session to create. Always `realtime` for the Realtime API.

@@ -990,7 +1001,7 @@ module OpenAI
       module Tracing
         extend OpenAI::Internal::Type::Union

-        #
+        # Enables tracing and sets default values for tracing configuration options. Always `auto`.
         variant const: :auto

         # Granular configuration for tracing.
data/lib/openai/models/realtime/realtime_tracing_config.rb
CHANGED

@@ -12,7 +12,7 @@ module OpenAI
     module RealtimeTracingConfig
       extend OpenAI::Internal::Type::Union

-      #
+      # Enables tracing and sets default values for tracing configuration options. Always `auto`.
       variant const: :auto

       # Granular configuration for tracing.
data/lib/openai/models/realtime/realtime_truncation.rb
CHANGED

@@ -3,8 +3,19 @@
 module OpenAI
   module Models
     module Realtime
-      #
-      #
+      # When the number of tokens in a conversation exceeds the model's input token
+      # limit, the conversation be truncated, meaning messages (starting from the
+      # oldest) will not be included in the model's context. A 32k context model with
+      # 4,096 max output tokens can only include 28,224 tokens in the context before
+      # truncation occurs. Clients can configure truncation behavior to truncate with a
+      # lower max token limit, which is an effective way to control token usage and
+      # cost. Truncation will reduce the number of cached tokens on the next turn
+      # (busting the cache), since messages are dropped from the beginning of the
+      # context. However, clients can also configure truncation to retain messages up to
+      # a fraction of the maximum context size, which will reduce the need for future
+      # truncations and thus improve the cache rate. Truncation can be disabled
+      # entirely, which means the server will never truncate but would instead return an
+      # error if the conversation exceeds the model's input token limit.
       module RealtimeTruncation
         extend OpenAI::Internal::Type::Union

data/lib/openai/models/realtime/realtime_truncation_retention_ratio.rb
CHANGED

@@ -5,8 +5,10 @@ module OpenAI
   module Realtime
     class RealtimeTruncationRetentionRatio < OpenAI::Internal::Type::BaseModel
       # @!attribute retention_ratio
-      # Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when
-      # conversation exceeds the input token limit.
+      # Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when
+      # the conversation exceeds the input token limit. Setting this to `0.8` means that
+      # messages will be dropped until 80% of the maximum allowed tokens are used. This
+      # helps reduce the frequency of truncations and improve cache rates.
       #
       # @return [Float]
       required :retention_ratio, Float
@@ -17,7 +19,14 @@ module OpenAI
       # @return [Symbol, :retention_ratio]
       required :type, const: :retention_ratio

-      # @!method initialize(retention_ratio:, type: :retention_ratio)
+      # @!attribute token_limits
+      # Optional custom token limits for this truncation strategy. If not provided, the
+      # model's default token limits will be used.
+      #
+      # @return [OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits, nil]
+      optional :token_limits, -> { OpenAI::Realtime::RealtimeTruncationRetentionRatio::TokenLimits }
+
+      # @!method initialize(retention_ratio:, token_limits: nil, type: :retention_ratio)
       # Some parameter documentations has been truncated, see
       # {OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio} for more details.
       #
@@ -25,9 +34,34 @@ module OpenAI
       # input token limit. This allows you to amortize truncations across multiple
       # turns, which can help improve cached token usage.
       #
-      # @param retention_ratio [Float] Fraction of post-instruction conversation tokens to retain (0.0 - 1.0) when
+      # @param retention_ratio [Float] Fraction of post-instruction conversation tokens to retain (`0.0` - `1.0`) when
+      #
+      # @param token_limits [OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits] Optional custom token limits for this truncation strategy. If not provided, the
       #
       # @param type [Symbol, :retention_ratio] Use retention ratio truncation.
+
+      # @see OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio#token_limits
+      class TokenLimits < OpenAI::Internal::Type::BaseModel
+        # @!attribute post_instructions
+        # Maximum tokens allowed in the conversation after instructions (which including
+        # tool definitions). For example, setting this to 5,000 would mean that truncation
+        # would occur when the conversation exceeds 5,000 tokens after instructions. This
+        # cannot be higher than the model's context window size minus the maximum output
+        # tokens.
+        #
+        # @return [Integer, nil]
+        optional :post_instructions, Integer
+
+        # @!method initialize(post_instructions: nil)
+        # Some parameter documentations has been truncated, see
+        # {OpenAI::Models::Realtime::RealtimeTruncationRetentionRatio::TokenLimits} for
+        # more details.
+        #
+        # Optional custom token limits for this truncation strategy. If not provided, the
+        # model's default token limits will be used.
+        #
+        # @param post_instructions [Integer] Maximum tokens allowed in the conversation after instructions (which including t
+      end
     end
   end
 end
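Taken together, these attributes let a client retain a fraction of the post-instruction context and cap it with a custom limit. A hedged sketch of constructing the new strategy object (the values are illustrative, not recommendations):

```ruby
require "openai"

# Keep 80% of post-instruction tokens when truncating, and truncate once the
# conversation exceeds 5,000 tokens after instructions.
truncation = OpenAI::Realtime::RealtimeTruncationRetentionRatio.new(
  retention_ratio: 0.8,
  token_limits: {post_instructions: 5_000}
)

# The object (or an equivalent hash) can then be supplied as the `truncation`
# field of a realtime session create request.
```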
data/lib/openai/models/reasoning.rb
CHANGED

@@ -33,6 +33,8 @@ module OpenAI
     # debugging and understanding the model's reasoning process. One of `auto`,
     # `concise`, or `detailed`.
     #
+    # `concise` is only supported for `computer-use-preview` models.
+    #
     # @return [Symbol, OpenAI::Models::Reasoning::Summary, nil]
     optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true

@@ -75,6 +77,8 @@ module OpenAI
     # debugging and understanding the model's reasoning process. One of `auto`,
     # `concise`, or `detailed`.
     #
+    # `concise` is only supported for `computer-use-preview` models.
+    #
     # @see OpenAI::Models::Reasoning#summary
     module Summary
       extend OpenAI::Internal::Type::Enum
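For context, a sketch of requesting a reasoning summary through the Responses API (model name illustrative; per the note above, `:concise` would only be accepted for `computer-use-preview` models):

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

response = client.responses.create(
  model: "o4-mini", # illustrative reasoning model
  input: "Outline a three-step plan to migrate a REST service to gRPC.",
  reasoning: {summary: :auto} # let the API pick the summary style
)
puts response.output
```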
data/lib/openai/models/responses/custom_tool.rb
CHANGED

@@ -29,6 +29,9 @@ module OpenAI
       optional :format_, union: -> { OpenAI::CustomToolInputFormat }, api_name: :format

       # @!method initialize(name:, description: nil, format_: nil, type: :custom)
+      # A custom tool that processes input using a specified format. Learn more about
+      # [custom tools](https://platform.openai.com/docs/guides/function-calling#custom-tools)
+      #
       # @param name [String] The name of the custom tool, used to identify it in tool calls.
       #
       # @param description [String] Optional description of the custom tool, used to provide more context.
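A hedged sketch of declaring such a custom tool with a grammar-constrained input format (model name and grammar are illustrative; the hash keys follow the wire names, so the `format_` attribute is spelled `format`):

```ruby
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-5", # illustrative
  input: "Roll two dice.",
  tools: [
    {
      type: :custom,
      name: "roll_dice",
      description: "Rolls dice specified in NdS notation.",
      # Constrain tool input to a Lark grammar (illustrative definition).
      format: {type: :grammar, syntax: :lark, definition: "start: /[1-9]d[1-9][0-9]?/"}
    }
  ]
)
```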
data/lib/openai/models/responses/easy_input_message.rb
CHANGED

@@ -8,7 +8,7 @@ module OpenAI
       # Text, image, or audio input to the model, used to generate a response. Can also
       # contain previous assistant responses.
       #
-      # @return [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @return [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
       required :content, union: -> { OpenAI::Responses::EasyInputMessage::Content }

       # @!attribute role
@@ -34,7 +34,7 @@ module OpenAI
       # `assistant` role are presumed to have been generated by the model in previous
       # interactions.
       #
-      # @param content [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @param content [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] Text, image, or audio input to the model, used to generate a response.
       #
       # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] The role of the message input. One of `user`, `assistant`, `system`, or
       #
@@ -55,7 +55,7 @@ module OpenAI
       variant -> { OpenAI::Responses::ResponseInputMessageContentList }

       # @!method self.variants
-      # @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>)]
     end

     # The role of the message input. One of `user`, `assistant`, `system`, or
data/lib/openai/models/responses/file_search_tool.rb
CHANGED

@@ -71,6 +71,13 @@ module OpenAI

       # @see OpenAI::Models::Responses::FileSearchTool#ranking_options
       class RankingOptions < OpenAI::Internal::Type::BaseModel
+        # @!attribute hybrid_search
+        # Weights that control how reciprocal rank fusion balances semantic embedding
+        # matches versus sparse keyword matches when hybrid search is enabled.
+        #
+        # @return [OpenAI::Models::Responses::FileSearchTool::RankingOptions::HybridSearch, nil]
+        optional :hybrid_search, -> { OpenAI::Responses::FileSearchTool::RankingOptions::HybridSearch }
+
         # @!attribute ranker
         # The ranker to use for the file search.
         #
@@ -85,16 +92,41 @@ module OpenAI
         # @return [Float, nil]
         optional :score_threshold, Float

-        # @!method initialize(ranker: nil, score_threshold: nil)
+        # @!method initialize(hybrid_search: nil, ranker: nil, score_threshold: nil)
         # Some parameter documentations has been truncated, see
         # {OpenAI::Models::Responses::FileSearchTool::RankingOptions} for more details.
         #
         # Ranking options for search.
         #
+        # @param hybrid_search [OpenAI::Models::Responses::FileSearchTool::RankingOptions::HybridSearch] Weights that control how reciprocal rank fusion balances semantic embedding matc
+        #
         # @param ranker [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] The ranker to use for the file search.
         #
         # @param score_threshold [Float] The score threshold for the file search, a number between 0 and 1. Numbers close

+        # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#hybrid_search
+        class HybridSearch < OpenAI::Internal::Type::BaseModel
+          # @!attribute embedding_weight
+          # The weight of the embedding in the reciprocal ranking fusion.
+          #
+          # @return [Float]
+          required :embedding_weight, Float
+
+          # @!attribute text_weight
+          # The weight of the text in the reciprocal ranking fusion.
+          #
+          # @return [Float]
+          required :text_weight, Float
+
+          # @!method initialize(embedding_weight:, text_weight:)
+          # Weights that control how reciprocal rank fusion balances semantic embedding
+          # matches versus sparse keyword matches when hybrid search is enabled.
+          #
+          # @param embedding_weight [Float] The weight of the embedding in the reciprocal ranking fusion.
+          #
+          # @param text_weight [Float] The weight of the text in the reciprocal ranking fusion.
+        end
+
         # The ranker to use for the file search.
         #
         # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#ranker
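A hedged sketch of passing the new weights through the file search tool (model, vector store ID, and weights are illustrative):

```ruby
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: "gpt-4.1", # illustrative
  input: "What does the design doc say about retries?",
  tools: [
    {
      type: :file_search,
      vector_store_ids: ["vs_123"], # hypothetical vector store ID
      ranking_options: {
        # Bias reciprocal rank fusion slightly toward semantic matches.
        hybrid_search: {embedding_weight: 0.6, text_weight: 0.4}
      }
    }
  ]
)
```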
data/lib/openai/models/responses/response_content.rb
CHANGED

@@ -16,9 +16,6 @@ module OpenAI
       # A file input to the model.
       variant -> { OpenAI::Responses::ResponseInputFile }

-      # An audio input to the model.
-      variant -> { OpenAI::Responses::ResponseInputAudio }
-
       # A text output from the model.
       variant -> { OpenAI::Responses::ResponseOutputText }

@@ -50,7 +47,7 @@ module OpenAI
       end

       # @!method self.variants
-      # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::
+      # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal, OpenAI::Models::Responses::ResponseContent::ReasoningTextContent)]
     end
   end
 end
data/lib/openai/models/responses/response_input_content.rb
CHANGED

@@ -18,11 +18,8 @@ module OpenAI
       # A file input to the model.
       variant :input_file, -> { OpenAI::Responses::ResponseInputFile }

-      # An audio input to the model.
-      variant :input_audio, -> { OpenAI::Responses::ResponseInputAudio }
-
       # @!method self.variants
-      # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
     end
   end
 end
data/lib/openai/models/responses/response_input_item.rb
CHANGED

@@ -94,7 +94,7 @@ module OpenAI
         # A list of one or many input items to the model, containing different content
         # types.
         #
-        # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+        # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
         required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }

         # @!attribute role
@@ -124,7 +124,7 @@ module OpenAI
         # hierarchy. Instructions given with the `developer` or `system` role take
         # precedence over instructions given with the `user` role.
         #
-        # @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+        # @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
         #
         # @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] The role of the message input. One of `user`, `system`, or `developer`.
         #
data/lib/openai/models/responses/response_input_message_item.rb
CHANGED

@@ -14,7 +14,7 @@ module OpenAI
       # A list of one or many input items to the model, containing different content
       # types.
       #
-      # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
       required :content, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent] }

       # @!attribute role
@@ -42,7 +42,7 @@ module OpenAI
       #
       # @param id [String] The unique ID of the message input.
       #
-      # @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile
+      # @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
       #
       # @param role [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] The role of the message input. One of `user`, `system`, or `developer`.
       #
data/lib/openai/models/responses/response_output_text.rb
CHANGED

@@ -11,6 +11,11 @@ module OpenAI
       required :annotations,
                -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation] }

+      # @!attribute logprobs
+      #
+      # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>]
+      required :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] }
+
       # @!attribute text
       # The text output from the model.
       #
@@ -31,20 +36,15 @@ module OpenAI
       # @return [Symbol, :output_text]
       required :type, const: :output_text

-      # @!attribute logprobs
-      #
-      # @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>, nil]
-      optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob] }
-
-      # @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
+      # @!method initialize(annotations:, logprobs:, text:, type: :output_text)
       # A text output from the model.
       #
       # @param annotations [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>] The annotations of the text output.
       #
-      # @param text [String] The text output from the model.
-      #
       # @param logprobs [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>]
       #
+      # @param text [String] The text output from the model.
+      #
       # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.

       # A citation to a file.