openai 0.10.0 → 0.12.0
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +36 -0
- data/README.md +83 -7
- data/lib/openai/client.rb +11 -0
- data/lib/openai/errors.rb +3 -0
- data/lib/openai/helpers/streaming/events.rb +23 -0
- data/lib/openai/helpers/streaming/response_stream.rb +232 -0
- data/lib/openai/helpers/structured_output/parsed_json.rb +39 -0
- data/lib/openai/internal/stream.rb +2 -1
- data/lib/openai/internal/transport/base_client.rb +10 -2
- data/lib/openai/internal/type/base_stream.rb +3 -1
- data/lib/openai/models/all_models.rb +4 -0
- data/lib/openai/models/chat/chat_completion.rb +32 -31
- data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
- data/lib/openai/models/chat/chat_completion_message.rb +1 -1
- data/lib/openai/models/chat/chat_completion_message_tool_call.rb +1 -1
- data/lib/openai/models/chat/completion_create_params.rb +34 -31
- data/lib/openai/models/images_response.rb +92 -1
- data/lib/openai/models/responses/response.rb +59 -35
- data/lib/openai/models/responses/response_create_params.rb +64 -39
- data/lib/openai/models/responses/response_function_tool_call.rb +1 -1
- data/lib/openai/models/responses/response_function_web_search.rb +115 -1
- data/lib/openai/models/responses/response_includable.rb +8 -6
- data/lib/openai/models/responses/response_output_text.rb +1 -1
- data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
- data/lib/openai/models/responses/tool_choice_types.rb +0 -3
- data/lib/openai/models/responses_model.rb +4 -0
- data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
- data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
- data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
- data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
- data/lib/openai/models.rb +2 -0
- data/lib/openai/resources/chat/completions.rb +14 -6
- data/lib/openai/resources/responses.rb +262 -81
- data/lib/openai/resources/webhooks.rb +124 -0
- data/lib/openai/streaming.rb +5 -0
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +22 -0
- data/rbi/openai/client.rbi +3 -0
- data/rbi/openai/helpers/streaming/events.rbi +31 -0
- data/rbi/openai/helpers/streaming/response_stream.rbi +104 -0
- data/rbi/openai/internal/type/base_stream.rbi +8 -1
- data/rbi/openai/models/all_models.rbi +20 -0
- data/rbi/openai/models/chat/chat_completion.rbi +47 -42
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
- data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
- data/rbi/openai/models/images_response.rbi +146 -0
- data/rbi/openai/models/responses/response.rbi +75 -44
- data/rbi/openai/models/responses/response_create_params.rbi +91 -55
- data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
- data/rbi/openai/models/responses/response_includable.rbi +17 -11
- data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
- data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
- data/rbi/openai/models/responses_model.rbi +20 -0
- data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
- data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
- data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
- data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
- data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
- data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
- data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
- data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
- data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
- data/rbi/openai/models.rbi +2 -0
- data/rbi/openai/resources/chat/completions.rbi +34 -30
- data/rbi/openai/resources/responses.rbi +188 -39
- data/rbi/openai/resources/webhooks.rbi +68 -0
- data/rbi/openai/streaming.rbi +5 -0
- data/sig/openai/client.rbs +2 -0
- data/sig/openai/internal/type/base_stream.rbs +4 -0
- data/sig/openai/models/all_models.rbs +8 -0
- data/sig/openai/models/chat/chat_completion.rbs +2 -1
- data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
- data/sig/openai/models/chat/completion_create_params.rbs +2 -1
- data/sig/openai/models/images_response.rbs +83 -0
- data/sig/openai/models/responses/response.rbs +13 -1
- data/sig/openai/models/responses/response_create_params.rbs +13 -1
- data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
- data/sig/openai/models/responses/response_includable.rbs +7 -5
- data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
- data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
- data/sig/openai/models/responses_model.rbs +8 -0
- data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
- data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
- data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
- data/sig/openai/models.rbs +2 -0
- data/sig/openai/resources/responses.rbs +4 -0
- data/sig/openai/resources/webhooks.rbs +33 -0
- metadata +63 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 38a07c18c8f0197c2edc1400139623aefc1c85e8a0fbf167a86afc53f1399a75
+  data.tar.gz: 13b400c12a9d2bef1ebcf2722413e3e6b1915992df539542f4a73f084b839cfa
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d5a62aacb54b1e50526da647c3ea91adbe8dbf69bd8c4f0df08a0a6c9fa4b49ebb79bd4b51bf5efc55d06d514af286b7658c153a6498298761dedc2f73c42c20
+  data.tar.gz: 9c7a2bbe1053d11780882a11a9617ea4cd2ee4a6648f278003be7da107c0e5dc281beeb6dbce3db168f4c9338d161742ab48bc03711a8c4f05e540f11e6f40d0
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,41 @@
 # Changelog
 
+## 0.12.0 (2025-07-03)
+
+Full Changelog: [v0.11.0...v0.12.0](https://github.com/openai/openai-ruby/compare/v0.11.0...v0.12.0)
+
+### Features
+
+* ensure partial jsons in structured ouput are handled gracefully ([#740](https://github.com/openai/openai-ruby/issues/740)) ([5deec70](https://github.com/openai/openai-ruby/commit/5deec708bad1ceb1a03e9aa65f737e3f89ce6455))
+* responses streaming helpers ([#721](https://github.com/openai/openai-ruby/issues/721)) ([c2f4270](https://github.com/openai/openai-ruby/commit/c2f42708e41492f1c22886735079973510fb2789))
+
+
+### Chores
+
+* **ci:** only run for pushes and fork pull requests ([97538e2](https://github.com/openai/openai-ruby/commit/97538e266f6f9a0e09669453539ee52ca56f4f59))
+* **internal:** allow streams to also be unwrapped on a per-row basis ([49bdadf](https://github.com/openai/openai-ruby/commit/49bdadfc0d3400664de0c8e7cfd59879faec45b8))
+* **internal:** minor refactoring of json helpers ([#744](https://github.com/openai/openai-ruby/issues/744)) ([f13edee](https://github.com/openai/openai-ruby/commit/f13edee16325be04335443cb886a7c2024155fd9))
+
+## 0.11.0 (2025-06-26)
+
+Full Changelog: [v0.10.0...v0.11.0](https://github.com/openai/openai-ruby/compare/v0.10.0...v0.11.0)
+
+### Features
+
+* **api:** webhook and deep research support ([6228400](https://github.com/openai/openai-ruby/commit/6228400e19aadefc5f87e24b3c104fc0b44d3cee))
+
+
+### Bug Fixes
+
+* **ci:** release-doctor — report correct token name ([c12c991](https://github.com/openai/openai-ruby/commit/c12c9911beaeb8b1c72d7c5cc5f14dcb9cd5452e))
+
+
+### Chores
+
+* **api:** remove unsupported property ([1073c3a](https://github.com/openai/openai-ruby/commit/1073c3a6059f2d1e1ef92937326699e0240503e5))
+* **client:** throw specific errors ([0cf937e](https://github.com/openai/openai-ruby/commit/0cf937ea8abebc05e52a419e19e275a45b5da646))
+* **docs:** update README to include links to docs on Webhooks ([2d8f23e](https://github.com/openai/openai-ruby/commit/2d8f23ecb245c88f3f082f93eb906af857d64c7d))
+
 ## 0.10.0 (2025-06-23)
 
 Full Changelog: [v0.9.0...v0.10.0](https://github.com/openai/openai-ruby/compare/v0.9.0...v0.10.0)
data/README.md
CHANGED
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->
 
 ```ruby
-gem "openai", "~> 0.
+gem "openai", "~> 0.12.0"
 ```
 
 <!-- x-release-please-end -->
@@ -42,16 +42,14 @@ puts(chat_completion)
 
 We provide support for streaming responses using Server-Sent Events (SSE).
 
-**coming soon:** `openai.chat.completions.stream` will soon come with Python SDK-style higher-level streaming responses support.
-
 ```ruby
-stream = openai.
-
+stream = openai.responses.stream(
+  input: "Write a haiku about OpenAI.",
   model: :"gpt-4.1"
 )
 
-stream.each do |
-  puts(
+stream.each do |event|
+  puts(event.type)
 end
 ```
 
@@ -112,6 +110,84 @@
 
 Note that you can also pass a raw `IO` descriptor, but this disables retries, as the library can't be sure if the descriptor is a file or pipe (which cannot be rewound).
 
+## Webhook Verification
+
+Verifying webhook signatures is _optional but encouraged_.
+
+For more information about webhooks, see [the API docs](https://platform.openai.com/docs/guides/webhooks).
+
+### Parsing webhook payloads
+
+For most use cases, you will likely want to verify the webhook and parse the payload at the same time. To achieve this, we provide the method `client.webhooks.unwrap`, which parses a webhook request and verifies that it was sent by OpenAI. This method will raise an error if the signature is invalid.
+
+Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). The `unwrap` method will parse this JSON for you into an event object after verifying the webhook was sent from OpenAI.
+
+```ruby
+require 'sinatra'
+require 'openai'
+
+# Set up the client with webhook secret from environment variable
+client = OpenAI::Client.new(webhook_secret: ENV['OPENAI_WEBHOOK_SECRET'])
+
+post '/webhook' do
+  request_body = request.body.read
+
+  begin
+    event = client.webhooks.unwrap(request_body, request.env)
+
+    case event.type
+    when 'response.completed'
+      puts "Response completed: #{event.data}"
+    when 'response.failed'
+      puts "Response failed: #{event.data}"
+    else
+      puts "Unhandled event type: #{event.type}"
+    end
+
+    status 200
+    'ok'
+  rescue StandardError => e
+    puts "Invalid signature: #{e}"
+    status 400
+    'Invalid signature'
+  end
+end
+```
+
+### Verifying webhook payloads directly
+
+In some cases, you may want to verify the webhook separately from parsing the payload. If you prefer to handle these steps separately, we provide the method `client.webhooks.verify_signature` to _only verify_ the signature of a webhook request. Like `unwrap`, this method will raise an error if the signature is invalid.
+
+Note that the `body` parameter must be the raw JSON string sent from the server (do not parse it first). You will then need to parse the body after verifying the signature.
+
+```ruby
+require 'sinatra'
+require 'json'
+require 'openai'
+
+# Set up the client with webhook secret from environment variable
+client = OpenAI::Client.new(webhook_secret: ENV['OPENAI_WEBHOOK_SECRET'])
+
+post '/webhook' do
+  request_body = request.body.read
+
+  begin
+    client.webhooks.verify_signature(request_body, request.env)
+
+    # Parse the body after verification
+    event = JSON.parse(request_body)
+    puts "Verified event: #{event}"
+
+    status 200
+    'ok'
+  rescue StandardError => e
+    puts "Invalid signature: #{e}"
+    status 400
+    'Invalid signature'
+  end
+end
+```
+
 ### [Structured outputs](https://platform.openai.com/docs/guides/structured-outputs) and function calling
 
 This SDK ships with helpers in `OpenAI::BaseModel`, `OpenAI::ArrayOf`, `OpenAI::EnumOf`, and `OpenAI::UnionOf` to help you define the supported JSON schemas used in making structured outputs and function calling requests.
data/lib/openai/client.rb
CHANGED
@@ -24,6 +24,9 @@ module OpenAI
     # @return [String, nil]
     attr_reader :project
 
+    # @return [String, nil]
+    attr_reader :webhook_secret
+
     # @return [OpenAI::Resources::Completions]
     attr_reader :completions
 
@@ -57,6 +60,9 @@ module OpenAI
     # @return [OpenAI::Resources::VectorStores]
     attr_reader :vector_stores
 
+    # @return [OpenAI::Resources::Webhooks]
+    attr_reader :webhooks
+
     # @return [OpenAI::Resources::Beta]
     attr_reader :beta
 
@@ -92,6 +98,8 @@ module OpenAI
     #
     # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]`
     #
+    # @param webhook_secret [String, nil] Defaults to `ENV["OPENAI_WEBHOOK_SECRET"]`
+    #
     # @param base_url [String, nil] Override the default base URL for the API, e.g.,
     #   `"https://api.example.com/v2/"`. Defaults to `ENV["OPENAI_BASE_URL"]`
     #
@@ -106,6 +114,7 @@ module OpenAI
       api_key: ENV["OPENAI_API_KEY"],
       organization: ENV["OPENAI_ORG_ID"],
       project: ENV["OPENAI_PROJECT_ID"],
+      webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"],
       base_url: ENV["OPENAI_BASE_URL"],
       max_retries: self.class::DEFAULT_MAX_RETRIES,
       timeout: self.class::DEFAULT_TIMEOUT_IN_SECONDS,
@@ -124,6 +133,7 @@ module OpenAI
       }
 
       @api_key = api_key.to_s
+      @webhook_secret = webhook_secret&.to_s
 
       super(
         base_url: base_url,
@@ -145,6 +155,7 @@ module OpenAI
       @fine_tuning = OpenAI::Resources::FineTuning.new(client: self)
      @graders = OpenAI::Resources::Graders.new(client: self)
      @vector_stores = OpenAI::Resources::VectorStores.new(client: self)
+      @webhooks = OpenAI::Resources::Webhooks.new(client: self)
      @beta = OpenAI::Resources::Beta.new(client: self)
      @batches = OpenAI::Resources::Batches.new(client: self)
      @uploads = OpenAI::Resources::Uploads.new(client: self)
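These client changes wire up the new webhook support: a `webhook_secret` option (defaulting to `ENV["OPENAI_WEBHOOK_SECRET"]`) and a `webhooks` resource, which the README examples above build on. Below is a minimal sketch of how the two fit together outside of Sinatra; `raw_body` and `headers` are placeholders for whatever your web framework hands you, not names from this diff.

```ruby
require "openai"

# The webhook secret can also be picked up automatically from OPENAI_WEBHOOK_SECRET.
client = OpenAI::Client.new(
  api_key: ENV["OPENAI_API_KEY"],
  webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"]
)

# raw_body must be the unparsed JSON request body; headers are the incoming request headers.
event = client.webhooks.unwrap(raw_body, headers)
puts(event.type) # e.g. "response.completed"
```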
data/lib/openai/errors.rb
CHANGED

data/lib/openai/helpers/streaming/events.rb
ADDED
@@ -0,0 +1,23 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class ResponseTextDeltaEvent < OpenAI::Models::Responses::ResponseTextDeltaEvent
+        required :snapshot, String
+      end
+
+      class ResponseTextDoneEvent < OpenAI::Models::Responses::ResponseTextDoneEvent
+        optional :parsed, Object
+      end
+
+      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
+        required :snapshot, String
+      end
+
+      class ResponseCompletedEvent < OpenAI::Models::Responses::ResponseCompletedEvent
+        required :response, OpenAI::Models::Responses::Response
+      end
+    end
+  end
+end
data/lib/openai/helpers/streaming/response_stream.rb
ADDED
@@ -0,0 +1,232 @@
+# frozen_string_literal: true
+
+require_relative "events"
+
+module OpenAI
+  module Helpers
+    module Streaming
+      class ResponseStream
+        include OpenAI::Internal::Type::BaseStream
+
+        def initialize(raw_stream:, text_format: nil, starting_after: nil)
+          @text_format = text_format
+          @starting_after = starting_after
+          @raw_stream = raw_stream
+          @iterator = iterator
+          @state = ResponseStreamState.new(
+            text_format: text_format
+          )
+        end
+
+        def until_done
+          each {} # rubocop:disable Lint/EmptyBlock
+          self
+        end
+
+        def text
+          OpenAI::Internal::Util.chain_fused(@iterator) do |yielder|
+            @iterator.each do |event|
+              case event
+              when OpenAI::Streaming::ResponseTextDeltaEvent
+                yielder << event.delta
+              end
+            end
+          end
+        end
+
+        def get_final_response
+          until_done
+          response = @state.completed_response
+          raise RuntimeError.new("Didn't receive a 'response.completed' event") unless response
+          response
+        end
+
+        def get_output_text
+          response = get_final_response
+          text_parts = []
+
+          response.output.each do |output|
+            next unless output.type == :message
+
+            output.content.each do |content|
+              next unless content.type == :output_text
+              text_parts << content.text
+            end
+          end
+
+          text_parts.join
+        end
+
+        private
+
+        def iterator
+          @iterator ||= OpenAI::Internal::Util.chain_fused(@raw_stream) do |y|
+            @raw_stream.each do |raw_event|
+              events_to_yield = @state.handle_event(raw_event)
+              events_to_yield.each do |event|
+                if @starting_after.nil? || event.sequence_number > @starting_after
+                  y << event
+                end
+              end
+            end
+          end
+        end
+      end
+
+      class ResponseStreamState
+        attr_reader :completed_response
+
+        def initialize(text_format:)
+          @current_snapshot = nil
+          @completed_response = nil
+          @text_format = text_format
+        end
+
+        def handle_event(event)
+          @current_snapshot = accumulate_event(
+            event: event,
+            current_snapshot: @current_snapshot
+          )
+
+          events_to_yield = []
+
+          case event
+          when OpenAI::Models::Responses::ResponseTextDeltaEvent
+            output = @current_snapshot.output[event.output_index]
+            assert_type(output, :message)
+
+            content = output.content[event.content_index]
+            assert_type(content, :output_text)
+
+            events_to_yield << OpenAI::Streaming::ResponseTextDeltaEvent.new(
+              content_index: event.content_index,
+              delta: event.delta,
+              item_id: event.item_id,
+              output_index: event.output_index,
+              sequence_number: event.sequence_number,
+              type: event.type,
+              snapshot: content.text
+            )
+
+          when OpenAI::Models::Responses::ResponseTextDoneEvent
+            output = @current_snapshot.output[event.output_index]
+            assert_type(output, :message)
+
+            content = output.content[event.content_index]
+            assert_type(content, :output_text)
+
+            parsed = parse_structured_text(content.text)
+
+            events_to_yield << OpenAI::Streaming::ResponseTextDoneEvent.new(
+              content_index: event.content_index,
+              item_id: event.item_id,
+              output_index: event.output_index,
+              sequence_number: event.sequence_number,
+              text: event.text,
+              type: event.type,
+              parsed: parsed
+            )
+
+          when OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
+            output = @current_snapshot.output[event.output_index]
+            assert_type(output, :function_call)
+
+            events_to_yield << OpenAI::Streaming::ResponseFunctionCallArgumentsDeltaEvent.new(
+              delta: event.delta,
+              item_id: event.item_id,
+              output_index: event.output_index,
+              sequence_number: event.sequence_number,
+              type: event.type,
+              snapshot: output.arguments
+            )
+
+          when OpenAI::Models::Responses::ResponseCompletedEvent
+            events_to_yield << OpenAI::Streaming::ResponseCompletedEvent.new(
+              sequence_number: event.sequence_number,
+              type: event.type,
+              response: event.response
+            )
+
+          else
+            # Pass through other events unchanged.
+            events_to_yield << event
+          end
+
+          events_to_yield
+        end
+
+        def accumulate_event(event:, current_snapshot:)
+          if current_snapshot.nil?
+            unless event.is_a?(OpenAI::Models::Responses::ResponseCreatedEvent)
+              raise "Expected first event to be response.created"
+            end
+
+            # Use the converter to create a new, isolated copy of the response object.
+            # This ensures proper type validation and prevents shared object references.
+            return OpenAI::Internal::Type::Converter.coerce(
+              OpenAI::Models::Responses::Response,
+              event.response
+            )
+          end
+
+          case event
+          when OpenAI::Models::Responses::ResponseOutputItemAddedEvent
+            current_snapshot.output.push(event.item)
+
+          when OpenAI::Models::Responses::ResponseContentPartAddedEvent
+            output = current_snapshot.output[event.output_index]
+            if output && output.type == :message
+              output.content.push(event.part)
+              current_snapshot.output[event.output_index] = output
+            end
+
+          when OpenAI::Models::Responses::ResponseTextDeltaEvent
+            output = current_snapshot.output[event.output_index]
+            if output && output.type == :message
+              content = output.content[event.content_index]
+              if content && content.type == :output_text
+                content.text += event.delta
+                output.content[event.content_index] = content
+                current_snapshot.output[event.output_index] = output
+              end
+            end
+
+          when OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
+            output = current_snapshot.output[event.output_index]
+            if output && output.type == :function_call
+              output.arguments = (output.arguments || "") + event.delta
+              current_snapshot.output[event.output_index] = output
+            end
+
+          when OpenAI::Models::Responses::ResponseCompletedEvent
+            @completed_response = event.response
+          end
+
+          current_snapshot
+        end
+
+        private
+
+        def assert_type(object, expected_type)
+          return if object && object.type == expected_type
+          actual_type = object ? object.type : "nil"
+          raise "Invalid state: expected #{expected_type} but got #{actual_type}"
+        end
+
+        def parse_structured_text(text)
+          return nil unless @text_format && text
+
+          begin
+            parsed = JSON.parse(text, symbolize_names: true)
+            OpenAI::Internal::Type::Converter.coerce(@text_format, parsed)
+          rescue JSON::ParserError => e
+            raise RuntimeError.new(
+              "Failed to parse structured text as JSON for #{@text_format}: #{e.message}. " \
+              "Raw text: #{text.inspect}"
+            )
+          end
+        end
+      end
+    end
+  end
+end
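This file is the core of the new streaming helper: `ResponseStream` wraps the raw SSE stream, keeps a running snapshot of the `Response`, and re-emits enriched events (text deltas gain a `snapshot`, the text-done event gains a `parsed` value when a `text_format` is supplied). A minimal consumption sketch follows, assuming a configured client and that `client.responses.stream` returns one of these streams, as the README change above indicates.

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

stream = client.responses.stream(
  model: :"gpt-4.1",
  input: "Write a haiku about OpenAI."
)

# Either print text deltas as they arrive via the `text` helper...
stream.text.each { |delta| print(delta) }

# ...or drain the stream and read the accumulated result:
# `get_output_text` joins the :output_text parts of the final response, and
# `get_final_response` raises unless a `response.completed` event was seen.
puts
puts(stream.get_output_text)
```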
data/lib/openai/helpers/structured_output/parsed_json.rb
ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module OpenAI
+  module Helpers
+    module StructuredOutput
+      # @abstract
+      #
+      # Like OpenAI::Internal::Type::Unknown, but for parsed JSON values, which can be incomplete or malformed.
+      class ParsedJson < OpenAI::Internal::Type::Unknown
+        class << self
+          # @api private
+          #
+          # No coercion needed for Unknown type.
+          #
+          # @param value [Object]
+          #
+          # @param state [Hash{Symbol=>Object}] .
+          #
+          # @option state [Boolean] :translate_names
+          #
+          # @option state [Boolean] :strictness
+          #
+          # @option state [Hash{Symbol=>Object}] :exactness
+          #
+          # @option state [Class<StandardError>] :error
+          #
+          # @option state [Integer] :branched
+          #
+          # @return [Object]
+          def coerce(value, state:)
+            (state[:error] = value) if value.is_a?(StandardError)
+
+            super
+          end
+        end
+      end
+    end
+  end
+end
data/lib/openai/internal/stream.rb
CHANGED
@@ -47,7 +47,8 @@ module OpenAI
             message: message
           )
         in decoded
-
+          unwrapped = OpenAI::Internal::Util.dig(decoded, @unwrap)
+          y << OpenAI::Internal::Type::Converter.coerce(@model, unwrapped)
         end
       else
       end
data/lib/openai/internal/transport/base_client.rb
CHANGED
@@ -471,6 +471,7 @@ module OpenAI
       self.class.validate!(req)
       model = req.fetch(:model) { OpenAI::Internal::Type::Unknown }
       opts = req[:options].to_h
+      unwrap = req[:unwrap]
       OpenAI::RequestOptions.validate!(opts)
       request = build_request(req.except(:options), opts)
       url = request.fetch(:url)
@@ -487,11 +488,18 @@ module OpenAI
       decoded = OpenAI::Internal::Util.decode_content(response, stream: stream)
       case req
       in {stream: Class => st}
-        st.new(
+        st.new(
+          model: model,
+          url: url,
+          status: status,
+          response: response,
+          unwrap: unwrap,
+          stream: decoded
+        )
       in {page: Class => page}
         page.new(client: self, req: req, headers: response, page_data: decoded)
       else
-        unwrapped = OpenAI::Internal::Util.dig(decoded,
+        unwrapped = OpenAI::Internal::Util.dig(decoded, unwrap)
         OpenAI::Internal::Type::Converter.coerce(model, unwrapped)
       end
     end
data/lib/openai/internal/type/base_stream.rb
CHANGED
@@ -64,12 +64,14 @@ module OpenAI
     # @param url [URI::Generic]
     # @param status [Integer]
     # @param response [Net::HTTPResponse]
+    # @param unwrap [Symbol, Integer, Array<Symbol, Integer>, Proc]
     # @param stream [Enumerable<Object>]
-    def initialize(model:, url:, status:, response:, stream:)
+    def initialize(model:, url:, status:, response:, unwrap:, stream:)
       @model = model
       @url = url
       @status = status
       @response = response
+      @unwrap = unwrap
       @stream = stream
       @iterator = iterator
 
data/lib/openai/models/all_models.rb
CHANGED
@@ -18,6 +18,10 @@
         O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
         O3_PRO = :"o3-pro"
         O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
+        O3_DEEP_RESEARCH = :"o3-deep-research"
+        O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
+        O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
+        O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
         COMPUTER_USE_PREVIEW = :"computer-use-preview"
         COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"
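The four deep-research constants added here (mirrored in `responses_model.rb` and the RBI/RBS signatures listed above) extend the model enums used by the Responses API, so the new model names can be passed as plain symbols. A hedged sketch; any additional parameters a deep-research request may require (for example, tools) are governed by the API and not shown here.

```ruby
require "openai"

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

# :"o3-deep-research" is one of the enum values added in this hunk.
response = client.responses.create(
  model: :"o3-deep-research",
  input: "Summarize recent progress on solid-state battery chemistry."
)
puts(response.id)
```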