openai 0.14.0 → 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +41 -0
- data/README.md +3 -3
- data/lib/openai/helpers/structured_output/json_schema_converter.rb +20 -21
- data/lib/openai/helpers/structured_output/union_of.rb +11 -1
- data/lib/openai/models/audio/speech_create_params.rb +0 -9
- data/lib/openai/models/chat/chat_completion.rb +2 -2
- data/lib/openai/models/chat/chat_completion_audio_param.rb +0 -9
- data/lib/openai/models/chat/chat_completion_chunk.rb +2 -2
- data/lib/openai/models/chat/chat_completion_store_message.rb +32 -1
- data/lib/openai/models/chat/completion_create_params.rb +33 -7
- data/lib/openai/models/function_definition.rb +1 -1
- data/lib/openai/models/image_edit_params.rb +4 -1
- data/lib/openai/models/image_generate_params.rb +4 -1
- data/lib/openai/models/images_response.rb +2 -5
- data/lib/openai/models/responses/response.rb +52 -6
- data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +5 -3
- data/lib/openai/models/responses/response_create_params.rb +33 -7
- data/lib/openai/models/responses/response_mcp_call_arguments_delta_event.rb +9 -4
- data/lib/openai/models/responses/response_mcp_call_arguments_done_event.rb +7 -4
- data/lib/openai/models/responses/response_mcp_call_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_call_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_completed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_failed_event.rb +17 -1
- data/lib/openai/models/responses/response_mcp_list_tools_in_progress_event.rb +17 -1
- data/lib/openai/models/responses/response_stream_event.rb +1 -7
- data/lib/openai/models/responses/response_text_delta_event.rb +66 -1
- data/lib/openai/models/responses/response_text_done_event.rb +66 -1
- data/lib/openai/resources/chat/completions.rb +12 -4
- data/lib/openai/resources/images.rb +6 -6
- data/lib/openai/resources/responses.rb +42 -17
- data/lib/openai/version.rb +1 -1
- data/lib/openai.rb +0 -2
- data/rbi/openai/models/audio/speech_create_params.rbi +0 -9
- data/rbi/openai/models/chat/chat_completion.rbi +3 -3
- data/rbi/openai/models/chat/chat_completion_audio_param.rbi +0 -15
- data/rbi/openai/models/chat/chat_completion_chunk.rbi +3 -3
- data/rbi/openai/models/chat/chat_completion_store_message.rbi +68 -3
- data/rbi/openai/models/chat/completion_create_params.rbi +47 -9
- data/rbi/openai/models/function_definition.rbi +2 -2
- data/rbi/openai/models/image_edit_params.rbi +6 -0
- data/rbi/openai/models/image_generate_params.rbi +6 -0
- data/rbi/openai/models/images_response.rbi +2 -2
- data/rbi/openai/models/responses/response.rbi +47 -9
- data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +6 -3
- data/rbi/openai/models/responses/response_create_params.rbi +47 -9
- data/rbi/openai/models/responses/response_mcp_call_arguments_delta_event.rbi +7 -5
- data/rbi/openai/models/responses/response_mcp_call_arguments_done_event.rbi +5 -5
- data/rbi/openai/models/responses/response_mcp_call_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_call_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_completed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_failed_event.rbi +28 -4
- data/rbi/openai/models/responses/response_mcp_list_tools_in_progress_event.rbi +28 -4
- data/rbi/openai/models/responses/response_stream_event.rbi +0 -2
- data/rbi/openai/models/responses/response_text_delta_event.rbi +131 -0
- data/rbi/openai/models/responses/response_text_done_event.rbi +131 -0
- data/rbi/openai/resources/chat/completions.rbi +36 -8
- data/rbi/openai/resources/images.rbi +22 -10
- data/rbi/openai/resources/responses.rbi +36 -8
- data/sig/openai/models/audio/speech_create_params.rbs +0 -6
- data/sig/openai/models/chat/chat_completion_audio_param.rbs +0 -6
- data/sig/openai/models/chat/chat_completion_store_message.rbs +29 -3
- data/sig/openai/models/chat/completion_create_params.rbs +14 -0
- data/sig/openai/models/responses/response.rbs +14 -0
- data/sig/openai/models/responses/response_create_params.rbs +14 -0
- data/sig/openai/models/responses/response_mcp_call_arguments_delta_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_arguments_done_event.rbs +4 -4
- data/sig/openai/models/responses/response_mcp_call_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_call_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_completed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_failed_event.rbs +14 -1
- data/sig/openai/models/responses/response_mcp_list_tools_in_progress_event.rbs +10 -0
- data/sig/openai/models/responses/response_stream_event.rbs +0 -2
- data/sig/openai/models/responses/response_text_delta_event.rbs +52 -0
- data/sig/openai/models/responses/response_text_done_event.rbs +52 -0
- data/sig/openai/resources/chat/completions.rbs +4 -0
- data/sig/openai/resources/responses.rbs +4 -0
- metadata +2 -8
- data/lib/openai/models/responses/response_reasoning_delta_event.rb +0 -60
- data/lib/openai/models/responses/response_reasoning_done_event.rb +0 -60
- data/rbi/openai/models/responses/response_reasoning_delta_event.rbi +0 -83
- data/rbi/openai/models/responses/response_reasoning_done_event.rbi +0 -83
- data/sig/openai/models/responses/response_reasoning_delta_event.rbs +0 -47
- data/sig/openai/models/responses/response_reasoning_done_event.rbs +0 -47
checksums.yaml
CHANGED
```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 78e829792412e459c6e623de5db5f553d7d1325fa28300ae94c62103d511c946
+  data.tar.gz: e7711af3f619a26ac688445353ee83b8ca250cad3feb84358f0a556650662e0b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 3a6d15ee5239db7f9f8fe2494169ce279c26435bc7b83a5b8ecb2dfde47da5f6636f6db49a29f429b7fca35b58d90136d06f7de4e49677ee37fb94b899f518da
+  data.tar.gz: 1f6659074cc6e2317b47e0677a928765d9d8b73e898f7c2ee0eb435dc1fa0728df18f64859a6a732ec3beaaa1db16a5e96176eb1bdda174ac9368cccd0a243a2
```
data/CHANGELOG.md
CHANGED
```diff
@@ -1,5 +1,46 @@
 # Changelog
 
+## 0.16.0 (2025-07-30)
+
+Full Changelog: [v0.15.0...v0.16.0](https://github.com/openai/openai-ruby/compare/v0.15.0...v0.16.0)
+
+### Features
+
+* add output_text method for non-streaming responses ([#757](https://github.com/openai/openai-ruby/issues/757)) ([50cf119](https://github.com/openai/openai-ruby/commit/50cf119106f9e16d9ac6a9898028b6d563a6f809))
+* **api:** manual updates ([e9fa8a0](https://github.com/openai/openai-ruby/commit/e9fa8a08d6ecebdd06212eaf6b9103082b7d67aa))
+
+
+### Bug Fixes
+
+* **internal:** ensure sorbet test always runs serially ([0601061](https://github.com/openai/openai-ruby/commit/0601061047525d16cc2afac64e5a4de0dd9de2e5))
+* provide parsed outputs for resumed streams ([#756](https://github.com/openai/openai-ruby/issues/756)) ([82254f9](https://github.com/openai/openai-ruby/commit/82254f980ccc0affa2555a81b0d8ed5aa0290835))
+* union definition re-using ([#760](https://github.com/openai/openai-ruby/issues/760)) ([3046c28](https://github.com/openai/openai-ruby/commit/3046c28935ca925c2f399f0350937d04eab54c0a))
+
+
+### Chores
+
+* extract reused JSON schema references even in unions ([#761](https://github.com/openai/openai-ruby/issues/761)) ([e17d3bf](https://github.com/openai/openai-ruby/commit/e17d3bf1fdf241f7a78ed72a39ddecabeb5877c8))
+* **internal:** refactor variable name ([#762](https://github.com/openai/openai-ruby/issues/762)) ([7e15b07](https://github.com/openai/openai-ruby/commit/7e15b0745dcbd3bf7fc4c1899d9d76e0a9ab1e48))
+* update contribute.md ([b4a0297](https://github.com/openai/openai-ruby/commit/b4a029775bb52d5db2f3fac235595f37b6746a61))
+
+## 0.15.0 (2025-07-21)
+
+Full Changelog: [v0.14.0...v0.15.0](https://github.com/openai/openai-ruby/compare/v0.14.0...v0.15.0)
+
+### Features
+
+* **api:** manual updates ([fb53071](https://github.com/openai/openai-ruby/commit/fb530713d08a4ba49e8bdaecd9848674bb35c333))
+
+
+### Bug Fixes
+
+* **internal:** tests should use normalized property names ([801e9c2](https://github.com/openai/openai-ruby/commit/801e9c29f65e572a3b49f5cf7891d3053e1d087f))
+
+
+### Chores
+
+* **api:** event shapes more accurate ([29f32ce](https://github.com/openai/openai-ruby/commit/29f32cedf6112d38fe8de454658a5afd7ad0d2cb))
+
 ## 0.14.0 (2025-07-16)
 
 Full Changelog: [v0.13.1...v0.14.0](https://github.com/openai/openai-ruby/compare/v0.13.1...v0.14.0)
```
data/README.md
CHANGED
````diff
@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->
 
 ```ruby
-gem "openai", "~> 0.14.0"
+gem "openai", "~> 0.16.0"
 ```
 
 <!-- x-release-please-end -->
@@ -443,7 +443,7 @@ You can provide typesafe request parameters like so:
 
 ```ruby
 openai.chat.completions.create(
-  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(
+  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
   model: :"gpt-4.1"
 )
 ```
@@ -459,7 +459,7 @@ openai.chat.completions.create(
 
 # You can also splat a full Params class:
 params = OpenAI::Chat::CompletionCreateParams.new(
-  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(
+  messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
   model: :"gpt-4.1"
 )
 openai.chat.completions.create(**params)
````
data/lib/openai/helpers/structured_output/json_schema_converter.rb
CHANGED
```diff
@@ -6,15 +6,9 @@ module OpenAI
     # To customize the JSON schema conversion for a type, implement the `JsonSchemaConverter` interface.
     module JsonSchemaConverter
       # @api private
-
+      POINTERS = Object.new.tap do
         _1.define_singleton_method(:inspect) do
-          "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::
-        end
-      end.freeze
-      # @api private
-      COUNTER = Object.new.tap do
-        _1.define_singleton_method(:inspect) do
-          "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::COUNTER>"
+          "#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTERS>"
         end
       end.freeze
       # @api private
@@ -81,14 +75,15 @@ module OpenAI
      def cache_def!(state, type:, &blk)
        defs, path = state.fetch_values(:defs, :path)
        if (stored = defs[type])
-          stored
-
+          pointers = stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+          pointers.first.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF).tap do
+            pointers << _1
+          end
        else
          ref_path = String.new
          ref = {"$ref": ref_path}
          stored = {
-            OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::
-            OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER => 1
+            OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS => [ref]
          }
          defs.store(type, stored)
          schema = blk.call
@@ -112,17 +107,21 @@ module OpenAI
        )
        reused_defs = {}
        defs.each_value do |acc|
-
-
-
+          sch = acc.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+          pointers = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
+
+          no_refs, refs = pointers.partition do
+            _1.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF)
          end
-          cnt = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::COUNTER)
 
-
-
-
-
-
+          case refs
+          in [ref]
+            ref.replace(sch)
+          in [_, ref, *]
+            reused_defs.store(ref.fetch(:$ref), sch)
+          else
+          end
+          no_refs.each { _1.replace(sch) }
        end
 
        xformed = reused_defs.transform_keys { _1.delete_prefix("#/$defs/") }
```
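The converter change above replaces duplicate counting (`COUNTER`) with pointer tracking (`POINTERS`): each cached definition now remembers every `$ref` hash that points at it, so a type referenced once can be inlined in place while a type referenced from several places is extracted into `$defs`. A minimal sketch of the visible effect, using the gem's documented structured-output DSL — the `Address`/`Person` classes are invented for illustration and are not part of this diff:

```ruby
require "openai"

class Address < OpenAI::BaseModel
  required :street, String
  required :city, String
end

class Person < OpenAI::BaseModel
  required :name, String
  # Address is referenced twice. With the POINTERS bookkeeping, the schema
  # sent to the API should contain a single $defs entry for Address plus two
  # $ref pointers, rather than two inlined copies.
  required :home_address, Address
  required :work_address, Address
end

client = OpenAI::Client.new # reads OPENAI_API_KEY from the environment

chat_completion = client.chat.completions.create(
  messages: [{role: :user, content: "Invent a person with two addresses."}],
  model: :"gpt-4.1",
  response_format: Person
)
```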
data/lib/openai/helpers/structured_output/union_of.rb
CHANGED
```diff
@@ -36,7 +36,17 @@ module OpenAI
          mergeable_keys.each_key { mergeable_keys[_1] += 1 if schema.keys == _1 }
        end
        mergeable = mergeable_keys.any? { _1.last == schemas.length }
-        mergeable
+        if mergeable
+          OpenAI::Internal::Util.deep_merge(*schemas, concat: true)
+        else
+          {
+            anyOf: schemas.each do
+              if _1.key?(:$ref)
+                _1.update(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF => true)
+              end
+            end
+          }
+        end
      end
    end
 
```
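This is the union half of the same rework (changelog items #760 and #761): variant schemas that share a key set are deep-merged as before, while non-mergeable variants are emitted as an `anyOf` whose `$ref` members get tagged with `NO_REF`, letting the pointer bookkeeping in `json_schema_converter.rb` decide later whether to inline them or keep the shared `$defs` entry. A rough sketch of a union that takes the `anyOf` path — the pet classes are invented for illustration:

```ruby
require "openai"

class Dog < OpenAI::BaseModel
  required :breed, String
end

class Cat < OpenAI::BaseModel
  required :favorite_perch, String
end

class Household < OpenAI::BaseModel
  # Dog and Cat have different key sets, so their schemas are not mergeable;
  # the converter emits {anyOf: [...]} and tags any $ref members with NO_REF.
  required :pet, OpenAI::UnionOf[Dog, Cat]
end
```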
data/lib/openai/models/audio/speech_create_params.rb
CHANGED
```diff
@@ -111,12 +111,6 @@ module OpenAI
 
        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ECHO }
 
-        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::FABLE }
-
-        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ONYX }
-
-        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::NOVA }
-
        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SAGE }
 
        variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SHIMMER }
@@ -137,9 +131,6 @@ module OpenAI
        BALLAD = :ballad
        CORAL = :coral
        ECHO = :echo
-        FABLE = :fable
-        ONYX = :onyx
-        NOVA = :nova
        SAGE = :sage
        SHIMMER = :shimmer
        VERSE = :verse
```
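Dropping `fable`, `onyx`, and `nova` from the Voice union and enum is a breaking change for any caller referencing those constants (the same removal appears in `chat_completion_audio_param.rb` below). A hedged sketch of requesting one of the voices still present in the type definitions; the model name is illustrative:

```ruby
require "openai"

client = OpenAI::Client.new

# :fable, :onyx, and :nova no longer appear in the SDK's Voice definitions;
# constants visible in this diff include :ballad, :coral, :echo, :sage,
# :shimmer, and :verse.
audio = client.audio.speech.create(
  model: :"gpt-4o-mini-tts",
  input: "The quick brown fox jumped over the lazy dog.",
  voice: :coral
)
```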
data/lib/openai/models/chat/chat_completion.rb
CHANGED
```diff
@@ -44,7 +44,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
@@ -193,7 +193,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
```
data/lib/openai/models/chat/chat_completion_audio_param.rb
CHANGED
```diff
@@ -67,12 +67,6 @@ module OpenAI
 
        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ECHO }
 
-        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::FABLE }
-
-        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ONYX }
-
-        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::NOVA }
-
        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SAGE }
 
        variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SHIMMER }
@@ -93,9 +87,6 @@ module OpenAI
        BALLAD = :ballad
        CORAL = :coral
        ECHO = :echo
-        FABLE = :fable
-        ONYX = :onyx
-        NOVA = :nova
        SAGE = :sage
        SHIMMER = :shimmer
        VERSE = :verse
```
data/lib/openai/models/chat/chat_completion_chunk.rb
CHANGED
```diff
@@ -43,7 +43,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
@@ -376,7 +376,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
```
data/lib/openai/models/chat/chat_completion_store_message.rb
CHANGED
```diff
@@ -10,10 +10,41 @@ module OpenAI
      #   @return [String]
      required :id, String
 
-      # @!
+      # @!attribute content_parts
+      #   If a content parts array was provided, this is an array of `text` and
+      #   `image_url` parts. Otherwise, null.
+      #
+      #   @return [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil]
+      optional :content_parts,
+               -> {
+                 OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionStoreMessage::ContentPart]
+               },
+               nil?: true
+
+      # @!method initialize(id:, content_parts: nil)
+      #   Some parameter documentations has been truncated, see
+      #   {OpenAI::Models::Chat::ChatCompletionStoreMessage} for more details.
+      #
      #   A chat completion message generated by the model.
      #
      #   @param id [String] The identifier of the chat message.
+      #
+      #   @param content_parts [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil] If a content parts array was provided, this is an array of `text` and `image_url
+
+      # Learn about
+      # [text inputs](https://platform.openai.com/docs/guides/text-generation).
+      module ContentPart
+        extend OpenAI::Internal::Type::Union
+
+        # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
+        variant -> { OpenAI::Chat::ChatCompletionContentPartText }
+
+        # Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
+        variant -> { OpenAI::Chat::ChatCompletionContentPartImage }
+
+        # @!method self.variants
+        #   @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage)]
+      end
    end
  end
 
```
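The new `content_parts` field (and its `ContentPart` union) surfaces the structured content of messages stored with `store: true`. A sketch of reading it back, assuming a stored completion ID; the `messages.list` call here mirrors the gem's generated resource for `GET /chat/completions/{id}/messages`:

```ruby
require "openai"

client = OpenAI::Client.new

# Placeholder ID of a chat completion created with store: true.
page = client.chat.completions.messages.list("chatcmpl_abc123")

page.auto_paging_each do |message|
  # content_parts is nil unless the original request supplied an array of
  # content parts; entries are text or image_url parts per the new union.
  message.content_parts&.each { |part| puts part.class }
end
```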
data/lib/openai/models/chat/completion_create_params.rb
CHANGED
```diff
@@ -182,6 +182,14 @@ module OpenAI
      #   @return [Float, nil]
      optional :presence_penalty, Float, nil?: true
 
+      # @!attribute prompt_cache_key
+      #   Used by OpenAI to cache responses for similar requests to optimize your cache
+      #   hit rates. Replaces the `user` field.
+      #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+      #
+      #   @return [String, nil]
+      optional :prompt_cache_key, String
+
      # @!attribute reasoning_effort
      #   **o-series models only**
      #
@@ -208,6 +216,16 @@ module OpenAI
      #   @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject, nil]
      optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat }
 
+      # @!attribute safety_identifier
+      #   A stable identifier used to help detect users of your application that may be
+      #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+      #   identifies each user. We recommend hashing their username or email address, in
+      #   order to avoid sending us any identifying information.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+      #
+      #   @return [String, nil]
+      optional :safety_identifier, String
+
      # @!attribute seed
      #   This feature is in Beta. If specified, our system will make a best effort to
      #   sample deterministically, such that repeated requests with the same `seed` and
@@ -224,7 +242,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
@@ -320,9 +338,13 @@ module OpenAI
      optional :top_p, Float, nil?: true
 
      # @!attribute user
-      #
-      #
-      #
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
      #
      #   @return [String, nil]
      optional :user, String
@@ -335,7 +357,7 @@ module OpenAI
      #   @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
      optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
 
-      # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, reasoning_effort: nil, response_format: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
+      # @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::Chat::CompletionCreateParams} for more details.
      #
@@ -371,10 +393,14 @@ module OpenAI
      #
      #   @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
      #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
      #   @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
      #
      #   @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
      #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
      #   @param seed [Integer, nil] This feature is in Beta.
      #
      #   @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
@@ -395,7 +421,7 @@ module OpenAI
      #
      #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
      #
-      #   @param user [String]
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      #   @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
      #
@@ -553,7 +579,7 @@ module OpenAI
        #   - If set to 'auto', then the request will be processed with the service tier
        #     configured in the Project settings. Unless otherwise configured, the Project
        #     will use 'default'.
-        #   - If set to 'default', then the
+        #   - If set to 'default', then the request will be processed with the standard
        #     pricing and performance for the selected model.
        #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #     'priority', then the request will be processed with the corresponding service
```
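Usage of the two new request fields is straightforward; a short sketch (identifiers are placeholders). Per the deprecation note above, this pair replaces `user`:

```ruby
require "openai"
require "digest"

client = OpenAI::Client.new

chat_completion = client.chat.completions.create(
  messages: [{role: :user, content: "Say this is a test"}],
  model: :"gpt-4.1",
  # Hash your own user ID rather than sending a raw username or email.
  safety_identifier: Digest::SHA256.hexdigest("ada@example.com"),
  # Stable key that buckets similar requests to improve prompt cache hits.
  prompt_cache_key: "support-bot-v2"
)
```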
data/lib/openai/models/function_definition.rb
CHANGED
```diff
@@ -34,7 +34,7 @@ module OpenAI
      #   set to true, the model will follow the exact schema defined in the `parameters`
      #   field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
      #   more about Structured Outputs in the
-      #   [function calling guide](docs/guides/function-calling).
+      #   [function calling guide](https://platform.openai.com/docs/guides/function-calling).
      #
      #   @return [Boolean, nil]
      optional :strict, OpenAI::Internal::Type::Boolean, nil?: true
```
data/lib/openai/models/image_edit_params.rb
CHANGED
```diff
@@ -4,7 +4,7 @@ module OpenAI
  module Models
    # @see OpenAI::Resources::Images#edit
    #
-    # @see OpenAI::Resources::Images#
+    # @see OpenAI::Resources::Images#edit_stream_raw
    class ImageEditParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters
@@ -92,6 +92,9 @@ module OpenAI
      #   responses that return partial images. Value must be between 0 and 3. When set to
      #   0, the response will be a single image sent in one streaming event.
      #
+      #   Note that the final image may be sent before the full number of partial images
+      #   are generated if the full image is generated more quickly.
+      #
      #   @return [Integer, nil]
      optional :partial_images, Integer, nil?: true
 
```
data/lib/openai/models/image_generate_params.rb
CHANGED
```diff
@@ -4,7 +4,7 @@ module OpenAI
  module Models
    # @see OpenAI::Resources::Images#generate
    #
-    # @see OpenAI::Resources::Images#
+    # @see OpenAI::Resources::Images#generate_stream_raw
    class ImageGenerateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters
@@ -71,6 +71,9 @@ module OpenAI
      #   responses that return partial images. Value must be between 0 and 3. When set to
      #   0, the response will be a single image sent in one streaming event.
      #
+      #   Note that the final image may be sent before the full number of partial images
+      #   are generated if the full image is generated more quickly.
+      #
      #   @return [Integer, nil]
      optional :partial_images, Integer, nil?: true
 
```
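The note added to `partial_images` in both image param classes means a stream consumer should not assume it will see exactly the requested number of partial frames before the final image. A sketch against the streaming method referenced above (`generate_stream_raw`); the handler just prints event types, since the exact event classes are not shown in this diff:

```ruby
require "openai"

client = OpenAI::Client.new

stream = client.images.generate_stream_raw(
  model: :"gpt-image-1",
  prompt: "A watercolor lighthouse at dusk",
  partial_images: 3
)

stream.each do |event|
  # Up to 3 partial-image events may arrive, but the final image can land
  # earlier if generation finishes quickly.
  puts event.type
end
```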
data/lib/openai/models/images_response.rb
CHANGED
```diff
@@ -140,7 +140,7 @@ module OpenAI
        required :input_tokens_details, -> { OpenAI::ImagesResponse::Usage::InputTokensDetails }
 
        # @!attribute output_tokens
-        #   The number of
+        #   The number of output tokens generated by the model.
        #
        #   @return [Integer]
        required :output_tokens, Integer
@@ -152,16 +152,13 @@ module OpenAI
        required :total_tokens, Integer
 
        # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
-        #   Some parameter documentations has been truncated, see
-        #   {OpenAI::Models::ImagesResponse::Usage} for more details.
-        #
        #   For `gpt-image-1` only, the token usage information for the image generation.
        #
        #   @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
        #
        #   @param input_tokens_details [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
        #
-        #   @param output_tokens [Integer] The number of
+        #   @param output_tokens [Integer] The number of output tokens generated by the model.
        #
        #   @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.
 
```
data/lib/openai/models/responses/response.rb
CHANGED
```diff
@@ -171,6 +171,14 @@ module OpenAI
      #   @return [OpenAI::Models::Responses::ResponsePrompt, nil]
      optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
 
+      # @!attribute prompt_cache_key
+      #   Used by OpenAI to cache responses for similar requests to optimize your cache
+      #   hit rates. Replaces the `user` field.
+      #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
+      #
+      #   @return [String, nil]
+      optional :prompt_cache_key, String
+
      # @!attribute reasoning
      #   **o-series models only**
      #
@@ -180,13 +188,23 @@ module OpenAI
      #   @return [OpenAI::Models::Reasoning, nil]
      optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
 
+      # @!attribute safety_identifier
+      #   A stable identifier used to help detect users of your application that may be
+      #   violating OpenAI's usage policies. The IDs should be a string that uniquely
+      #   identifies each user. We recommend hashing their username or email address, in
+      #   order to avoid sending us any identifying information.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
+      #
+      #   @return [String, nil]
+      optional :safety_identifier, String
+
      # @!attribute service_tier
      #   Specifies the processing type used for serving the request.
      #
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
@@ -246,13 +264,37 @@ module OpenAI
      optional :usage, -> { OpenAI::Responses::ResponseUsage }
 
      # @!attribute user
-      #
-      #
-      #
+      #   @deprecated
+      #
+      #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
+      #   `prompt_cache_key` instead to maintain caching optimizations. A stable
+      #   identifier for your end-users. Used to boost cache hit rates by better bucketing
+      #   similar requests and to help OpenAI detect and prevent abuse.
+      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
      #
      #   @return [String, nil]
      optional :user, String
 
+      # Convenience property that aggregates all `output_text` items from the `output` list.
+      #
+      # If no `output_text` content blocks exist, then an empty string is returned.
+      #
+      # @return [String]
+      def output_text
+        texts = []
+
+        output.each do |item|
+          next unless item.type == :message
+          item.content.each do |content|
+            if content.type == :output_text
+              texts << content.text
+            end
+          end
+        end
+
+        texts.join
+      end
+
      # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::Responses::Response} for more details.
@@ -293,8 +335,12 @@ module OpenAI
      #
      #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
      #
+      #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
+      #
      #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
      #
+      #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
+      #
      #   @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
      #
      #   @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
@@ -307,7 +353,7 @@ module OpenAI
      #
      #   @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
      #
-      #   @param user [String]
+      #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
      #
      #   @param object [Symbol, :response] The object type of this resource - always set to `response`.
 
@@ -401,7 +447,7 @@ module OpenAI
      #   - If set to 'auto', then the request will be processed with the service tier
      #     configured in the Project settings. Unless otherwise configured, the Project
      #     will use 'default'.
-      #   - If set to 'default', then the
+      #   - If set to 'default', then the request will be processed with the standard
      #     pricing and performance for the selected model.
      #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
      #     'priority', then the request will be processed with the corresponding service
```
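The new `output_text` helper (changelog item #757) walks `output` for `message` items and joins their `output_text` blocks, matching the convenience the other OpenAI SDKs expose. A quick usage sketch:

```ruby
require "openai"

client = OpenAI::Client.new

response = client.responses.create(
  model: :"gpt-4.1",
  input: "Write a one-line haiku about code review."
)

# Joins every output_text block across the response's message items;
# returns "" when there are none.
puts response.output_text
```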
data/lib/openai/models/responses/response_code_interpreter_tool_call.rb
CHANGED
```diff
@@ -34,7 +34,8 @@ module OpenAI
                nil?: true
 
      # @!attribute status
-      #   The status of the code interpreter tool call.
+      #   The status of the code interpreter tool call. Valid values are `in_progress`,
+      #   `completed`, `incomplete`, `interpreting`, and `failed`.
      #
      #   @return [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status]
      required :status, enum: -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Status }
@@ -59,7 +60,7 @@ module OpenAI
      #
      #   @param outputs [Array<OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image>, nil] The outputs generated by the code interpreter, such as logs or images.
      #
-      #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call.
+      #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call. Valid values are `in_progress`, `c
      #
      #   @param type [Symbol, :code_interpreter_call] The type of the code interpreter tool call. Always `code_interpreter_call`.
 
@@ -121,7 +122,8 @@ module OpenAI
      #   @return [Array(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image)]
      end
 
-      # The status of the code interpreter tool call.
+      # The status of the code interpreter tool call. Valid values are `in_progress`,
+      # `completed`, `incomplete`, `interpreting`, and `failed`.
      #
      # @see OpenAI::Models::Responses::ResponseCodeInterpreterToolCall#status
      module Status
```