openai 0.41.0 → 0.43.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +25 -0
- data/README.md +10 -16
- data/lib/openai/internal/util.rb +7 -2
- data/lib/openai/models/audio/speech_create_params.rb +12 -10
- data/lib/openai/models/audio/speech_model.rb +1 -0
- data/lib/openai/models/audio/transcription_create_params.rb +10 -8
- data/lib/openai/models/audio_model.rb +1 -0
- data/lib/openai/models/chat/chat_completion_audio_param.rb +7 -5
- data/lib/openai/models/conversations/conversation_item.rb +1 -1
- data/lib/openai/models/conversations/message.rb +1 -1
- data/lib/openai/models/realtime/audio_transcription.rb +33 -10
- data/lib/openai/models/realtime/realtime_audio_config_output.rb +9 -9
- data/lib/openai/models/realtime/realtime_response_create_audio_output.rb +9 -9
- data/lib/openai/models/realtime/realtime_session.rb +46 -6
- data/lib/openai/models/realtime/realtime_session_create_request.rb +6 -0
- data/lib/openai/models/realtime/realtime_session_create_response.rb +6 -0
- data/lib/openai/models/responses/input_token_count_params.rb +4 -7
- data/lib/openai/models/responses/response.rb +17 -8
- data/lib/openai/models/responses/response_compact_params.rb +1 -0
- data/lib/openai/models/responses/response_compaction_item.rb +4 -2
- data/lib/openai/models/responses/response_compaction_item_param.rb +2 -1
- data/lib/openai/models/responses/response_function_call_output_item.rb +1 -1
- data/lib/openai/models/responses/response_function_shell_tool_call_output.rb +10 -6
- data/lib/openai/models/responses/response_function_web_search.rb +11 -3
- data/lib/openai/models/responses/response_input_item.rb +1 -0
- data/lib/openai/models/responses/response_item.rb +1 -1
- data/lib/openai/models/responses/response_output_item.rb +1 -1
- data/lib/openai/models/responses/response_output_text.rb +1 -1
- data/lib/openai/models/responses/tool.rb +4 -1
- data/lib/openai/models/video.rb +3 -3
- data/lib/openai/models/video_create_error.rb +7 -2
- data/lib/openai/models/video_create_params.rb +3 -3
- data/lib/openai/models/video_model.rb +23 -3
- data/lib/openai/resources/audio/speech.rb +1 -1
- data/lib/openai/resources/images.rb +4 -2
- data/lib/openai/resources/responses/input_tokens.rb +1 -1
- data/lib/openai/resources/videos.rb +1 -1
- data/lib/openai/version.rb +1 -1
- data/rbi/openai/models/audio/speech_create_params.rbi +15 -12
- data/rbi/openai/models/audio/speech_model.rbi +5 -0
- data/rbi/openai/models/audio/transcription_create_params.rbi +15 -12
- data/rbi/openai/models/audio_model.rbi +5 -0
- data/rbi/openai/models/chat/chat_completion_audio_param.rbi +9 -6
- data/rbi/openai/models/conversations/message.rbi +1 -1
- data/rbi/openai/models/realtime/audio_transcription.rbi +52 -21
- data/rbi/openai/models/realtime/realtime_audio_config_output.rbi +12 -12
- data/rbi/openai/models/realtime/realtime_response_create_audio_output.rbi +12 -12
- data/rbi/openai/models/realtime/realtime_session.rbi +42 -12
- data/rbi/openai/models/realtime/realtime_session_create_request.rbi +10 -0
- data/rbi/openai/models/realtime/realtime_session_create_response.rbi +10 -0
- data/rbi/openai/models/responses/input_token_count_params.rbi +3 -9
- data/rbi/openai/models/responses/response.rbi +18 -8
- data/rbi/openai/models/responses/response_compaction_item.rbi +4 -0
- data/rbi/openai/models/responses/response_compaction_item_param.rbi +2 -0
- data/rbi/openai/models/responses/response_function_call_output_item.rbi +1 -1
- data/rbi/openai/models/responses/response_function_shell_tool_call_output.rbi +10 -2
- data/rbi/openai/models/responses/response_function_web_search.rbi +13 -2
- data/rbi/openai/models/responses/response_output_text.rbi +1 -1
- data/rbi/openai/models/responses/tool.rbi +3 -0
- data/rbi/openai/models/video.rbi +3 -3
- data/rbi/openai/models/video_create_error.rbi +9 -1
- data/rbi/openai/models/video_create_params.rbi +4 -4
- data/rbi/openai/models/video_model.rbi +8 -5
- data/rbi/openai/resources/audio/speech.rbi +5 -4
- data/rbi/openai/resources/audio/transcriptions.rbi +12 -10
- data/rbi/openai/resources/images.rbi +4 -2
- data/rbi/openai/resources/responses/input_tokens.rbi +1 -3
- data/rbi/openai/resources/videos.rbi +1 -1
- data/sig/openai/models/audio/speech_model.rbs +6 -1
- data/sig/openai/models/audio_model.rbs +2 -0
- data/sig/openai/models/realtime/audio_transcription.rbs +7 -4
- data/sig/openai/models/realtime/realtime_session.rbs +9 -4
- data/sig/openai/models/realtime/realtime_session_create_request.rbs +4 -0
- data/sig/openai/models/realtime/realtime_session_create_response.rbs +4 -0
- data/sig/openai/models/responses/response.rbs +5 -0
- data/sig/openai/models/responses/response_function_web_search.rbs +7 -0
- data/sig/openai/models/video_model.rbs +5 -4
- metadata +16 -2
checksums.yaml
CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d4083372aec70ae767e55529226345fd14c51b526994a100ac2985ab1990cd48
+  data.tar.gz: 79cc5f4c8ad7b26a1ec05490a80b55105393750839b2ecb3bf4da1a22385952d
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 95ff955307367382d388f0d575f733a7c384b513a72be89010dfe70aefa1868c39c36c9a49c7130ea124f793feed59543b7f837facbd738082cd1e0db1e2d658
+  data.tar.gz: 378b3dc58226b56d9a362578a879fc3f48acdcfecbea44507798bbfb53ed4fc8f96a5b2849004db91aeab9acadcde2829be897321e38bafa987e819e0ca1aa1a
data/CHANGELOG.md
CHANGED

@@ -1,5 +1,30 @@
 # Changelog

+## 0.43.0 (2026-01-09)
+
+Full Changelog: [v0.42.0...v0.43.0](https://github.com/openai/openai-ruby/compare/v0.42.0...v0.43.0)
+
+### Features
+
+* **api:** add new Response completed_at prop ([c32e1c2](https://github.com/openai/openai-ruby/commit/c32e1c2a0a0ae6a9b7724e1c6d251ba14e8499e1))
+
+
+### Chores
+
+* add ci tests for ruby 4 compatibility [#235](https://github.com/openai/openai-ruby/issues/235) ([#236](https://github.com/openai/openai-ruby/issues/236)) ([1aa0d7a](https://github.com/openai/openai-ruby/commit/1aa0d7abf5a4714fa28cfe7ee5aecefbe2c683d2))
+* **internal:** codegen related update ([ef23de3](https://github.com/openai/openai-ruby/commit/ef23de347b5f541853a32a288cd02a54938793cf))
+* **internal:** use different example values for some enums ([8b6c4ad](https://github.com/openai/openai-ruby/commit/8b6c4ade813244e8c65690c66fe09f2566dd3ff0))
+* move `cgi` into dependencies for ruby 4 ([bd9c798](https://github.com/openai/openai-ruby/commit/bd9c798552a3d378ec943c7e07d6cf1334f72b1d))
+
+## 0.42.0 (2025-12-19)
+
+Full Changelog: [v0.41.0...v0.42.0](https://github.com/openai/openai-ruby/compare/v0.41.0...v0.42.0)
+
+### Bug Fixes
+
+* issue where json.parse errors when receiving HTTP 204 with nobody ([7984c03](https://github.com/openai/openai-ruby/commit/7984c0396f5acd1b801514e280415090deb0cd06))
+* rebuild ([52b19e9](https://github.com/openai/openai-ruby/commit/52b19e9b4b0344c77bec4603df3d4f16ee4cd720))
+
 ## 0.41.0 (2025-12-16)

 Full Changelog: [v0.40.0...v0.41.0](https://github.com/openai/openai-ruby/compare/v0.40.0...v0.41.0)
data/README.md
CHANGED

@@ -15,7 +15,7 @@ To use this gem, install via Bundler by adding the following to your application
 <!-- x-release-please-start-version -->

 ```ruby
-gem "openai", "~> 0.
+gem "openai", "~> 0.43.0"
 ```

 <!-- x-release-please-end -->
@@ -30,10 +30,7 @@ openai = OpenAI::Client.new(
   api_key: ENV["OPENAI_API_KEY"] # This is the default and can be omitted
 )

-chat_completion = openai.chat.completions.create(
-  messages: [{role: "user", content: "Say this is a test"}],
-  model: :"gpt-5.2"
-)
+chat_completion = openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: "gpt-4o")

 puts(chat_completion)
 ```
@@ -45,7 +42,7 @@ We provide support for streaming responses using Server-Sent Events (SSE).
 ```ruby
 stream = openai.responses.stream(
   input: "Write a haiku about OpenAI.",
-  model:
+  model: "gpt-4o"
 )

 stream.each do |event|
@@ -298,7 +295,7 @@ When the library is unable to connect to the API, or if the API returns a non-su

 ```ruby
 begin
-  job = openai.fine_tuning.jobs.create(model:
+  job = openai.fine_tuning.jobs.create(model: "gpt-4o", training_file: "file-abc123")
 rescue OpenAI::Errors::APIConnectionError => e
   puts("The server could not be reached")
   puts(e.cause) # an underlying Exception, likely raised within `net/http`
@@ -343,7 +340,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-  model:
+  model: "gpt-4o",
   request_options: {max_retries: 5}
 )
 ```
@@ -361,7 +358,7 @@ openai = OpenAI::Client.new(
 # Or, configure per-request:
 openai.chat.completions.create(
   messages: [{role: "user", content: "How can I list all files in a directory using Python?"}],
-  model:
+  model: "gpt-4o",
   request_options: {timeout: 5}
 )
 ```
@@ -396,7 +393,7 @@ Note: the `extra_` parameters of the same name overrides the documented paramete
 chat_completion =
   openai.chat.completions.create(
     messages: [{role: "user", content: "How can I get the name of the current day in JavaScript?"}],
-    model:
+    model: "gpt-4o",
     request_options: {
       extra_query: {my_query_parameter: value},
       extra_body: {my_body_parameter: value},
@@ -444,7 +441,7 @@ You can provide typesafe request parameters like so:
 ```ruby
 openai.chat.completions.create(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model:
+  model: "gpt-4o"
 )
 ```

@@ -452,15 +449,12 @@ Or, equivalently:

 ```ruby
 # Hashes work, but are not typesafe:
-openai.chat.completions.create(
-  messages: [{role: "user", content: "Say this is a test"}],
-  model: :"gpt-5.2"
-)
+openai.chat.completions.create(messages: [{role: "user", content: "Say this is a test"}], model: "gpt-4o")

 # You can also splat a full Params class:
 params = OpenAI::Chat::CompletionCreateParams.new(
   messages: [OpenAI::Chat::ChatCompletionUserMessageParam.new(content: "Say this is a test")],
-  model:
+  model: "gpt-4o"
 )
 openai.chat.completions.create(**params)
 ```
data/lib/openai/internal/util.rb
CHANGED

@@ -657,7 +657,8 @@ module OpenAI
       def decode_content(headers, stream:, suppress_error: false)
         case (content_type = headers["content-type"])
         in OpenAI::Internal::Util::JSON_CONTENT
-          json = stream.to_a.join
+          return nil if (json = stream.to_a.join).empty?
+
           begin
             JSON.parse(json, symbolize_names: true)
           rescue JSON::ParserError => e
@@ -667,7 +668,11 @@ module OpenAI
         in OpenAI::Internal::Util::JSONL_CONTENT
           lines = decode_lines(stream)
           chain_fused(lines) do |y|
-            lines.each
+            lines.each do
+              next if _1.empty?
+
+              y << JSON.parse(_1, symbolize_names: true)
+            end
           end
         in %r{^text/event-stream}
           lines = decode_lines(stream)
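The 0.42.0 fix above is easiest to see in isolation. A minimal standalone sketch (plain `JSON` from the stdlib, not the gem's internal API), assuming only that an HTTP 204 arrives with an empty body:

```ruby
require "json"

# Mirrors the guard added to decode_content: an empty JSON body now
# decodes to nil instead of raising JSON::ParserError.
def decode_json_body(body)
  return nil if body.empty? # e.g. HTTP 204 No Content

  JSON.parse(body, symbolize_names: true)
end

decode_json_body("")           # => nil (previously raised JSON::ParserError)
decode_json_body('{"id":"x"}') # => {:id=>"x"}
```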
data/lib/openai/models/audio/speech_create_params.rb
CHANGED

@@ -16,15 +16,16 @@ module OpenAI

       # @!attribute model
       #   One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      #   `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      #   `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       #
       #   @return [String, Symbol, OpenAI::Models::Audio::SpeechModel]
       required :model, union: -> { OpenAI::Audio::SpeechCreateParams::Model }

       # @!attribute voice
-      #   The voice to use when generating the audio. Supported voices are
-      #   `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
-      #   `verse`. Previews of the voices are available
+      #   The voice to use when generating the audio. Supported built-in voices are
+      #   `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+      #   `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
+      #   in the
       #   [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
       #
       #   @return [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice]
@@ -66,7 +67,7 @@ module OpenAI
       #
       # @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts):
       #
-      # @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy
+      # @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported built-in voices are `alloy
       #
       # @param instructions [String] Control the voice of your generated audio with additional instructions. Does not
       #
@@ -79,22 +80,23 @@ module OpenAI
       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

       # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-      # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+      # `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
       module Model
         extend OpenAI::Internal::Type::Union

         variant String

-        # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
+        # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd`, `gpt-4o-mini-tts`, or `gpt-4o-mini-tts-2025-12-15`.
         variant enum: -> { OpenAI::Audio::SpeechModel }

         # @!method self.variants
         #   @return [Array(String, Symbol, OpenAI::Models::Audio::SpeechModel)]
       end

-      # The voice to use when generating the audio. Supported voices are
-      # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
-      # `verse`. Previews of the voices are available
+      # The voice to use when generating the audio. Supported built-in voices are
+      # `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
+      # `shimmer`, `verse`, `marin`, and `cedar`. Previews of the voices are available
+      # in the
       # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
       module Voice
         extend OpenAI::Internal::Type::Union
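For illustration, a hedged sketch of calling the speech endpoint with the names this diff documents; the `openai` client setup and the output handling are assumptions, not part of the diff:

```ruby
# Assumes `openai` is an OpenAI::Client. Model and voice names come from the
# doc changes above; treating the response as IO-like is an assumption.
audio = openai.audio.speech.create(
  model: :"gpt-4o-mini-tts-2025-12-15", # dated TTS snapshot added in this release
  voice: "marin",                       # one of the newly documented voices
  input: "Say this is a test"
)
File.binwrite("speech.mp3", audio.read) # hypothetical output path
```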
data/lib/openai/models/audio/transcription_create_params.rb
CHANGED

@@ -19,8 +19,9 @@ module OpenAI

       # @!attribute model
       #   ID of the model to use. The options are `gpt-4o-transcribe`,
-      #   `gpt-4o-mini-transcribe`, `whisper-1`
-      #   Whisper V2 model), and
+      #   `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      #   (which is powered by our open source Whisper V2 model), and
+      #   `gpt-4o-transcribe-diarize`.
       #
       #   @return [String, Symbol, OpenAI::Models::AudioModel]
       required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }
@@ -42,9 +43,9 @@ module OpenAI
       #   Additional information to include in the transcription response. `logprobs` will
       #   return the log probabilities of the tokens in the response to understand the
       #   model's confidence in the transcription. `logprobs` only works with
-      #   response_format set to `json` and only with the models `gpt-4o-transcribe
-      #   `gpt-4o-mini-transcribe`. This field is
-      #   `gpt-4o-transcribe-diarize`.
+      #   response_format set to `json` and only with the models `gpt-4o-transcribe`,
+      #   `gpt-4o-mini-transcribe`, and `gpt-4o-mini-transcribe-2025-12-15`. This field is
+      #   not supported when using `gpt-4o-transcribe-diarize`.
       #
       #   @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>, nil]
       optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionInclude] }
@@ -146,14 +147,15 @@ module OpenAI
       # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

       # ID of the model to use. The options are `gpt-4o-transcribe`,
-      # `gpt-4o-mini-transcribe`, `whisper-1`
-      # Whisper V2 model), and
+      # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1`
+      # (which is powered by our open source Whisper V2 model), and
+      # `gpt-4o-transcribe-diarize`.
       module Model
         extend OpenAI::Internal::Type::Union

         variant String

-        # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
+        # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`, `whisper-1` (which is powered by our open source Whisper V2 model), and `gpt-4o-transcribe-diarize`.
         variant enum: -> { OpenAI::AudioModel }

         # @!method self.variants
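A companion sketch for the transcription side, under the same assumptions (`openai` client, hypothetical file path); combining `logprobs` with `response_format: :json` follows the constraint the updated comment spells out:

```ruby
require "pathname"

# Transcribe with the dated snapshot model added in this release.
transcription = openai.audio.transcriptions.create(
  file: Pathname("speech.mp3"),                # hypothetical local file
  model: :"gpt-4o-mini-transcribe-2025-12-15",
  include: [:logprobs],                        # supported for this model per the doc change
  response_format: :json                       # logprobs requires json output
)
puts(transcription.text)
```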
data/lib/openai/models/audio_model.rb
CHANGED

@@ -8,6 +8,7 @@ module OpenAI
      WHISPER_1 = :"whisper-1"
      GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
      GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
+     GPT_4O_MINI_TRANSCRIBE_2025_12_15 = :"gpt-4o-mini-transcribe-2025-12-15"
      GPT_4O_TRANSCRIBE_DIARIZE = :"gpt-4o-transcribe-diarize"

      # @!method self.values
data/lib/openai/models/chat/chat_completion_audio_param.rb
CHANGED

@@ -12,8 +12,9 @@ module OpenAI
       required :format_, enum: -> { OpenAI::Chat::ChatCompletionAudioParam::Format }, api_name: :format

       # @!attribute voice
-      #   The voice the model uses to respond. Supported voices are `alloy`,
-      #   `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`,
+      #   The voice the model uses to respond. Supported built-in voices are `alloy`,
+      #   `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`,
+      #   `marin`, and `cedar`.
       #
       #   @return [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice]
       required :voice, union: -> { OpenAI::Chat::ChatCompletionAudioParam::Voice }
@@ -28,7 +29,7 @@ module OpenAI
       #
       # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
       #
-      # @param voice [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] The voice the model uses to respond. Supported voices are
+      # @param voice [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] The voice the model uses to respond. Supported built-in voices are `alloy`, `ash

       # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,
       # or `pcm16`.
@@ -48,8 +49,9 @@ module OpenAI
         # @!method self.values
         #   @return [Array<Symbol>]
       end

-      # The voice the model uses to respond. Supported voices are `alloy`,
-      # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`,
+      # The voice the model uses to respond. Supported built-in voices are `alloy`,
+      # `ash`, `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, `shimmer`,
+      # `marin`, and `cedar`.
       #
       # @see OpenAI::Models::Chat::ChatCompletionAudioParam#voice
       module Voice
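A hedged usage sketch for the voice list change; the audio-capable model name below is an assumption, since this diff does not pin one down:

```ruby
# Request spoken output with one of the newly documented voices. The
# `gpt-4o-audio-preview` model name is an assumption, not from this diff.
chat_completion = openai.chat.completions.create(
  model: "gpt-4o-audio-preview",
  modalities: [:text, :audio],
  audio: OpenAI::Chat::ChatCompletionAudioParam.new(format_: :wav, voice: "cedar"),
  messages: [{role: "user", content: "Say this is a test"}]
)
```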
data/lib/openai/models/conversations/conversation_item.rb
CHANGED

@@ -57,7 +57,7 @@ module OpenAI
      # A tool call that executes one or more shell commands in a managed environment.
      variant :shell_call, -> { OpenAI::Responses::ResponseFunctionShellToolCall }

-     # The output of a shell tool call.
+     # The output of a shell tool call that was emitted.
      variant :shell_call_output, -> { OpenAI::Responses::ResponseFunctionShellToolCallOutput }

      # A tool call that applies file diffs by creating, deleting, or updating files.
data/lib/openai/models/realtime/audio_transcription.rb
CHANGED

@@ -14,11 +14,12 @@ module OpenAI

      # @!attribute model
      #   The model to use for transcription. Current options are `whisper-1`,
-     #   `gpt-4o-mini-transcribe`, `gpt-4o-transcribe
-     #
+     #   `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+     #   `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+     #   `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
      #
-     #   @return [Symbol, OpenAI::Models::Realtime::AudioTranscription::Model, nil]
-     optional :model,
+     #   @return [String, Symbol, OpenAI::Models::Realtime::AudioTranscription::Model, nil]
+     optional :model, union: -> { OpenAI::Realtime::AudioTranscription::Model }

      # @!attribute prompt
      #   An optional text to guide the model's style or continue a previous audio
@@ -36,25 +37,47 @@ module OpenAI
      #
      # @param language [String] The language of the input audio. Supplying the input language in
      #
-     # @param model [Symbol, OpenAI::Models::Realtime::AudioTranscription::Model] The model to use for transcription. Current options are `whisper-1`, `gpt-4o-min
+     # @param model [String, Symbol, OpenAI::Models::Realtime::AudioTranscription::Model] The model to use for transcription. Current options are `whisper-1`, `gpt-4o-min
      #
      # @param prompt [String] An optional text to guide the model's style or continue a previous audio

      # The model to use for transcription. Current options are `whisper-1`,
-     # `gpt-4o-mini-transcribe`, `gpt-4o-transcribe
-     #
+     # `gpt-4o-mini-transcribe`, `gpt-4o-mini-transcribe-2025-12-15`,
+     # `gpt-4o-transcribe`, and `gpt-4o-transcribe-diarize`. Use
+     # `gpt-4o-transcribe-diarize` when you need diarization with speaker labels.
      #
      # @see OpenAI::Models::Realtime::AudioTranscription#model
      module Model
-       extend OpenAI::Internal::Type::
+       extend OpenAI::Internal::Type::Union
+
+       variant String
+
+       variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::WHISPER_1 }
+
+       variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_MINI_TRANSCRIBE }
+
+       variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_MINI_TRANSCRIBE_2025_12_15 }
+
+       variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_TRANSCRIBE }
+
+       variant const: -> { OpenAI::Models::Realtime::AudioTranscription::Model::GPT_4O_TRANSCRIBE_DIARIZE }
+
+       # @!method self.variants
+       #   @return [Array(String, Symbol)]
+
+       define_sorbet_constant!(:Variants) do
+         T.type_alias { T.any(String, OpenAI::Realtime::AudioTranscription::Model::TaggedSymbol) }
+       end
+
+       # @!group

        WHISPER_1 = :"whisper-1"
        GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
+       GPT_4O_MINI_TRANSCRIBE_2025_12_15 = :"gpt-4o-mini-transcribe-2025-12-15"
        GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
        GPT_4O_TRANSCRIBE_DIARIZE = :"gpt-4o-transcribe-diarize"

-       # @!
-       #   @return [Array<Symbol>]
+       # @!endgroup
      end
    end
  end
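The practical effect of this enum-to-union switch, sketched under the assumption that model classes accept these attributes directly:

```ruby
# Both forms should now type-check: the known enum symbol, and an arbitrary
# String accepted by the new `variant String` branch.
OpenAI::Realtime::AudioTranscription.new(model: :"whisper-1")
OpenAI::Realtime::AudioTranscription.new(model: "gpt-4o-mini-transcribe-2025-12-15")
```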
data/lib/openai/models/realtime/realtime_audio_config_output.rb
CHANGED

@@ -23,10 +23,10 @@ module OpenAI
      optional :speed, Float

      # @!attribute voice
-     #   The voice the model uses to respond.
-     #
-     #   `
-     #
+     #   The voice the model uses to respond. Supported built-in voices are `alloy`,
+     #   `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+     #   `cedar`. Voice cannot be changed during the session once the model has responded
+     #   with audio at least once. We recommend `marin` and `cedar` for best quality.
      #
      #   @return [String, Symbol, OpenAI::Models::Realtime::RealtimeAudioConfigOutput::Voice, nil]
      optional :voice, union: -> { OpenAI::Realtime::RealtimeAudioConfigOutput::Voice }
@@ -39,12 +39,12 @@ module OpenAI
      #
      # @param speed [Float] The speed of the model's spoken response as a multiple of the original speed.
      #
-     # @param voice [String, Symbol, OpenAI::Models::Realtime::RealtimeAudioConfigOutput::Voice] The voice the model uses to respond.
+     # @param voice [String, Symbol, OpenAI::Models::Realtime::RealtimeAudioConfigOutput::Voice] The voice the model uses to respond. Supported built-in voices are `alloy`, `ash

-     # The voice the model uses to respond.
-     #
-     # `
-     #
+     # The voice the model uses to respond. Supported built-in voices are `alloy`,
+     # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+     # `cedar`. Voice cannot be changed during the session once the model has responded
+     # with audio at least once. We recommend `marin` and `cedar` for best quality.
      #
      # @see OpenAI::Models::Realtime::RealtimeAudioConfigOutput#voice
      module Voice
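A small sketch of the constraint the new documentation describes, with example values:

```ruby
# Pick the output voice up front: per the doc change above, it locks once
# the model has responded with audio at least once in the session.
output_audio = OpenAI::Realtime::RealtimeAudioConfigOutput.new(
  voice: "marin", # recommended alongside `cedar` for best quality
  speed: 1.0
)
```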
data/lib/openai/models/realtime/realtime_response_create_audio_output.rb
CHANGED

@@ -23,10 +23,10 @@ module OpenAI
      optional :format_, union: -> { OpenAI::Realtime::RealtimeAudioFormats }, api_name: :format

      # @!attribute voice
-     #   The voice the model uses to respond.
-     #
-     #   `
-     #
+     #   The voice the model uses to respond. Supported built-in voices are `alloy`,
+     #   `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+     #   `cedar`. Voice cannot be changed during the session once the model has responded
+     #   with audio at least once.
      #
      #   @return [String, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::Voice, nil]
      optional :voice, union: -> { OpenAI::Realtime::RealtimeResponseCreateAudioOutput::Output::Voice }
@@ -38,12 +38,12 @@ module OpenAI
      #
      # @param format_ [OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCM, OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMU, OpenAI::Models::Realtime::RealtimeAudioFormats::AudioPCMA] The format of the output audio.
      #
-     # @param voice [String, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::Voice] The voice the model uses to respond.
+     # @param voice [String, Symbol, OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output::Voice] The voice the model uses to respond. Supported built-in voices are `alloy`, `ash

-     # The voice the model uses to respond.
-     #
-     # `
-     #
+     # The voice the model uses to respond. Supported built-in voices are `alloy`,
+     # `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, `verse`, `marin`, and
+     # `cedar`. Voice cannot be changed during the session once the model has responded
+     # with audio at least once.
      #
      # @see OpenAI::Models::Realtime::RealtimeResponseCreateAudioOutput::Output#voice
      module Voice
data/lib/openai/models/realtime/realtime_session.rb
CHANGED

@@ -94,8 +94,8 @@ module OpenAI
      # @!attribute model
      #   The Realtime model used for this session.
      #
-     #   @return [Symbol, OpenAI::Models::Realtime::RealtimeSession::Model, nil]
-     optional :model,
+     #   @return [String, Symbol, OpenAI::Models::Realtime::RealtimeSession::Model, nil]
+     optional :model, union: -> { OpenAI::Realtime::RealtimeSession::Model }

      # @!attribute object
      #   The object type. Always `realtime.session`.
@@ -205,7 +205,7 @@ module OpenAI
      #
      # @param modalities [Array<Symbol, OpenAI::Models::Realtime::RealtimeSession::Modality>] The set of modalities the model can respond with. To disable audio,
      #
-     # @param model [Symbol, OpenAI::Models::Realtime::RealtimeSession::Model] The Realtime model used for this session.
+     # @param model [String, Symbol, OpenAI::Models::Realtime::RealtimeSession::Model] The Realtime model used for this session.
      #
      # @param object [Symbol, OpenAI::Models::Realtime::RealtimeSession::Object] The object type. Always `realtime.session`.
      #
@@ -306,7 +306,46 @@ module OpenAI
      #
      # @see OpenAI::Models::Realtime::RealtimeSession#model
      module Model
-       extend OpenAI::Internal::Type::
+       extend OpenAI::Internal::Type::Union
+
+       variant String
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_2025_08_28 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2024_10_01 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2024_12_17 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_REALTIME_PREVIEW_2025_06_03 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_MINI_REALTIME_PREVIEW }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI_2025_10_06 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_REALTIME_MINI_2025_12_15 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI_2025_10_06 }
+
+       variant const: -> { OpenAI::Models::Realtime::RealtimeSession::Model::GPT_AUDIO_MINI_2025_12_15 }
+
+       # @!method self.variants
+       #   @return [Array(String, Symbol)]
+
+       define_sorbet_constant!(:Variants) do
+         T.type_alias { T.any(String, OpenAI::Realtime::RealtimeSession::Model::TaggedSymbol) }
+       end
+
+       # @!group

        GPT_REALTIME = :"gpt-realtime"
        GPT_REALTIME_2025_08_28 = :"gpt-realtime-2025-08-28"
@@ -318,11 +357,12 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI = :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+       GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI = :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+       GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"

-       # @!
-       #   @return [Array<Symbol>]
+       # @!endgroup
      end

      # The object type. Always `realtime.session`.
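Correspondingly for sessions, a hedged sketch of the same union pattern:

```ruby
# RealtimeSession#model now accepts dated snapshots as symbols or strings.
OpenAI::Realtime::RealtimeSession.new(model: :"gpt-realtime-mini-2025-12-15")
OpenAI::Realtime::RealtimeSession.new(model: "gpt-audio-mini-2025-12-15") # String variant
```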
data/lib/openai/models/realtime/realtime_session_create_request.rb
CHANGED

@@ -203,10 +203,14 @@ module OpenAI

        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_10_06 }

+       variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_REALTIME_MINI_2025_12_15 }
+
        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI }

        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI_2025_10_06 }

+       variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateRequest::Model::GPT_AUDIO_MINI_2025_12_15 }
+
        # @!method self.variants
        #   @return [Array(String, Symbol)]

@@ -226,8 +230,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI = :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+       GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI = :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+       GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"

        # @!endgroup
      end
data/lib/openai/models/realtime/realtime_session_create_response.rb
CHANGED

@@ -612,10 +612,14 @@ module OpenAI

        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI_2025_10_06 }

+       variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_REALTIME_MINI_2025_12_15 }
+
        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI }

        variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI_2025_10_06 }

+       variant const: -> { OpenAI::Models::Realtime::RealtimeSessionCreateResponse::Model::GPT_AUDIO_MINI_2025_12_15 }
+
        # @!method self.variants
        #   @return [Array(String, Symbol)]

@@ -635,8 +639,10 @@ module OpenAI
        GPT_4O_MINI_REALTIME_PREVIEW_2024_12_17 = :"gpt-4o-mini-realtime-preview-2024-12-17"
        GPT_REALTIME_MINI = :"gpt-realtime-mini"
        GPT_REALTIME_MINI_2025_10_06 = :"gpt-realtime-mini-2025-10-06"
+       GPT_REALTIME_MINI_2025_12_15 = :"gpt-realtime-mini-2025-12-15"
        GPT_AUDIO_MINI = :"gpt-audio-mini"
        GPT_AUDIO_MINI_2025_10_06 = :"gpt-audio-mini-2025-10-06"
+       GPT_AUDIO_MINI_2025_12_15 = :"gpt-audio-mini-2025-12-15"

        # @!endgroup
      end