openai 0.8.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +41 -0
  3. data/README.md +115 -4
  4. data/lib/openai/errors.rb +22 -0
  5. data/lib/openai/internal/type/array_of.rb +6 -1
  6. data/lib/openai/internal/type/base_model.rb +76 -24
  7. data/lib/openai/internal/type/boolean.rb +7 -1
  8. data/lib/openai/internal/type/converter.rb +42 -34
  9. data/lib/openai/internal/type/enum.rb +10 -2
  10. data/lib/openai/internal/type/file_input.rb +6 -1
  11. data/lib/openai/internal/type/hash_of.rb +6 -1
  12. data/lib/openai/internal/type/union.rb +12 -7
  13. data/lib/openai/internal/type/unknown.rb +7 -1
  14. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  15. data/lib/openai/models/audio/transcription.rb +118 -1
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  17. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  18. data/lib/openai/models/chat/chat_completion.rb +1 -0
  19. data/lib/openai/models/chat/chat_completion_chunk.rb +1 -0
  20. data/lib/openai/models/chat/completion_create_params.rb +1 -0
  21. data/lib/openai/models/fine_tuning/job_create_params.rb +4 -2
  22. data/lib/openai/models/image_edit_params.rb +35 -1
  23. data/lib/openai/models/responses/response.rb +41 -6
  24. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  25. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  26. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  27. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  30. data/lib/openai/models/responses/response_create_params.rb +41 -32
  31. data/lib/openai/models/responses/response_output_text.rb +18 -2
  32. data/lib/openai/models/responses/response_prompt.rb +63 -0
  33. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  34. data/lib/openai/resources/audio/speech.rb +3 -1
  35. data/lib/openai/resources/chat/completions.rb +8 -0
  36. data/lib/openai/resources/fine_tuning/jobs.rb +2 -2
  37. data/lib/openai/resources/images.rb +5 -1
  38. data/lib/openai/resources/responses.rb +18 -14
  39. data/lib/openai/version.rb +1 -1
  40. data/lib/openai.rb +1 -0
  41. data/rbi/openai/errors.rbi +16 -0
  42. data/rbi/openai/internal/type/boolean.rbi +2 -0
  43. data/rbi/openai/internal/type/converter.rbi +15 -15
  44. data/rbi/openai/internal/type/union.rbi +5 -0
  45. data/rbi/openai/internal/type/unknown.rbi +2 -0
  46. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  47. data/rbi/openai/models/audio/transcription.rbi +213 -3
  48. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  49. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  50. data/rbi/openai/models/chat/chat_completion.rbi +5 -0
  51. data/rbi/openai/models/chat/chat_completion_chunk.rbi +5 -0
  52. data/rbi/openai/models/chat/completion_create_params.rbi +5 -0
  53. data/rbi/openai/models/fine_tuning/job_create_params.rbi +8 -4
  54. data/rbi/openai/models/image_edit_params.rbi +51 -0
  55. data/rbi/openai/models/responses/response.rbi +66 -7
  56. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  57. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  58. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  59. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  60. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  61. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  62. data/rbi/openai/models/responses/response_create_params.rbi +107 -64
  63. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  64. data/rbi/openai/models/responses/response_prompt.rbi +120 -0
  65. data/rbi/openai/resources/audio/speech.rbi +6 -1
  66. data/rbi/openai/resources/fine_tuning/jobs.rbi +6 -4
  67. data/rbi/openai/resources/images.rbi +11 -0
  68. data/rbi/openai/resources/responses.rbi +56 -50
  69. data/sig/openai/errors.rbs +9 -0
  70. data/sig/openai/internal/type/converter.rbs +7 -1
  71. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  72. data/sig/openai/models/audio/transcription.rbs +95 -3
  73. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  74. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  75. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  76. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  77. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  78. data/sig/openai/models/image_edit_params.rbs +22 -0
  79. data/sig/openai/models/responses/response.rbs +22 -5
  80. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  81. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  82. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  83. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  84. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  85. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  86. data/sig/openai/models/responses/response_create_params.rbs +25 -11
  87. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  88. data/sig/openai/models/responses/response_prompt.rbs +44 -0
  89. data/sig/openai/resources/audio/speech.rbs +1 -0
  90. data/sig/openai/resources/images.rbs +2 -0
  91. data/sig/openai/resources/responses.rbs +6 -4
  92. metadata +5 -2
@@ -77,10 +77,14 @@ module OpenAI
77
77
  #
78
78
  # @param state [Hash{Symbol=>Object}] .
79
79
  #
80
- # @option state [Boolean, :strong] :strictness
80
+ # @option state [Boolean] :translate_names
81
+ #
82
+ # @option state [Boolean] :strictness
81
83
  #
82
84
  # @option state [Hash{Symbol=>Object}] :exactness
83
85
  #
86
+ # @option state [Class<StandardError>] :error
87
+ #
84
88
  # @option state [Integer] :branched
85
89
  #
86
90
  # @return [Hash{Symbol=>Object}, Object]
@@ -89,6 +93,7 @@ module OpenAI
89
93
 
90
94
  unless value.is_a?(Hash)
91
95
  exactness[:no] += 1
96
+ state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}")
92
97
  return value
93
98
  end
94
99
 
@@ -140,14 +140,23 @@ module OpenAI
140
140
 
141
141
  # @api private
142
142
  #
143
+ # Tries to efficiently coerce the given value to one of the known variants.
144
+ #
145
+ # If the value cannot match any of the known variants, the coercion is considered
146
+ # non-viable and returns the original value.
147
+ #
143
148
  # @param value [Object]
144
149
  #
145
150
  # @param state [Hash{Symbol=>Object}] .
146
151
  #
147
- # @option state [Boolean, :strong] :strictness
152
+ # @option state [Boolean] :translate_names
153
+ #
154
+ # @option state [Boolean] :strictness
148
155
  #
149
156
  # @option state [Hash{Symbol=>Object}] :exactness
150
157
  #
158
+ # @option state [Class<StandardError>] :error
159
+ #
151
160
  # @option state [Integer] :branched
152
161
  #
153
162
  # @return [Object]
@@ -158,7 +167,6 @@ module OpenAI
158
167
 
159
168
  strictness = state.fetch(:strictness)
160
169
  exactness = state.fetch(:exactness)
161
- state[:strictness] = strictness == :strong ? true : strictness
162
170
 
163
171
  alternatives = []
164
172
  known_variants.each do |_, variant_fn|
@@ -177,13 +185,10 @@ module OpenAI
177
185
  end
178
186
  end
179
187
 
180
- case alternatives.sort_by(&:first)
188
+ case alternatives.sort_by!(&:first)
181
189
  in []
182
190
  exactness[:no] += 1
183
- if strictness == :strong
184
- message = "no possible conversion of #{value.class} into a variant of #{target.inspect}"
185
- raise ArgumentError.new(message)
186
- end
191
+ state[:error] = ArgumentError.new("no matching variant for #{value.inspect}")
187
192
  value
188
193
  in [[_, exact, coerced], *]
189
194
  exact.each { exactness[_1] += _2 }
@@ -33,14 +33,20 @@ module OpenAI
33
33
  class << self
34
34
  # @api private
35
35
  #
36
+ # No coercion needed for Unknown type.
37
+ #
36
38
  # @param value [Object]
37
39
  #
38
40
  # @param state [Hash{Symbol=>Object}] .
39
41
  #
40
- # @option state [Boolean, :strong] :strictness
42
+ # @option state [Boolean] :translate_names
43
+ #
44
+ # @option state [Boolean] :strictness
41
45
  #
42
46
  # @option state [Hash{Symbol=>Object}] :exactness
43
47
  #
48
+ # @option state [Class<StandardError>] :error
49
+ #
44
50
  # @option state [Integer] :branched
45
51
  #
46
52
  # @return [Object]
@@ -46,12 +46,19 @@ module OpenAI
46
46
 
47
47
  # @!attribute speed
48
48
  # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
49
- # the default. Does not work with `gpt-4o-mini-tts`.
49
+ # the default.
50
50
  #
51
51
  # @return [Float, nil]
52
52
  optional :speed, Float
53
53
 
54
- # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, request_options: {})
54
+ # @!attribute stream_format
55
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
56
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
57
+ #
58
+ # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat, nil]
59
+ optional :stream_format, enum: -> { OpenAI::Audio::SpeechCreateParams::StreamFormat }
60
+
61
+ # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {})
55
62
  # Some parameter documentations has been truncated, see
56
63
  # {OpenAI::Models::Audio::SpeechCreateParams} for more details.
57
64
  #
@@ -67,6 +74,8 @@ module OpenAI
67
74
  #
68
75
  # @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
69
76
  #
77
+ # @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse
78
+ #
70
79
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
71
80
 
72
81
  # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
@@ -153,6 +162,18 @@ module OpenAI
153
162
  # @!method self.values
154
163
  # @return [Array<Symbol>]
155
164
  end
165
+
166
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
167
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
168
+ module StreamFormat
169
+ extend OpenAI::Internal::Type::Enum
170
+
171
+ SSE = :sse
172
+ AUDIO = :audio
173
+
174
+ # @!method self.values
175
+ # @return [Array<Symbol>]
176
+ end
156
177
  end
157
178
  end
158
179
  end
@@ -18,7 +18,13 @@ module OpenAI
18
18
  # @return [Array<OpenAI::Models::Audio::Transcription::Logprob>, nil]
19
19
  optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::Transcription::Logprob] }
20
20
 
21
- # @!method initialize(text:, logprobs: nil)
21
+ # @!attribute usage
22
+ # Token usage statistics for the request.
23
+ #
24
+ # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration, nil]
25
+ optional :usage, union: -> { OpenAI::Audio::Transcription::Usage }
26
+
27
+ # @!method initialize(text:, logprobs: nil, usage: nil)
22
28
  # Some parameter documentations has been truncated, see
23
29
  # {OpenAI::Models::Audio::Transcription} for more details.
24
30
  #
@@ -28,6 +34,8 @@ module OpenAI
28
34
  # @param text [String] The transcribed text.
29
35
  #
30
36
  # @param logprobs [Array<OpenAI::Models::Audio::Transcription::Logprob>] The log probabilities of the tokens in the transcription. Only returned with the
37
+ #
38
+ # @param usage [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration] Token usage statistics for the request.
31
39
 
32
40
  class Logprob < OpenAI::Internal::Type::BaseModel
33
41
  # @!attribute token
@@ -55,6 +63,115 @@ module OpenAI
55
63
  #
56
64
  # @param logprob [Float] The log probability of the token.
57
65
  end
66
+
67
+ # Token usage statistics for the request.
68
+ #
69
+ # @see OpenAI::Models::Audio::Transcription#usage
70
+ module Usage
71
+ extend OpenAI::Internal::Type::Union
72
+
73
+ discriminator :type
74
+
75
+ # Usage statistics for models billed by token usage.
76
+ variant :tokens, -> { OpenAI::Audio::Transcription::Usage::Tokens }
77
+
78
+ # Usage statistics for models billed by audio input duration.
79
+ variant :duration, -> { OpenAI::Audio::Transcription::Usage::Duration }
80
+
81
+ class Tokens < OpenAI::Internal::Type::BaseModel
82
+ # @!attribute input_tokens
83
+ # Number of input tokens billed for this request.
84
+ #
85
+ # @return [Integer]
86
+ required :input_tokens, Integer
87
+
88
+ # @!attribute output_tokens
89
+ # Number of output tokens generated.
90
+ #
91
+ # @return [Integer]
92
+ required :output_tokens, Integer
93
+
94
+ # @!attribute total_tokens
95
+ # Total number of tokens used (input + output).
96
+ #
97
+ # @return [Integer]
98
+ required :total_tokens, Integer
99
+
100
+ # @!attribute type
101
+ # The type of the usage object. Always `tokens` for this variant.
102
+ #
103
+ # @return [Symbol, :tokens]
104
+ required :type, const: :tokens
105
+
106
+ # @!attribute input_token_details
107
+ # Details about the input tokens billed for this request.
108
+ #
109
+ # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails, nil]
110
+ optional :input_token_details,
111
+ -> {
112
+ OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
113
+ }
114
+
115
+ # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
116
+ # Usage statistics for models billed by token usage.
117
+ #
118
+ # @param input_tokens [Integer] Number of input tokens billed for this request.
119
+ #
120
+ # @param output_tokens [Integer] Number of output tokens generated.
121
+ #
122
+ # @param total_tokens [Integer] Total number of tokens used (input + output).
123
+ #
124
+ # @param input_token_details [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails] Details about the input tokens billed for this request.
125
+ #
126
+ # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.
127
+
128
+ # @see OpenAI::Models::Audio::Transcription::Usage::Tokens#input_token_details
129
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
130
+ # @!attribute audio_tokens
131
+ # Number of audio tokens billed for this request.
132
+ #
133
+ # @return [Integer, nil]
134
+ optional :audio_tokens, Integer
135
+
136
+ # @!attribute text_tokens
137
+ # Number of text tokens billed for this request.
138
+ #
139
+ # @return [Integer, nil]
140
+ optional :text_tokens, Integer
141
+
142
+ # @!method initialize(audio_tokens: nil, text_tokens: nil)
143
+ # Details about the input tokens billed for this request.
144
+ #
145
+ # @param audio_tokens [Integer] Number of audio tokens billed for this request.
146
+ #
147
+ # @param text_tokens [Integer] Number of text tokens billed for this request.
148
+ end
149
+ end
150
+
151
+ class Duration < OpenAI::Internal::Type::BaseModel
152
+ # @!attribute duration
153
+ # Duration of the input audio in seconds.
154
+ #
155
+ # @return [Float]
156
+ required :duration, Float
157
+
158
+ # @!attribute type
159
+ # The type of the usage object. Always `duration` for this variant.
160
+ #
161
+ # @return [Symbol, :duration]
162
+ required :type, const: :duration
163
+
164
+ # @!method initialize(duration:, type: :duration)
165
+ # Usage statistics for models billed by audio input duration.
166
+ #
167
+ # @param duration [Float] Duration of the input audio in seconds.
168
+ #
169
+ # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
170
+ end
171
+
172
+ # @!method self.variants
173
+ # @return [Array(OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration)]
174
+ end
58
175
  end
59
176
  end
60
177
  end
@@ -26,7 +26,13 @@ module OpenAI
26
26
  optional :logprobs,
27
27
  -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob] }
28
28
 
29
- # @!method initialize(text:, logprobs: nil, type: :"transcript.text.done")
29
+ # @!attribute usage
30
+ # Usage statistics for models billed by token usage.
31
+ #
32
+ # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage, nil]
33
+ optional :usage, -> { OpenAI::Audio::TranscriptionTextDoneEvent::Usage }
34
+
35
+ # @!method initialize(text:, logprobs: nil, usage: nil, type: :"transcript.text.done")
30
36
  # Some parameter documentations has been truncated, see
31
37
  # {OpenAI::Models::Audio::TranscriptionTextDoneEvent} for more details.
32
38
  #
@@ -39,6 +45,8 @@ module OpenAI
39
45
  #
40
46
  # @param logprobs [Array<OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob>] The log probabilities of the individual tokens in the transcription. Only includ
41
47
  #
48
+ # @param usage [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage] Usage statistics for models billed by token usage.
49
+ #
42
50
  # @param type [Symbol, :"transcript.text.done"] The type of the event. Always `transcript.text.done`.
43
51
 
44
52
  class Logprob < OpenAI::Internal::Type::BaseModel
@@ -70,6 +78,77 @@ module OpenAI
70
78
  #
71
79
  # @param logprob [Float] The log probability of the token.
72
80
  end
81
+
82
+ # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent#usage
83
+ class Usage < OpenAI::Internal::Type::BaseModel
84
+ # @!attribute input_tokens
85
+ # Number of input tokens billed for this request.
86
+ #
87
+ # @return [Integer]
88
+ required :input_tokens, Integer
89
+
90
+ # @!attribute output_tokens
91
+ # Number of output tokens generated.
92
+ #
93
+ # @return [Integer]
94
+ required :output_tokens, Integer
95
+
96
+ # @!attribute total_tokens
97
+ # Total number of tokens used (input + output).
98
+ #
99
+ # @return [Integer]
100
+ required :total_tokens, Integer
101
+
102
+ # @!attribute type
103
+ # The type of the usage object. Always `tokens` for this variant.
104
+ #
105
+ # @return [Symbol, :tokens]
106
+ required :type, const: :tokens
107
+
108
+ # @!attribute input_token_details
109
+ # Details about the input tokens billed for this request.
110
+ #
111
+ # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails, nil]
112
+ optional :input_token_details,
113
+ -> {
114
+ OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
115
+ }
116
+
117
+ # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
118
+ # Usage statistics for models billed by token usage.
119
+ #
120
+ # @param input_tokens [Integer] Number of input tokens billed for this request.
121
+ #
122
+ # @param output_tokens [Integer] Number of output tokens generated.
123
+ #
124
+ # @param total_tokens [Integer] Total number of tokens used (input + output).
125
+ #
126
+ # @param input_token_details [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails] Details about the input tokens billed for this request.
127
+ #
128
+ # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.
129
+
130
+ # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage#input_token_details
131
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
132
+ # @!attribute audio_tokens
133
+ # Number of audio tokens billed for this request.
134
+ #
135
+ # @return [Integer, nil]
136
+ optional :audio_tokens, Integer
137
+
138
+ # @!attribute text_tokens
139
+ # Number of text tokens billed for this request.
140
+ #
141
+ # @return [Integer, nil]
142
+ optional :text_tokens, Integer
143
+
144
+ # @!method initialize(audio_tokens: nil, text_tokens: nil)
145
+ # Details about the input tokens billed for this request.
146
+ #
147
+ # @param audio_tokens [Integer] Number of audio tokens billed for this request.
148
+ #
149
+ # @param text_tokens [Integer] Number of text tokens billed for this request.
150
+ end
151
+ end
73
152
  end
74
153
  end
75
154
  end
@@ -28,13 +28,19 @@ module OpenAI
28
28
  # @return [Array<OpenAI::Models::Audio::TranscriptionSegment>, nil]
29
29
  optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] }
30
30
 
31
+ # @!attribute usage
32
+ # Usage statistics for models billed by audio input duration.
33
+ #
34
+ # @return [OpenAI::Models::Audio::TranscriptionVerbose::Usage, nil]
35
+ optional :usage, -> { OpenAI::Audio::TranscriptionVerbose::Usage }
36
+
31
37
  # @!attribute words
32
38
  # Extracted words and their corresponding timestamps.
33
39
  #
34
40
  # @return [Array<OpenAI::Models::Audio::TranscriptionWord>, nil]
35
41
  optional :words, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionWord] }
36
42
 
37
- # @!method initialize(duration:, language:, text:, segments: nil, words: nil)
43
+ # @!method initialize(duration:, language:, text:, segments: nil, usage: nil, words: nil)
38
44
  # Represents a verbose json transcription response returned by model, based on the
39
45
  # provided input.
40
46
  #
@@ -46,7 +52,31 @@ module OpenAI
46
52
  #
47
53
  # @param segments [Array<OpenAI::Models::Audio::TranscriptionSegment>] Segments of the transcribed text and their corresponding details.
48
54
  #
55
+ # @param usage [OpenAI::Models::Audio::TranscriptionVerbose::Usage] Usage statistics for models billed by audio input duration.
56
+ #
49
57
  # @param words [Array<OpenAI::Models::Audio::TranscriptionWord>] Extracted words and their corresponding timestamps.
58
+
59
+ # @see OpenAI::Models::Audio::TranscriptionVerbose#usage
60
+ class Usage < OpenAI::Internal::Type::BaseModel
61
+ # @!attribute duration
62
+ # Duration of the input audio in seconds.
63
+ #
64
+ # @return [Float]
65
+ required :duration, Float
66
+
67
+ # @!attribute type
68
+ # The type of the usage object. Always `duration` for this variant.
69
+ #
70
+ # @return [Symbol, :duration]
71
+ required :type, const: :duration
72
+
73
+ # @!method initialize(duration:, type: :duration)
74
+ # Usage statistics for models billed by audio input duration.
75
+ #
76
+ # @param duration [Float] Duration of the input audio in seconds.
77
+ #
78
+ # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
79
+ end
50
80
  end
51
81
  end
52
82
  end
@@ -213,6 +213,7 @@ module OpenAI
213
213
  AUTO = :auto
214
214
  DEFAULT = :default
215
215
  FLEX = :flex
216
+ SCALE = :scale
216
217
 
217
218
  # @!method self.values
218
219
  # @return [Array<Symbol>]
@@ -396,6 +396,7 @@ module OpenAI
396
396
  AUTO = :auto
397
397
  DEFAULT = :default
398
398
  FLEX = :flex
399
+ SCALE = :scale
399
400
 
400
401
  # @!method self.values
401
402
  # @return [Array<Symbol>]
@@ -569,6 +569,7 @@ module OpenAI
569
569
  AUTO = :auto
570
570
  DEFAULT = :default
571
571
  FLEX = :flex
572
+ SCALE = :scale
572
573
 
573
574
  # @!method self.values
574
575
  # @return [Array<Symbol>]
@@ -31,7 +31,8 @@ module OpenAI
31
31
  # [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
32
32
  # format.
33
33
  #
34
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
34
+ # See the
35
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
35
36
  # for more details.
36
37
  #
37
38
  # @return [String]
@@ -100,7 +101,8 @@ module OpenAI
100
101
  # Your dataset must be formatted as a JSONL file. You must upload your file with
101
102
  # the purpose `fine-tune`.
102
103
  #
103
- # See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
104
+ # See the
105
+ # [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
104
106
  # for more details.
105
107
  #
106
108
  # @return [String, nil]
@@ -61,6 +61,22 @@ module OpenAI
61
61
  # @return [Integer, nil]
62
62
  optional :n, Integer, nil?: true
63
63
 
64
+ # @!attribute output_compression
65
+ # The compression level (0-100%) for the generated images. This parameter is only
66
+ # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
67
+ # defaults to 100.
68
+ #
69
+ # @return [Integer, nil]
70
+ optional :output_compression, Integer, nil?: true
71
+
72
+ # @!attribute output_format
73
+ # The format in which the generated images are returned. This parameter is only
74
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
75
+ # default value is `png`.
76
+ #
77
+ # @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
78
+ optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true
79
+
64
80
  # @!attribute quality
65
81
  # The quality of the image that will be generated. `high`, `medium` and `low` are
66
82
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
@@ -94,7 +110,7 @@ module OpenAI
94
110
  # @return [String, nil]
95
111
  optional :user, String
96
112
 
97
- # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
113
+ # @!method initialize(image:, prompt:, background: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
98
114
  # Some parameter documentations has been truncated, see
99
115
  # {OpenAI::Models::ImageEditParams} for more details.
100
116
  #
@@ -110,6 +126,10 @@ module OpenAI
110
126
  #
111
127
  # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
112
128
  #
129
+ # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
130
+ #
131
+ # @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
132
+ #
113
133
  # @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
114
134
  #
115
135
  # @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
@@ -174,6 +194,20 @@ module OpenAI
174
194
  # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
175
195
  end
176
196
 
197
+ # The format in which the generated images are returned. This parameter is only
198
+ # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
199
+ # default value is `png`.
200
+ module OutputFormat
201
+ extend OpenAI::Internal::Type::Enum
202
+
203
+ PNG = :png
204
+ JPEG = :jpeg
205
+ WEBP = :webp
206
+
207
+ # @!method self.values
208
+ # @return [Array<Symbol>]
209
+ end
210
+
177
211
  # The quality of the image that will be generated. `high`, `medium` and `low` are
178
212
  # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
179
213
  # Defaults to `auto`.
@@ -32,15 +32,14 @@ module OpenAI
32
32
  required :incomplete_details, -> { OpenAI::Responses::Response::IncompleteDetails }, nil?: true
33
33
 
34
34
  # @!attribute instructions
35
- # Inserts a system (or developer) message as the first item in the model's
36
- # context.
35
+ # A system (or developer) message inserted into the model's context.
37
36
  #
38
37
  # When using along with `previous_response_id`, the instructions from a previous
39
38
  # response will not be carried over to the next response. This makes it simple to
40
39
  # swap out system (or developer) messages in new responses.
41
40
  #
42
- # @return [String, nil]
43
- required :instructions, String, nil?: true
41
+ # @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
42
+ required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true
44
43
 
45
44
  # @!attribute metadata
46
45
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -156,6 +155,13 @@ module OpenAI
156
155
  # @return [String, nil]
157
156
  optional :previous_response_id, String, nil?: true
158
157
 
158
+ # @!attribute prompt
159
+ # Reference to a prompt template and its variables.
160
+ # [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
161
+ #
162
+ # @return [OpenAI::Models::Responses::ResponsePrompt, nil]
163
+ optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
164
+
159
165
  # @!attribute reasoning
160
166
  # **o-series models only**
161
167
  #
@@ -231,7 +237,7 @@ module OpenAI
231
237
  # @return [String, nil]
232
238
  optional :user, String
233
239
 
234
- # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
240
+ # @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, truncation: nil, usage: nil, user: nil, object: :response)
235
241
  # Some parameter documentations has been truncated, see
236
242
  # {OpenAI::Models::Responses::Response} for more details.
237
243
  #
@@ -243,7 +249,7 @@ module OpenAI
243
249
  #
244
250
  # @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
245
251
  #
246
- # @param instructions [String, nil] Inserts a system (or developer) message as the first item in the model's context
252
+ # @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
247
253
  #
248
254
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
249
255
  #
@@ -267,6 +273,8 @@ module OpenAI
267
273
  #
268
274
  # @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
269
275
  #
276
+ # @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
277
+ #
270
278
  # @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
271
279
  #
272
280
  # @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
@@ -310,6 +318,32 @@ module OpenAI
310
318
  end
311
319
  end
312
320
 
321
+ # A system (or developer) message inserted into the model's context.
322
+ #
323
+ # When using along with `previous_response_id`, the instructions from a previous
324
+ # response will not be carried over to the next response. This makes it simple to
325
+ # swap out system (or developer) messages in new responses.
326
+ #
327
+ # @see OpenAI::Models::Responses::Response#instructions
328
+ module Instructions
329
+ extend OpenAI::Internal::Type::Union
330
+
331
+ # A text input to the model, equivalent to a text input with the
332
+ # `developer` role.
333
+ variant String
334
+
335
+ # A list of one or many input items to the model, containing
336
+ # different content types.
337
+ variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }
338
+
339
+ # @!method self.variants
340
+ # @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
341
+
342
+ # @type [OpenAI::Internal::Type::Converter]
343
+ ResponseInputItemArray =
344
+ OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
345
+ end
346
+
313
347
  # How the model should select which tool (or tools) to use when generating a
314
348
  # response. See the `tools` parameter to see how to specify which tools the model
315
349
  # can call.
@@ -364,6 +398,7 @@ module OpenAI
364
398
  AUTO = :auto
365
399
  DEFAULT = :default
366
400
  FLEX = :flex
401
+ SCALE = :scale
367
402
 
368
403
  # @!method self.values
369
404
  # @return [Array<Symbol>]