openai 0.9.0 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +40 -0
  3. data/README.md +79 -1
  4. data/lib/openai/client.rb +11 -0
  5. data/lib/openai/errors.rb +25 -0
  6. data/lib/openai/internal/type/array_of.rb +6 -1
  7. data/lib/openai/internal/type/base_model.rb +76 -24
  8. data/lib/openai/internal/type/boolean.rb +7 -1
  9. data/lib/openai/internal/type/converter.rb +42 -34
  10. data/lib/openai/internal/type/enum.rb +10 -2
  11. data/lib/openai/internal/type/file_input.rb +6 -1
  12. data/lib/openai/internal/type/hash_of.rb +6 -1
  13. data/lib/openai/internal/type/union.rb +12 -7
  14. data/lib/openai/internal/type/unknown.rb +7 -1
  15. data/lib/openai/models/all_models.rb +4 -0
  16. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  17. data/lib/openai/models/audio/transcription.rb +118 -1
  18. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  19. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  20. data/lib/openai/models/chat/chat_completion.rb +32 -31
  21. data/lib/openai/models/chat/chat_completion_chunk.rb +30 -29
  22. data/lib/openai/models/chat/completion_create_params.rb +34 -31
  23. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  24. data/lib/openai/models/images_response.rb +92 -1
  25. data/lib/openai/models/responses/response.rb +59 -35
  26. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  27. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  28. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  29. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  30. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  31. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  32. data/lib/openai/models/responses/response_create_params.rb +92 -67
  33. data/lib/openai/models/responses/response_function_web_search.rb +115 -1
  34. data/lib/openai/models/responses/response_includable.rb +8 -6
  35. data/lib/openai/models/responses/response_output_text.rb +18 -2
  36. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  37. data/lib/openai/models/responses/tool_choice_mcp.rb +40 -0
  38. data/lib/openai/models/responses/tool_choice_types.rb +0 -3
  39. data/lib/openai/models/responses_model.rb +4 -0
  40. data/lib/openai/models/webhooks/batch_cancelled_webhook_event.rb +84 -0
  41. data/lib/openai/models/webhooks/batch_completed_webhook_event.rb +84 -0
  42. data/lib/openai/models/webhooks/batch_expired_webhook_event.rb +84 -0
  43. data/lib/openai/models/webhooks/batch_failed_webhook_event.rb +84 -0
  44. data/lib/openai/models/webhooks/eval_run_canceled_webhook_event.rb +84 -0
  45. data/lib/openai/models/webhooks/eval_run_failed_webhook_event.rb +84 -0
  46. data/lib/openai/models/webhooks/eval_run_succeeded_webhook_event.rb +84 -0
  47. data/lib/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rb +85 -0
  48. data/lib/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rb +85 -0
  49. data/lib/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rb +85 -0
  50. data/lib/openai/models/webhooks/response_cancelled_webhook_event.rb +85 -0
  51. data/lib/openai/models/webhooks/response_completed_webhook_event.rb +85 -0
  52. data/lib/openai/models/webhooks/response_failed_webhook_event.rb +84 -0
  53. data/lib/openai/models/webhooks/response_incomplete_webhook_event.rb +85 -0
  54. data/lib/openai/models/webhooks/unwrap_webhook_event.rb +59 -0
  55. data/lib/openai/models/webhooks/webhook_unwrap_params.rb +16 -0
  56. data/lib/openai/models.rb +2 -0
  57. data/lib/openai/resources/audio/speech.rb +3 -1
  58. data/lib/openai/resources/chat/completions.rb +10 -2
  59. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  60. data/lib/openai/resources/responses.rb +24 -16
  61. data/lib/openai/resources/webhooks.rb +124 -0
  62. data/lib/openai/version.rb +1 -1
  63. data/lib/openai.rb +18 -0
  64. data/rbi/openai/client.rbi +3 -0
  65. data/rbi/openai/errors.rbi +16 -0
  66. data/rbi/openai/internal/type/boolean.rbi +2 -0
  67. data/rbi/openai/internal/type/converter.rbi +15 -15
  68. data/rbi/openai/internal/type/union.rbi +5 -0
  69. data/rbi/openai/internal/type/unknown.rbi +2 -0
  70. data/rbi/openai/models/all_models.rbi +20 -0
  71. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  72. data/rbi/openai/models/audio/transcription.rbi +213 -3
  73. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  74. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  75. data/rbi/openai/models/chat/chat_completion.rbi +47 -42
  76. data/rbi/openai/models/chat/chat_completion_chunk.rbi +47 -42
  77. data/rbi/openai/models/chat/completion_create_params.rbi +51 -42
  78. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  79. data/rbi/openai/models/images_response.rbi +146 -0
  80. data/rbi/openai/models/responses/response.rbi +75 -44
  81. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  82. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  83. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  84. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  85. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  86. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  87. data/rbi/openai/models/responses/response_create_params.rbi +174 -115
  88. data/rbi/openai/models/responses/response_function_web_search.rbi +163 -0
  89. data/rbi/openai/models/responses/response_includable.rbi +17 -11
  90. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  91. data/rbi/openai/models/responses/tool_choice_mcp.rbi +53 -0
  92. data/rbi/openai/models/responses/tool_choice_types.rbi +0 -5
  93. data/rbi/openai/models/responses_model.rbi +20 -0
  94. data/rbi/openai/models/webhooks/batch_cancelled_webhook_event.rbi +154 -0
  95. data/rbi/openai/models/webhooks/batch_completed_webhook_event.rbi +154 -0
  96. data/rbi/openai/models/webhooks/batch_expired_webhook_event.rbi +150 -0
  97. data/rbi/openai/models/webhooks/batch_failed_webhook_event.rbi +149 -0
  98. data/rbi/openai/models/webhooks/eval_run_canceled_webhook_event.rbi +154 -0
  99. data/rbi/openai/models/webhooks/eval_run_failed_webhook_event.rbi +151 -0
  100. data/rbi/openai/models/webhooks/eval_run_succeeded_webhook_event.rbi +154 -0
  101. data/rbi/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbi +158 -0
  102. data/rbi/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbi +156 -0
  103. data/rbi/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbi +158 -0
  104. data/rbi/openai/models/webhooks/response_cancelled_webhook_event.rbi +154 -0
  105. data/rbi/openai/models/webhooks/response_completed_webhook_event.rbi +154 -0
  106. data/rbi/openai/models/webhooks/response_failed_webhook_event.rbi +154 -0
  107. data/rbi/openai/models/webhooks/response_incomplete_webhook_event.rbi +155 -0
  108. data/rbi/openai/models/webhooks/unwrap_webhook_event.rbi +40 -0
  109. data/rbi/openai/models/webhooks/webhook_unwrap_params.rbi +32 -0
  110. data/rbi/openai/models.rbi +2 -0
  111. data/rbi/openai/resources/audio/speech.rbi +6 -1
  112. data/rbi/openai/resources/chat/completions.rbi +34 -30
  113. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  114. data/rbi/openai/resources/responses.rbi +108 -84
  115. data/rbi/openai/resources/webhooks.rbi +68 -0
  116. data/sig/openai/client.rbs +2 -0
  117. data/sig/openai/errors.rbs +9 -0
  118. data/sig/openai/internal/type/converter.rbs +7 -1
  119. data/sig/openai/models/all_models.rbs +8 -0
  120. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  121. data/sig/openai/models/audio/transcription.rbs +95 -3
  122. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  123. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  124. data/sig/openai/models/chat/chat_completion.rbs +2 -1
  125. data/sig/openai/models/chat/chat_completion_chunk.rbs +2 -1
  126. data/sig/openai/models/chat/completion_create_params.rbs +2 -1
  127. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  128. data/sig/openai/models/images_response.rbs +83 -0
  129. data/sig/openai/models/responses/response.rbs +13 -1
  130. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  131. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  132. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  133. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  134. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  135. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  136. data/sig/openai/models/responses/response_create_params.rbs +31 -11
  137. data/sig/openai/models/responses/response_function_web_search.rbs +54 -0
  138. data/sig/openai/models/responses/response_includable.rbs +7 -5
  139. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  140. data/sig/openai/models/responses/tool_choice_mcp.rbs +23 -0
  141. data/sig/openai/models/responses/tool_choice_types.rbs +0 -2
  142. data/sig/openai/models/responses_model.rbs +8 -0
  143. data/sig/openai/models/webhooks/batch_cancelled_webhook_event.rbs +66 -0
  144. data/sig/openai/models/webhooks/batch_completed_webhook_event.rbs +66 -0
  145. data/sig/openai/models/webhooks/batch_expired_webhook_event.rbs +66 -0
  146. data/sig/openai/models/webhooks/batch_failed_webhook_event.rbs +66 -0
  147. data/sig/openai/models/webhooks/eval_run_canceled_webhook_event.rbs +66 -0
  148. data/sig/openai/models/webhooks/eval_run_failed_webhook_event.rbs +66 -0
  149. data/sig/openai/models/webhooks/eval_run_succeeded_webhook_event.rbs +66 -0
  150. data/sig/openai/models/webhooks/fine_tuning_job_cancelled_webhook_event.rbs +66 -0
  151. data/sig/openai/models/webhooks/fine_tuning_job_failed_webhook_event.rbs +66 -0
  152. data/sig/openai/models/webhooks/fine_tuning_job_succeeded_webhook_event.rbs +66 -0
  153. data/sig/openai/models/webhooks/response_cancelled_webhook_event.rbs +66 -0
  154. data/sig/openai/models/webhooks/response_completed_webhook_event.rbs +66 -0
  155. data/sig/openai/models/webhooks/response_failed_webhook_event.rbs +66 -0
  156. data/sig/openai/models/webhooks/response_incomplete_webhook_event.rbs +66 -0
  157. data/sig/openai/models/webhooks/unwrap_webhook_event.rbs +27 -0
  158. data/sig/openai/models/webhooks/webhook_unwrap_params.rbs +17 -0
  159. data/sig/openai/models.rbs +2 -0
  160. data/sig/openai/resources/audio/speech.rbs +1 -0
  161. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  162. data/sig/openai/resources/responses.rbs +8 -4
  163. data/sig/openai/resources/webhooks.rbs +33 -0
  164. metadata +56 -2
@@ -77,10 +77,14 @@ module OpenAI
  #
  # @param state [Hash{Symbol=>Object}] .
  #
- # @option state [Boolean, :strong] :strictness
+ # @option state [Boolean] :translate_names
+ #
+ # @option state [Boolean] :strictness
  #
  # @option state [Hash{Symbol=>Object}] :exactness
  #
+ # @option state [Class<StandardError>] :error
+ #
  # @option state [Integer] :branched
  #
  # @return [Hash{Symbol=>Object}, Object]
@@ -89,6 +93,7 @@ module OpenAI

  unless value.is_a?(Hash)
  exactness[:no] += 1
+ state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}")
  return value
  end

@@ -140,14 +140,23 @@ module OpenAI

  # @api private
  #
+ # Tries to efficiently coerce the given value to one of the known variants.
+ #
+ # If the value cannot match any of the known variants, the coercion is considered
+ # non-viable and returns the original value.
+ #
  # @param value [Object]
  #
  # @param state [Hash{Symbol=>Object}] .
  #
- # @option state [Boolean, :strong] :strictness
+ # @option state [Boolean] :translate_names
+ #
+ # @option state [Boolean] :strictness
  #
  # @option state [Hash{Symbol=>Object}] :exactness
  #
+ # @option state [Class<StandardError>] :error
+ #
  # @option state [Integer] :branched
  #
  # @return [Object]
@@ -158,7 +167,6 @@ module OpenAI

  strictness = state.fetch(:strictness)
  exactness = state.fetch(:exactness)
- state[:strictness] = strictness == :strong ? true : strictness

  alternatives = []
  known_variants.each do |_, variant_fn|
@@ -177,13 +185,10 @@ module OpenAI
  end
  end

- case alternatives.sort_by(&:first)
+ case alternatives.sort_by!(&:first)
  in []
  exactness[:no] += 1
- if strictness == :strong
- message = "no possible conversion of #{value.class} into a variant of #{target.inspect}"
- raise ArgumentError.new(message)
- end
+ state[:error] = ArgumentError.new("no matching variant for #{value.inspect}")
  value
  in [[_, exact, coerced], *]
  exact.each { exactness[_1] += _2 }
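
Taken together, the converter hunks above change how non-viable coercions are reported: instead of raising immediately under the old `:strong` strictness mode, the coercer now records a `TypeError` or `ArgumentError` in `state[:error]` and hands back the original value, leaving the caller to decide whether to raise. A minimal self-contained Ruby sketch of that pattern (illustrative only; `coerce_union` and the variant table are not the gem's internal API):

    # Record why coercion failed instead of raising; return the value unchanged.
    def coerce_union(value, variants, state)
      alternatives = variants.filter_map do |variant|
        coerced = variant[:coerce].call(value)
        [variant[:cost], coerced] unless coerced.nil?
      end

      case alternatives.sort_by!(&:first)
      in []
        state[:error] = ArgumentError.new("no matching variant for #{value.inspect}")
        value
      in [[_, coerced], *]
        coerced
      end
    end

    state = {strictness: true, exactness: {yes: 0, no: 0, maybe: 0}, branched: 0}
    variants = [
      {cost: 0, coerce: ->(v) { v if v.is_a?(Integer) }},
      {cost: 1, coerce: ->(v) { Integer(v, exception: false) if v.is_a?(String) }}
    ]
    coerce_union("42", variants, state) # => 42
    coerce_union([], variants, state)   # => []; state[:error] is now populated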
@@ -33,14 +33,20 @@ module OpenAI
  class << self
  # @api private
  #
+ # No coercion needed for Unknown type.
+ #
  # @param value [Object]
  #
  # @param state [Hash{Symbol=>Object}] .
  #
- # @option state [Boolean, :strong] :strictness
+ # @option state [Boolean] :translate_names
+ #
+ # @option state [Boolean] :strictness
  #
  # @option state [Hash{Symbol=>Object}] :exactness
  #
+ # @option state [Class<StandardError>] :error
+ #
  # @option state [Integer] :branched
  #
  # @return [Object]
@@ -18,6 +18,10 @@ module OpenAI
  O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
  O3_PRO = :"o3-pro"
  O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
+ O3_DEEP_RESEARCH = :"o3-deep-research"
+ O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
+ O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
+ O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
  COMPUTER_USE_PREVIEW = :"computer-use-preview"
  COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"

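The four new deep-research constants are added to both `AllModels` and `ResponsesModel`, so they can be passed anywhere a Responses model is accepted. A hedged sketch of selecting one (the prompt is illustrative, and `output_text` is assumed as the SDK's convenience accessor; inspect `response.output` if it is not available):

    require "openai"

    client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

    # The symbol mirrors the new O3_DEEP_RESEARCH constant above.
    response = client.responses.create(
      model: :"o3-deep-research",
      input: "Summarize recent findings on solid-state battery recycling."
    )
    puts response.output_text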
@@ -46,12 +46,19 @@ module OpenAI

  # @!attribute speed
  # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
- # the default. Does not work with `gpt-4o-mini-tts`.
+ # the default.
  #
  # @return [Float, nil]
  optional :speed, Float

- # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, request_options: {})
+ # @!attribute stream_format
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
+ #
+ # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat, nil]
+ optional :stream_format, enum: -> { OpenAI::Audio::SpeechCreateParams::StreamFormat }
+
+ # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {})
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Audio::SpeechCreateParams} for more details.
  #
@@ -67,6 +74,8 @@ module OpenAI
  #
  # @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
  #
+ # @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse
+ #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
@@ -153,6 +162,18 @@ module OpenAI
  # @!method self.values
  # @return [Array<Symbol>]
  end
+
+ # The format to stream the audio in. Supported formats are `sse` and `audio`.
+ # `sse` is not supported for `tts-1` or `tts-1-hd`.
+ module StreamFormat
+ extend OpenAI::Internal::Type::Enum
+
+ SSE = :sse
+ AUDIO = :audio
+
+ # @!method self.values
+ # @return [Array<Symbol>]
+ end
  end
  end
  end
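
With the new `stream_format` parameter and `StreamFormat` enum in place, a speech request can opt into SSE streaming. A hedged sketch, reusing the `client` from the sketch above (note the doc comment's caveat: `sse` is not supported for `tts-1` or `tts-1-hd`):

    audio = client.audio.speech.create(
      model: :"gpt-4o-mini-tts",  # a model that supports `sse`
      voice: :alloy,
      input: "Testing the stream_format parameter added in this release.",
      stream_format: :sse          # or :audio for raw audio chunks
    )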
@@ -18,7 +18,13 @@ module OpenAI
  # @return [Array<OpenAI::Models::Audio::Transcription::Logprob>, nil]
  optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::Transcription::Logprob] }

- # @!method initialize(text:, logprobs: nil)
+ # @!attribute usage
+ # Token usage statistics for the request.
+ #
+ # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration, nil]
+ optional :usage, union: -> { OpenAI::Audio::Transcription::Usage }
+
+ # @!method initialize(text:, logprobs: nil, usage: nil)
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Audio::Transcription} for more details.
  #
@@ -28,6 +34,8 @@ module OpenAI
  # @param text [String] The transcribed text.
  #
  # @param logprobs [Array<OpenAI::Models::Audio::Transcription::Logprob>] The log probabilities of the tokens in the transcription. Only returned with the
+ #
+ # @param usage [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration] Token usage statistics for the request.

  class Logprob < OpenAI::Internal::Type::BaseModel
  # @!attribute token
@@ -55,6 +63,115 @@ module OpenAI
  #
  # @param logprob [Float] The log probability of the token.
  end
+
+ # Token usage statistics for the request.
+ #
+ # @see OpenAI::Models::Audio::Transcription#usage
+ module Usage
+ extend OpenAI::Internal::Type::Union
+
+ discriminator :type
+
+ # Usage statistics for models billed by token usage.
+ variant :tokens, -> { OpenAI::Audio::Transcription::Usage::Tokens }
+
+ # Usage statistics for models billed by audio input duration.
+ variant :duration, -> { OpenAI::Audio::Transcription::Usage::Duration }
+
+ class Tokens < OpenAI::Internal::Type::BaseModel
+ # @!attribute input_tokens
+ # Number of input tokens billed for this request.
+ #
+ # @return [Integer]
+ required :input_tokens, Integer
+
+ # @!attribute output_tokens
+ # Number of output tokens generated.
+ #
+ # @return [Integer]
+ required :output_tokens, Integer
+
+ # @!attribute total_tokens
+ # Total number of tokens used (input + output).
+ #
+ # @return [Integer]
+ required :total_tokens, Integer
+
+ # @!attribute type
+ # The type of the usage object. Always `tokens` for this variant.
+ #
+ # @return [Symbol, :tokens]
+ required :type, const: :tokens
+
+ # @!attribute input_token_details
+ # Details about the input tokens billed for this request.
+ #
+ # @return [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails, nil]
+ optional :input_token_details,
+ -> {
+ OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+ }
+
+ # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
+ # Usage statistics for models billed by token usage.
+ #
+ # @param input_tokens [Integer] Number of input tokens billed for this request.
+ #
+ # @param output_tokens [Integer] Number of output tokens generated.
+ #
+ # @param total_tokens [Integer] Total number of tokens used (input + output).
+ #
+ # @param input_token_details [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails] Details about the input tokens billed for this request.
+ #
+ # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.
+
+ # @see OpenAI::Models::Audio::Transcription::Usage::Tokens#input_token_details
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+ # @!attribute audio_tokens
+ # Number of audio tokens billed for this request.
+ #
+ # @return [Integer, nil]
+ optional :audio_tokens, Integer
+
+ # @!attribute text_tokens
+ # Number of text tokens billed for this request.
+ #
+ # @return [Integer, nil]
+ optional :text_tokens, Integer
+
+ # @!method initialize(audio_tokens: nil, text_tokens: nil)
+ # Details about the input tokens billed for this request.
+ #
+ # @param audio_tokens [Integer] Number of audio tokens billed for this request.
+ #
+ # @param text_tokens [Integer] Number of text tokens billed for this request.
+ end
+ end
+
+ class Duration < OpenAI::Internal::Type::BaseModel
+ # @!attribute duration
+ # Duration of the input audio in seconds.
+ #
+ # @return [Float]
+ required :duration, Float
+
+ # @!attribute type
+ # The type of the usage object. Always `duration` for this variant.
+ #
+ # @return [Symbol, :duration]
+ required :type, const: :duration
+
+ # @!method initialize(duration:, type: :duration)
+ # Usage statistics for models billed by audio input duration.
+ #
+ # @param duration [Float] Duration of the input audio in seconds.
+ #
+ # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
+ end
+
+ # @!method self.variants
+ # @return [Array(OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration)]
+ end
  end
  end
  end
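
Because `Transcription#usage` is a discriminated union (on `type`) over `Usage::Tokens` and `Usage::Duration`, callers can pattern match on the concrete variant. A hedged sketch, reusing the `client` from earlier (the file and model are illustrative):

    require "pathname"

    transcription = client.audio.transcriptions.create(
      file: Pathname("meeting.mp3"),
      model: :"gpt-4o-transcribe"
    )

    case transcription.usage
    in OpenAI::Models::Audio::Transcription::Usage::Tokens => usage
      puts "Billed #{usage.total_tokens} tokens (#{usage.input_tokens} in, #{usage.output_tokens} out)"
    in OpenAI::Models::Audio::Transcription::Usage::Duration => usage
      puts "Billed #{usage.duration}s of audio"
    in nil
      puts "No usage reported"
    end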
@@ -26,7 +26,13 @@ module OpenAI
  optional :logprobs,
  -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob] }

- # @!method initialize(text:, logprobs: nil, type: :"transcript.text.done")
+ # @!attribute usage
+ # Usage statistics for models billed by token usage.
+ #
+ # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage, nil]
+ optional :usage, -> { OpenAI::Audio::TranscriptionTextDoneEvent::Usage }
+
+ # @!method initialize(text:, logprobs: nil, usage: nil, type: :"transcript.text.done")
  # Some parameter documentations has been truncated, see
  # {OpenAI::Models::Audio::TranscriptionTextDoneEvent} for more details.
  #
@@ -39,6 +45,8 @@ module OpenAI
  #
  # @param logprobs [Array<OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob>] The log probabilities of the individual tokens in the transcription. Only includ
  #
+ # @param usage [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage] Usage statistics for models billed by token usage.
+ #
  # @param type [Symbol, :"transcript.text.done"] The type of the event. Always `transcript.text.done`.

  class Logprob < OpenAI::Internal::Type::BaseModel
@@ -70,6 +78,77 @@ module OpenAI
  #
  # @param logprob [Float] The log probability of the token.
  end
+
+ # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent#usage
+ class Usage < OpenAI::Internal::Type::BaseModel
+ # @!attribute input_tokens
+ # Number of input tokens billed for this request.
+ #
+ # @return [Integer]
+ required :input_tokens, Integer
+
+ # @!attribute output_tokens
+ # Number of output tokens generated.
+ #
+ # @return [Integer]
+ required :output_tokens, Integer
+
+ # @!attribute total_tokens
+ # Total number of tokens used (input + output).
+ #
+ # @return [Integer]
+ required :total_tokens, Integer
+
+ # @!attribute type
+ # The type of the usage object. Always `tokens` for this variant.
+ #
+ # @return [Symbol, :tokens]
+ required :type, const: :tokens
+
+ # @!attribute input_token_details
+ # Details about the input tokens billed for this request.
+ #
+ # @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails, nil]
+ optional :input_token_details,
+ -> {
+ OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+ }
+
+ # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
+ # Usage statistics for models billed by token usage.
+ #
+ # @param input_tokens [Integer] Number of input tokens billed for this request.
+ #
+ # @param output_tokens [Integer] Number of output tokens generated.
+ #
+ # @param total_tokens [Integer] Total number of tokens used (input + output).
+ #
+ # @param input_token_details [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails] Details about the input tokens billed for this request.
+ #
+ # @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.
+
+ # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage#input_token_details
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+ # @!attribute audio_tokens
+ # Number of audio tokens billed for this request.
+ #
+ # @return [Integer, nil]
+ optional :audio_tokens, Integer
+
+ # @!attribute text_tokens
+ # Number of text tokens billed for this request.
+ #
+ # @return [Integer, nil]
+ optional :text_tokens, Integer
+
+ # @!method initialize(audio_tokens: nil, text_tokens: nil)
+ # Details about the input tokens billed for this request.
+ #
+ # @param audio_tokens [Integer] Number of audio tokens billed for this request.
+ #
+ # @param text_tokens [Integer] Number of text tokens billed for this request.
+ end
+ end
  end
  end
  end
@@ -28,13 +28,19 @@ module OpenAI
  # @return [Array<OpenAI::Models::Audio::TranscriptionSegment>, nil]
  optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] }

+ # @!attribute usage
+ # Usage statistics for models billed by audio input duration.
+ #
+ # @return [OpenAI::Models::Audio::TranscriptionVerbose::Usage, nil]
+ optional :usage, -> { OpenAI::Audio::TranscriptionVerbose::Usage }
+
  # @!attribute words
  # Extracted words and their corresponding timestamps.
  #
  # @return [Array<OpenAI::Models::Audio::TranscriptionWord>, nil]
  optional :words, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionWord] }

- # @!method initialize(duration:, language:, text:, segments: nil, words: nil)
+ # @!method initialize(duration:, language:, text:, segments: nil, usage: nil, words: nil)
  # Represents a verbose json transcription response returned by model, based on the
  # provided input.
  #
@@ -46,7 +52,31 @@ module OpenAI
  #
  # @param segments [Array<OpenAI::Models::Audio::TranscriptionSegment>] Segments of the transcribed text and their corresponding details.
  #
+ # @param usage [OpenAI::Models::Audio::TranscriptionVerbose::Usage] Usage statistics for models billed by audio input duration.
+ #
  # @param words [Array<OpenAI::Models::Audio::TranscriptionWord>] Extracted words and their corresponding timestamps.
+
+ # @see OpenAI::Models::Audio::TranscriptionVerbose#usage
+ class Usage < OpenAI::Internal::Type::BaseModel
+ # @!attribute duration
+ # Duration of the input audio in seconds.
+ #
+ # @return [Float]
+ required :duration, Float
+
+ # @!attribute type
+ # The type of the usage object. Always `duration` for this variant.
+ #
+ # @return [Symbol, :duration]
+ required :type, const: :duration
+
+ # @!method initialize(duration:, type: :duration)
+ # Usage statistics for models billed by audio input duration.
+ #
+ # @param duration [Float] Duration of the input audio in seconds.
+ #
+ # @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
+ end
  end
  end
  end
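
Verbose transcriptions gain the same duration-billed usage object, but as a plain nested model rather than a union. A brief hedged sketch, again reusing the `client` from earlier (parameters illustrative):

    verbose = client.audio.transcriptions.create(
      file: Pathname("meeting.mp3"),
      model: :"whisper-1",
      response_format: :verbose_json
    )
    puts "Audio billed: #{verbose.usage&.duration}s"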
@@ -39,23 +39,23 @@ module OpenAI
  required :object, const: :"chat.completion"

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true
@@ -90,7 +90,7 @@ module OpenAI
  #
  # @param model [String] The model used for the chat completion.
  #
- # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
  #
@@ -188,23 +188,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
- #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # Specifies the processing type used for serving the request.
+ #
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @see OpenAI::Models::Chat::ChatCompletion#service_tier
  module ServiceTier
@@ -214,6 +214,7 @@ module OpenAI
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
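
With `PRIORITY` added to the `ServiceTier` enum, a request can ask for Priority processing, and the response's `service_tier` reports the tier actually used, which may differ from the requested value. A hedged sketch reusing the `client` from earlier:

    completion = client.chat.completions.create(
      model: :"gpt-4o",
      messages: [{role: :user, content: "Say hello."}],
      service_tier: :priority
    )

    # Reflects the processing mode actually used, per the doc comment above.
    puts completion.service_tier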
@@ -38,23 +38,23 @@ module OpenAI
  required :object, const: :"chat.completion.chunk"

  # @!attribute service_tier
- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil]
  optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true
@@ -95,7 +95,7 @@ module OpenAI
  #
  # @param model [String] The model to generate the completion.
  #
- # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
+ # @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the processing type used for serving the request.
  #
  # @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
  #
@@ -371,23 +371,23 @@ module OpenAI
  end
  end

- # Specifies the latency tier to use for processing the request. This parameter is
- # relevant for customers subscribed to the scale tier service:
+ # Specifies the processing type used for serving the request.
  #
- # - If set to 'auto', and the Project is Scale tier enabled, the system will
- # utilize scale tier credits until they are exhausted.
- # - If set to 'auto', and the Project is not Scale tier enabled, the request will
- # be processed using the default service tier with a lower uptime SLA and no
- # latency guarantee.
- # - If set to 'default', the request will be processed using the default service
- # tier with a lower uptime SLA and no latency guarantee.
- # - If set to 'flex', the request will be processed with the Flex Processing
- # service tier.
- # [Learn more](https://platform.openai.com/docs/guides/flex-processing).
+ # - If set to 'auto', then the request will be processed with the service tier
+ # configured in the Project settings. Unless otherwise configured, the Project
+ # will use 'default'.
+ # - If set to 'default', then the requset will be processed with the standard
+ # pricing and performance for the selected model.
+ # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
+ # 'priority', then the request will be processed with the corresponding service
+ # tier. [Contact sales](https://openai.com/contact-sales) to learn more about
+ # Priority processing.
  # - When not set, the default behavior is 'auto'.
  #
- # When this parameter is set, the response body will include the `service_tier`
- # utilized.
+ # When the `service_tier` parameter is set, the response body will include the
+ # `service_tier` value based on the processing mode actually used to serve the
+ # request. This response value may be different from the value set in the
+ # parameter.
  #
  # @see OpenAI::Models::Chat::ChatCompletionChunk#service_tier
  module ServiceTier
@@ -397,6 +397,7 @@ module OpenAI
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
+ PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]