openai 0.9.0 → 0.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +20 -0
  3. data/README.md +1 -1
  4. data/lib/openai/errors.rb +22 -0
  5. data/lib/openai/internal/type/array_of.rb +6 -1
  6. data/lib/openai/internal/type/base_model.rb +76 -24
  7. data/lib/openai/internal/type/boolean.rb +7 -1
  8. data/lib/openai/internal/type/converter.rb +42 -34
  9. data/lib/openai/internal/type/enum.rb +10 -2
  10. data/lib/openai/internal/type/file_input.rb +6 -1
  11. data/lib/openai/internal/type/hash_of.rb +6 -1
  12. data/lib/openai/internal/type/union.rb +12 -7
  13. data/lib/openai/internal/type/unknown.rb +7 -1
  14. data/lib/openai/models/audio/speech_create_params.rb +23 -2
  15. data/lib/openai/models/audio/transcription.rb +118 -1
  16. data/lib/openai/models/audio/transcription_text_done_event.rb +80 -1
  17. data/lib/openai/models/audio/transcription_verbose.rb +31 -1
  18. data/lib/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rb +60 -25
  19. data/lib/openai/models/responses/response_code_interpreter_call_code_delta_event.rb +17 -8
  20. data/lib/openai/models/responses/response_code_interpreter_call_code_done_event.rb +14 -10
  21. data/lib/openai/models/responses/response_code_interpreter_call_completed_event.rb +11 -10
  22. data/lib/openai/models/responses/response_code_interpreter_call_in_progress_event.rb +11 -10
  23. data/lib/openai/models/responses/response_code_interpreter_call_interpreting_event.rb +11 -10
  24. data/lib/openai/models/responses/response_code_interpreter_tool_call.rb +49 -78
  25. data/lib/openai/models/responses/response_create_params.rb +29 -29
  26. data/lib/openai/models/responses/response_output_text.rb +18 -2
  27. data/lib/openai/models/responses/response_stream_event.rb +2 -2
  28. data/lib/openai/resources/audio/speech.rb +3 -1
  29. data/lib/openai/resources/chat/completions.rb +8 -0
  30. data/lib/openai/resources/fine_tuning/checkpoints/permissions.rb +1 -2
  31. data/lib/openai/resources/responses.rb +12 -12
  32. data/lib/openai/version.rb +1 -1
  33. data/rbi/openai/errors.rbi +16 -0
  34. data/rbi/openai/internal/type/boolean.rbi +2 -0
  35. data/rbi/openai/internal/type/converter.rbi +15 -15
  36. data/rbi/openai/internal/type/union.rbi +5 -0
  37. data/rbi/openai/internal/type/unknown.rbi +2 -0
  38. data/rbi/openai/models/audio/speech_create_params.rbi +59 -2
  39. data/rbi/openai/models/audio/transcription.rbi +213 -3
  40. data/rbi/openai/models/audio/transcription_text_done_event.rbi +146 -1
  41. data/rbi/openai/models/audio/transcription_verbose.rbi +47 -0
  42. data/rbi/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbi +95 -26
  43. data/rbi/openai/models/responses/response_code_interpreter_call_code_delta_event.rbi +17 -7
  44. data/rbi/openai/models/responses/response_code_interpreter_call_code_done_event.rbi +13 -5
  45. data/rbi/openai/models/responses/response_code_interpreter_call_completed_event.rbi +13 -21
  46. data/rbi/openai/models/responses/response_code_interpreter_call_in_progress_event.rbi +13 -21
  47. data/rbi/openai/models/responses/response_code_interpreter_call_interpreting_event.rbi +13 -21
  48. data/rbi/openai/models/responses/response_code_interpreter_tool_call.rbi +83 -125
  49. data/rbi/openai/models/responses/response_create_params.rbi +83 -60
  50. data/rbi/openai/models/responses/response_output_text.rbi +26 -4
  51. data/rbi/openai/resources/audio/speech.rbi +6 -1
  52. data/rbi/openai/resources/fine_tuning/checkpoints/permissions.rbi +1 -3
  53. data/rbi/openai/resources/responses.rbi +46 -46
  54. data/sig/openai/errors.rbs +9 -0
  55. data/sig/openai/internal/type/converter.rbs +7 -1
  56. data/sig/openai/models/audio/speech_create_params.rbs +21 -1
  57. data/sig/openai/models/audio/transcription.rbs +95 -3
  58. data/sig/openai/models/audio/transcription_text_done_event.rbs +72 -2
  59. data/sig/openai/models/audio/transcription_verbose.rbs +21 -0
  60. data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs +53 -16
  61. data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs +5 -0
  62. data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs +5 -0
  63. data/sig/openai/models/responses/response_code_interpreter_call_completed_event.rbs +4 -4
  64. data/sig/openai/models/responses/response_code_interpreter_call_in_progress_event.rbs +4 -4
  65. data/sig/openai/models/responses/response_code_interpreter_call_interpreting_event.rbs +4 -4
  66. data/sig/openai/models/responses/response_code_interpreter_tool_call.rbs +31 -52
  67. data/sig/openai/models/responses/response_create_params.rbs +18 -10
  68. data/sig/openai/models/responses/response_output_text.rbs +15 -1
  69. data/sig/openai/resources/audio/speech.rbs +1 -0
  70. data/sig/openai/resources/fine_tuning/checkpoints/permissions.rbs +1 -1
  71. data/sig/openai/resources/responses.rbs +4 -4
  72. metadata +2 -2

data/rbi/openai/resources/responses.rbi
@@ -21,21 +21,21 @@ module OpenAI
  # your own data as input for the model's response.
  sig do
  params(
- input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
- model:
- T.any(
- String,
- OpenAI::ChatModel::OrSymbol,
- OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
- ),
  background: T.nilable(T::Boolean),
  include:
  T.nilable(
  T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
  ),
+ input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
  instructions: T.nilable(String),
  max_output_tokens: T.nilable(Integer),
  metadata: T.nilable(T::Hash[Symbol, String]),
+ model:
+ T.any(
+ String,
+ OpenAI::ChatModel::OrSymbol,
+ OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+ ),
  parallel_tool_calls: T.nilable(T::Boolean),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
@@ -81,22 +81,6 @@ module OpenAI
  ).returns(OpenAI::Responses::Response)
  end
  def create(
- # Text, image, or file inputs to the model, used to generate a response.
- #
- # Learn more:
- #
- # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- # - [Image inputs](https://platform.openai.com/docs/guides/images)
- # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
- # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
- # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
- input:,
- # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
- # wide range of models with different capabilities, performance characteristics,
- # and price points. Refer to the
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
- # available models.
- model:,
  # Whether to run the model response in the background.
  # [Learn more](https://platform.openai.com/docs/guides/background).
  background: nil,
@@ -116,6 +100,16 @@ module OpenAI
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
  # in code interpreter tool call items.
  include: nil,
+ # Text, image, or file inputs to the model, used to generate a response.
+ #
+ # Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Image inputs](https://platform.openai.com/docs/guides/images)
+ # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ input: nil,
  # A system (or developer) message inserted into the model's context.
  #
  # When using along with `previous_response_id`, the instructions from a previous
@@ -133,6 +127,12 @@ module OpenAI
  # Keys are strings with a maximum length of 64 characters. Values are strings with
  # a maximum length of 512 characters.
  metadata: nil,
+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+ # wide range of models with different capabilities, performance characteristics,
+ # and price points. Refer to the
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
+ # available models.
+ model: nil,
  # Whether to allow the model to run tool calls in parallel.
  parallel_tool_calls: nil,
  # The unique ID of the previous response to the model. Use this to create
@@ -237,21 +237,21 @@ module OpenAI
  # your own data as input for the model's response.
  sig do
  params(
- input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
- model:
- T.any(
- String,
- OpenAI::ChatModel::OrSymbol,
- OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
- ),
  background: T.nilable(T::Boolean),
  include:
  T.nilable(
  T::Array[OpenAI::Responses::ResponseIncludable::OrSymbol]
  ),
+ input: OpenAI::Responses::ResponseCreateParams::Input::Variants,
  instructions: T.nilable(String),
  max_output_tokens: T.nilable(Integer),
  metadata: T.nilable(T::Hash[Symbol, String]),
+ model:
+ T.any(
+ String,
+ OpenAI::ChatModel::OrSymbol,
+ OpenAI::ResponsesModel::ResponsesOnlyModel::OrSymbol
+ ),
  parallel_tool_calls: T.nilable(T::Boolean),
  previous_response_id: T.nilable(String),
  prompt: T.nilable(OpenAI::Responses::ResponsePrompt::OrHash),
@@ -297,22 +297,6 @@ module OpenAI
  )
  end
  def stream_raw(
- # Text, image, or file inputs to the model, used to generate a response.
- #
- # Learn more:
- #
- # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- # - [Image inputs](https://platform.openai.com/docs/guides/images)
- # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
- # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
- # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
- input:,
- # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
- # wide range of models with different capabilities, performance characteristics,
- # and price points. Refer to the
- # [model guide](https://platform.openai.com/docs/models) to browse and compare
- # available models.
- model:,
  # Whether to run the model response in the background.
  # [Learn more](https://platform.openai.com/docs/guides/background).
  background: nil,
@@ -332,6 +316,16 @@ module OpenAI
  # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
  # in code interpreter tool call items.
  include: nil,
+ # Text, image, or file inputs to the model, used to generate a response.
+ #
+ # Learn more:
+ #
+ # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
+ # - [Image inputs](https://platform.openai.com/docs/guides/images)
+ # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
+ # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
+ # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
+ input: nil,
  # A system (or developer) message inserted into the model's context.
  #
  # When using along with `previous_response_id`, the instructions from a previous
@@ -349,6 +343,12 @@ module OpenAI
  # Keys are strings with a maximum length of 64 characters. Values are strings with
  # a maximum length of 512 characters.
  metadata: nil,
+ # Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
+ # wide range of models with different capabilities, performance characteristics,
+ # and price points. Refer to the
+ # [model guide](https://platform.openai.com/docs/models) to browse and compare
+ # available models.
+ model: nil,
  # Whether to allow the model to run tool calls in parallel.
  parallel_tool_calls: nil,
  # The unique ID of the previous response to the model. Use this to create
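
In both `create` and `stream_raw`, `input:` and `model:` change from required keywords to nil-defaulted keywords reordered among the other optional parameters. A minimal sketch of the call shape after this change; the client setup, model name, and the follow-up pattern are illustrative assumptions, and which combinations of omitted keywords are accepted is decided by the API, not by the gem.

```ruby
require "openai"

# Assumes an API key in the environment; not part of the diff.
client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

# Explicit form: both keywords are still accepted exactly as before.
response = client.responses.create(
  model: "gpt-4o",
  input: "Summarize the 0.10.0 changes in one sentence."
)
puts response.id

# New in 0.10.0: `input:` and `model:` default to nil, so a follow-up call can
# omit `model:` and lean on `previous_response_id` (pattern assumed, not shown in the diff).
follow_up = client.responses.create(
  previous_response_id: response.id,
  input: "Now list the new audio-related fields."
)
puts follow_up.id
```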

data/sig/openai/errors.rbs
@@ -5,6 +5,15 @@ module OpenAI
  end

  class ConversionError < OpenAI::Errors::Error
+ def cause: -> StandardError?
+
+ def initialize: (
+ on: Class,
+ method: Symbol,
+ target: top,
+ value: top,
+ ?cause: StandardError?
+ ) -> void
  end

  class APIError < OpenAI::Errors::Error
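
`ConversionError` now exposes the wrapped exception through `#cause` and gains a keyword initializer. A sketch of surfacing it from application code, assuming the SDK raises this error when it cannot coerce a response value and that the constant lives under `OpenAI::Errors` as the superclass above suggests.

```ruby
begin
  client.responses.create(model: "gpt-4o", input: "hello") # client as configured in the earlier sketch
rescue OpenAI::Errors::ConversionError => e
  warn "conversion failed: #{e.message}"
  warn "wrapped error: #{e.cause.inspect}" # StandardError or nil, per the new signature
end
```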

data/sig/openai/internal/type/converter.rbs
@@ -8,8 +8,10 @@ module OpenAI

  type coerce_state =
  {
- strictness: bool | :strong,
+ translate_names: bool,
+ strictness: bool,
  exactness: { yes: Integer, no: Integer, maybe: Integer },
+ error: Class,
  branched: Integer
  }

@@ -37,6 +39,10 @@ module OpenAI
  | OpenAI::Internal::Type::Converter::input spec
  ) -> (^-> top)

+ def self.new_coerce_state: (
+ ?translate_names: bool
+ ) -> OpenAI::Internal::Type::Converter::coerce_state
+
  def self.coerce: (
  OpenAI::Internal::Type::Converter::input target,
  top value,
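
`coerce_state` gains `translate_names` and an `error` class, `strictness` drops the `:strong` variant, and a `new_coerce_state` constructor appears. This is internal plumbing that the SDK threads through nested type coercions; the tiny sketch below only mirrors the signature above and is not something applications normally call.

```ruby
# Internal API, shown only to illustrate the new constructor; the concrete
# default values of the returned hash are an implementation detail and are
# not asserted here.
state = OpenAI::Internal::Type::Converter.new_coerce_state(translate_names: false)
state[:exactness] # => the { yes:, no:, maybe: } Integer counters from the record type
```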

data/sig/openai/models/audio/speech_create_params.rbs
@@ -8,7 +8,8 @@ module OpenAI
  voice: OpenAI::Models::Audio::SpeechCreateParams::voice,
  instructions: String,
  response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
- speed: Float
+ speed: Float,
+ stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format
  }
  & OpenAI::Internal::Type::request_parameters

@@ -36,6 +37,12 @@ module OpenAI

  def speed=: (Float) -> Float

+ attr_reader stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format?
+
+ def stream_format=: (
+ OpenAI::Models::Audio::SpeechCreateParams::stream_format
+ ) -> OpenAI::Models::Audio::SpeechCreateParams::stream_format
+
  def initialize: (
  input: String,
  model: OpenAI::Models::Audio::SpeechCreateParams::model,
@@ -43,6 +50,7 @@ module OpenAI
  ?instructions: String,
  ?response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
  ?speed: Float,
+ ?stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format,
  ?request_options: OpenAI::request_opts
  ) -> void

@@ -53,6 +61,7 @@ module OpenAI
  instructions: String,
  response_format: OpenAI::Models::Audio::SpeechCreateParams::response_format,
  speed: Float,
+ stream_format: OpenAI::Models::Audio::SpeechCreateParams::stream_format,
  request_options: OpenAI::RequestOptions
  }

@@ -110,6 +119,17 @@ module OpenAI

  def self?.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::response_format]
  end
+
+ type stream_format = :sse | :audio
+
+ module StreamFormat
+ extend OpenAI::Internal::Type::Enum
+
+ SSE: :sse
+ AUDIO: :audio
+
+ def self?.values: -> ::Array[OpenAI::Models::Audio::SpeechCreateParams::stream_format]
+ end
  end
  end
  end
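
`speech_create_params` gains an optional `stream_format` member restricted to `:sse` or `:audio`. A sketch of passing it through `audio.speech.create`; the model, voice, and handling of the returned audio are assumptions rather than part of the diff.

```ruby
require "openai"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

speech = client.audio.speech.create(
  model: "gpt-4o-mini-tts",     # illustrative TTS model
  voice: :alloy,                # illustrative voice
  input: "Version 0.10.0 adds a stream_format option.",
  stream_format: :audio         # new in 0.10.0; :sse is the other accepted value
)

# The endpoint returns binary audio; the exact return object is SDK-defined,
# so this guardedly handles either an IO-like object or a raw string.
File.binwrite("speech.mp3", speech.respond_to?(:read) ? speech.read : speech)
```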

data/sig/openai/models/audio/transcription.rbs
@@ -4,7 +4,8 @@ module OpenAI
  type transcription =
  {
  text: String,
- logprobs: ::Array[OpenAI::Audio::Transcription::Logprob]
+ logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+ usage: OpenAI::Models::Audio::Transcription::usage
  }

  class Transcription < OpenAI::Internal::Type::BaseModel
@@ -16,14 +17,22 @@ module OpenAI
  ::Array[OpenAI::Audio::Transcription::Logprob]
  ) -> ::Array[OpenAI::Audio::Transcription::Logprob]

+ attr_reader usage: OpenAI::Models::Audio::Transcription::usage?
+
+ def usage=: (
+ OpenAI::Models::Audio::Transcription::usage
+ ) -> OpenAI::Models::Audio::Transcription::usage
+
  def initialize: (
  text: String,
- ?logprobs: ::Array[OpenAI::Audio::Transcription::Logprob]
+ ?logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+ ?usage: OpenAI::Models::Audio::Transcription::usage
  ) -> void

  def to_hash: -> {
  text: String,
- logprobs: ::Array[OpenAI::Audio::Transcription::Logprob]
+ logprobs: ::Array[OpenAI::Audio::Transcription::Logprob],
+ usage: OpenAI::Models::Audio::Transcription::usage
  }

  type logprob = { token: String, bytes: ::Array[Float], logprob: Float }
@@ -53,6 +62,89 @@ module OpenAI
  logprob: Float
  }
  end
+
+ type usage =
+ OpenAI::Audio::Transcription::Usage::Tokens
+ | OpenAI::Audio::Transcription::Usage::Duration
+
+ module Usage
+ extend OpenAI::Internal::Type::Union
+
+ type tokens =
+ {
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ type: :tokens,
+ input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+ }
+
+ class Tokens < OpenAI::Internal::Type::BaseModel
+ attr_accessor input_tokens: Integer
+
+ attr_accessor output_tokens: Integer
+
+ attr_accessor total_tokens: Integer
+
+ attr_accessor type: :tokens
+
+ attr_reader input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails?
+
+ def input_token_details=: (
+ OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+ ) -> OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+
+ def initialize: (
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ ?input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails,
+ ?type: :tokens
+ ) -> void
+
+ def to_hash: -> {
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ type: :tokens,
+ input_token_details: OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
+ }
+
+ type input_token_details =
+ { audio_tokens: Integer, text_tokens: Integer }
+
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+ attr_reader audio_tokens: Integer?
+
+ def audio_tokens=: (Integer) -> Integer
+
+ attr_reader text_tokens: Integer?
+
+ def text_tokens=: (Integer) -> Integer
+
+ def initialize: (
+ ?audio_tokens: Integer,
+ ?text_tokens: Integer
+ ) -> void
+
+ def to_hash: -> { audio_tokens: Integer, text_tokens: Integer }
+ end
+ end
+
+ type duration = { duration: Float, type: :duration }
+
+ class Duration < OpenAI::Internal::Type::BaseModel
+ attr_accessor duration: Float
+
+ attr_accessor type: :duration
+
+ def initialize: (duration: Float, ?type: :duration) -> void
+
+ def to_hash: -> { duration: Float, type: :duration }
+ end
+
+ def self?.variants: -> ::Array[OpenAI::Models::Audio::Transcription::usage]
+ end
  end
  end
  end
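
`Transcription#usage` is new and typed as a union of token-based usage (`Usage::Tokens`, with optional `input_token_details`) and duration-based usage (`Usage::Duration`). A sketch of branching on it after a transcription request; the file path and model name are placeholders.

```ruby
require "openai"
require "pathname"

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])

transcription = client.audio.transcriptions.create(
  file: Pathname("meeting.mp3"),   # placeholder path
  model: "gpt-4o-transcribe"       # illustrative model name
)

case (usage = transcription.usage)
when OpenAI::Audio::Transcription::Usage::Tokens
  detail = usage.input_token_details
  puts "tokens: #{usage.input_tokens} in / #{usage.output_tokens} out (#{usage.total_tokens} total)"
  puts "  audio tokens: #{detail.audio_tokens}" if detail
when OpenAI::Audio::Transcription::Usage::Duration
  puts "billed duration: #{usage.duration}s"
else
  puts "no usage reported"
end
```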

data/sig/openai/models/audio/transcription_text_done_event.rbs
@@ -5,7 +5,8 @@ module OpenAI
  {
  text: String,
  type: :"transcript.text.done",
- logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+ logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+ usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
  }

  class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel
@@ -19,16 +20,24 @@ module OpenAI
  ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
  ) -> ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]

+ attr_reader usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage?
+
+ def usage=: (
+ OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+ ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage
+
  def initialize: (
  text: String,
  ?logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+ ?usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage,
  ?type: :"transcript.text.done"
  ) -> void

  def to_hash: -> {
  text: String,
  type: :"transcript.text.done",
- logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob]
+ logprobs: ::Array[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob],
+ usage: OpenAI::Audio::TranscriptionTextDoneEvent::Usage
  }

  type logprob =
@@ -59,6 +68,67 @@ module OpenAI
  logprob: Float
  }
  end
+
+ type usage =
+ {
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ type: :tokens,
+ input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+ }
+
+ class Usage < OpenAI::Internal::Type::BaseModel
+ attr_accessor input_tokens: Integer
+
+ attr_accessor output_tokens: Integer
+
+ attr_accessor total_tokens: Integer
+
+ attr_accessor type: :tokens
+
+ attr_reader input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails?
+
+ def input_token_details=: (
+ OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+ ) -> OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+
+ def initialize: (
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ ?input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails,
+ ?type: :tokens
+ ) -> void
+
+ def to_hash: -> {
+ input_tokens: Integer,
+ output_tokens: Integer,
+ total_tokens: Integer,
+ type: :tokens,
+ input_token_details: OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
+ }
+
+ type input_token_details =
+ { audio_tokens: Integer, text_tokens: Integer }
+
+ class InputTokenDetails < OpenAI::Internal::Type::BaseModel
+ attr_reader audio_tokens: Integer?
+
+ def audio_tokens=: (Integer) -> Integer
+
+ attr_reader text_tokens: Integer?
+
+ def text_tokens=: (Integer) -> Integer
+
+ def initialize: (
+ ?audio_tokens: Integer,
+ ?text_tokens: Integer
+ ) -> void
+
+ def to_hash: -> { audio_tokens: Integer, text_tokens: Integer }
+ end
+ end
  end
  end
  end

data/sig/openai/models/audio/transcription_verbose.rbs
@@ -7,6 +7,7 @@ module OpenAI
  language: String,
  text: String,
  segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+ usage: OpenAI::Audio::TranscriptionVerbose::Usage,
  words: ::Array[OpenAI::Audio::TranscriptionWord]
  }

@@ -23,6 +24,12 @@ module OpenAI
  ::Array[OpenAI::Audio::TranscriptionSegment]
  ) -> ::Array[OpenAI::Audio::TranscriptionSegment]

+ attr_reader usage: OpenAI::Audio::TranscriptionVerbose::Usage?
+
+ def usage=: (
+ OpenAI::Audio::TranscriptionVerbose::Usage
+ ) -> OpenAI::Audio::TranscriptionVerbose::Usage
+
  attr_reader words: ::Array[OpenAI::Audio::TranscriptionWord]?

  def words=: (
@@ -34,6 +41,7 @@ module OpenAI
  language: String,
  text: String,
  ?segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+ ?usage: OpenAI::Audio::TranscriptionVerbose::Usage,
  ?words: ::Array[OpenAI::Audio::TranscriptionWord]
  ) -> void

@@ -42,8 +50,21 @@ module OpenAI
  language: String,
  text: String,
  segments: ::Array[OpenAI::Audio::TranscriptionSegment],
+ usage: OpenAI::Audio::TranscriptionVerbose::Usage,
  words: ::Array[OpenAI::Audio::TranscriptionWord]
  }
+
+ type usage = { duration: Float, type: :duration }
+
+ class Usage < OpenAI::Internal::Type::BaseModel
+ attr_accessor duration: Float
+
+ attr_accessor type: :duration
+
+ def initialize: (duration: Float, ?type: :duration) -> void
+
+ def to_hash: -> { duration: Float, type: :duration }
+ end
  end
  end
  end

data/sig/openai/models/fine_tuning/checkpoints/permission_retrieve_response.rbs
@@ -4,34 +4,71 @@ module OpenAI
  module Checkpoints
  type permission_retrieve_response =
  {
- id: String,
- created_at: Integer,
- object: :"checkpoint.permission",
- project_id: String
+ data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+ has_more: bool,
+ object: :list,
+ first_id: String?,
+ last_id: String?
  }

  class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
- attr_accessor id: String
+ attr_accessor data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data]

- attr_accessor created_at: Integer
+ attr_accessor has_more: bool

- attr_accessor object: :"checkpoint.permission"
+ attr_accessor object: :list

- attr_accessor project_id: String
+ attr_accessor first_id: String?
+
+ attr_accessor last_id: String?

  def initialize: (
- id: String,
- created_at: Integer,
- project_id: String,
- ?object: :"checkpoint.permission"
+ data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+ has_more: bool,
+ ?first_id: String?,
+ ?last_id: String?,
+ ?object: :list
  ) -> void

  def to_hash: -> {
- id: String,
- created_at: Integer,
- object: :"checkpoint.permission",
- project_id: String
+ data: ::Array[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data],
+ has_more: bool,
+ object: :list,
+ first_id: String?,
+ last_id: String?
  }
+
+ type data =
+ {
+ id: String,
+ created_at: Integer,
+ object: :"checkpoint.permission",
+ project_id: String
+ }
+
+ class Data < OpenAI::Internal::Type::BaseModel
+ attr_accessor id: String
+
+ attr_accessor created_at: Integer
+
+ attr_accessor object: :"checkpoint.permission"
+
+ attr_accessor project_id: String
+
+ def initialize: (
+ id: String,
+ created_at: Integer,
+ project_id: String,
+ ?object: :"checkpoint.permission"
+ ) -> void
+
+ def to_hash: -> {
+ id: String,
+ created_at: Integer,
+ object: :"checkpoint.permission",
+ project_id: String
+ }
+ end
  end
  end
  end
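
`PermissionRetrieveResponse` changes from a single permission object into a list wrapper: `data` holds the per-permission records (now the nested `Data` model), alongside `has_more`, `first_id`, and `last_id`. A sketch of reading it; the checkpoint identifier and the exact call shape are assumptions.

```ruby
# Assumes the permissions resource is reached as below and that retrieve takes
# the fine-tuned model checkpoint ID; the ID itself is a placeholder.
page = client.fine_tuning.checkpoints.permissions.retrieve("ftckpt_abc123")

page.data.each do |permission|
  puts "#{permission.id} -> project #{permission.project_id} (created #{permission.created_at})"
end
puts "additional pages available" if page.has_more
```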

data/sig/openai/models/responses/response_code_interpreter_call_code_delta_event.rbs
@@ -4,6 +4,7 @@ module OpenAI
  type response_code_interpreter_call_code_delta_event =
  {
  delta: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.code_interpreter_call_code.delta"
@@ -12,6 +13,8 @@ module OpenAI
  class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
  attr_accessor delta: String

+ attr_accessor item_id: String
+
  attr_accessor output_index: Integer

  attr_accessor sequence_number: Integer
@@ -20,6 +23,7 @@ module OpenAI

  def initialize: (
  delta: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.code_interpreter_call_code.delta"
@@ -27,6 +31,7 @@ module OpenAI

  def to_hash: -> {
  delta: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.code_interpreter_call_code.delta"

data/sig/openai/models/responses/response_code_interpreter_call_code_done_event.rbs
@@ -4,6 +4,7 @@ module OpenAI
  type response_code_interpreter_call_code_done_event =
  {
  code: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.code_interpreter_call_code.done"
@@ -12,6 +13,8 @@ module OpenAI
  class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
  attr_accessor code: String

+ attr_accessor item_id: String
+
  attr_accessor output_index: Integer

  attr_accessor sequence_number: Integer
@@ -20,6 +23,7 @@ module OpenAI

  def initialize: (
  code: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  ?type: :"response.code_interpreter_call_code.done"
@@ -27,6 +31,7 @@ module OpenAI

  def to_hash: -> {
  code: String,
+ item_id: String,
  output_index: Integer,
  sequence_number: Integer,
  type: :"response.code_interpreter_call_code.done"
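
Both code-interpreter code events now carry an `item_id` linking each streamed code fragment to its output item. A sketch of consuming them from a raw response stream; the tool configuration, the enumerability of the stream, and the alias constants under `OpenAI::Responses` are assumptions based on the rest of this diff.

```ruby
# Assumes `stream_raw` yields typed event objects; the code_interpreter tool
# configuration here is illustrative only. `client` is as configured in the
# earlier sketches.
stream = client.responses.stream_raw(
  model: "gpt-4o",
  input: "Plot y = x**2 for x in 0..10 and describe the result.",
  tools: [{ type: :code_interpreter, container: { type: :auto } }]
)

code_by_item = Hash.new { |hash, key| hash[key] = +"" }

stream.each do |event|
  case event
  when OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent
    code_by_item[event.item_id] << event.delta   # item_id is new in 0.10.0
  when OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent
    puts "item #{event.item_id} finished with #{event.code.lines.count} lines of code"
  end
end
```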